diff --git "a/3717.jsonl" "b/3717.jsonl" new file mode 100644--- /dev/null +++ "b/3717.jsonl" @@ -0,0 +1,570 @@ +{"seq_id":"73321466","text":"import copy, os, sys\nfrom RootTools.core.Sample import Sample\nimport ROOT\n\ndef get_parser():\n import argparse\n argParser = argparse.ArgumentParser(description = \"Argument parser for samples file\")\n argParser.add_argument('--overwrite', action='store_true', help=\"Overwrite current entry in db?\")\n argParser.add_argument('--update', action='store_true', help=\"Update current entry in db?\")\n return argParser\n \n# Logging\nif __name__==\"__main__\":\n import nanoMET.tools.logger as logger\n logger = logger.get_logger(\"INFO\", logFile = None )\n import RootTools.core.logger as logger_rt\n logger_rt = logger_rt.get_logger(\"INFO\", logFile = None )\n options = get_parser().parse_args()\n ov = options.overwrite\n if options.update:\n ov = 'update'\nelse:\n import logging\n logger = logging.getLogger(__name__)\n ov = False\n\n# Redirector\nfrom nanoMET.tools.user import redirector_global as redirector\n\n# DB\nfrom nanoMET.tools.user import dbDir\ndbFile = dbDir+\"/samples/DB_Run2017_31Mar2018_private.sql\"\n\nlogger.info(\"Using db file: %s\", dbFile)\n\n# DoubleMuon\nDoubleMuon_Run2017B_31Mar2018 = Sample.nanoAODfromDAS(\"DoubleMuon_Run2017B_31Mar2018\", \"/DoubleMuon/schoef-crab_Run2017B-31Mar2018-v1_legacy_nano_v4-ef59f0c1717f190a6e4b4df4955a4722/USER\", dbFile=dbFile, redirector=redirector, instance=\"phys03\", overwrite=ov)\nDoubleMuon_Run2017C_31Mar2018 = Sample.nanoAODfromDAS(\"DoubleMuon_Run2017C_31Mar2018\", \"/DoubleMuon/schoef-crab_Run2017C-31Mar2018-v1_legacy_nano_v4-ef59f0c1717f190a6e4b4df4955a4722/USER\", dbFile=dbFile, redirector=redirector, instance=\"phys03\", overwrite=ov)\nDoubleMuon_Run2017D_31Mar2018 = Sample.nanoAODfromDAS(\"DoubleMuon_Run2017D_31Mar2018\", \"/DoubleMuon/schoef-crab_Run2017D-31Mar2018-v1_legacy_nano_v4-ef59f0c1717f190a6e4b4df4955a4722/USER\", dbFile=dbFile, redirector=redirector, instance=\"phys03\", overwrite=ov)\nDoubleMuon_Run2017E_31Mar2018 = Sample.nanoAODfromDAS(\"DoubleMuon_Run2017E_31Mar2018\", \"/DoubleMuon/schoef-crab_Run2017E-31Mar2018-v1_legacy_nano_v4-ef59f0c1717f190a6e4b4df4955a4722/USER\", dbFile=dbFile, redirector=redirector, instance=\"phys03\", overwrite=ov)\nDoubleMuon_Run2017F_31Mar2018 = Sample.nanoAODfromDAS(\"DoubleMuon_Run2017F_31Mar2018\", \"/DoubleMuon/schoef-crab_Run2017F-31Mar2018-v1_legacy_nano_v4-ef59f0c1717f190a6e4b4df4955a4722/USER\", dbFile=dbFile, redirector=redirector, instance=\"phys03\", overwrite=ov)\n\nDoubleMuon = [\n DoubleMuon_Run2017B_31Mar2018,\n DoubleMuon_Run2017C_31Mar2018,\n DoubleMuon_Run2017D_31Mar2018,\n DoubleMuon_Run2017E_31Mar2018,\n DoubleMuon_Run2017F_31Mar2018,\n]\n\nallSamples = DoubleMuon\n\nfor s in allSamples:\n s.json = os.path.expandvars(\"$CMSSW_BASE/src/Samples/Tools/data/json/Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON.txt\")\n s.isData = True\n\nfrom nanoMET.tools.AutoClass import AutoClass\nsamples = AutoClass( allSamples )\n","sub_path":"nanoAOD/python/Run2017_31Mar2018_private.py","file_name":"Run2017_31Mar2018_private.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"416164094","text":"#!/usr/bin/env python\n\nimport PyD_alpha_init as self\nfrom PyD_alpha_init import *\n\n#==================================================================================================================\n\ndef calc_dist2dip(qsoname):\n\n idx 
= np.where(self.qsolist['name']==qsoname)[0][0]\n val = self.qsolist['RA\\n(hh:mm:ss)'][idx].split(':')\n ra = float(val[0])+float(val[1])/60+float(val[2])/3600 # in hours\n val = self.qsolist['DEC\\n(dd:mm:ss)'][idx].split(':')\n if (val[0][0]=='-'):\n dec = float(val[0])-float(val[1])/60-float(val[2])/3600 # in degrees\n else:\n dec = float(val[0])+float(val[1])/60+float(val[2])/3600 # in degrees\n zem = float(self.qsolist['z_em'][idx])\n \n # Dot product using spherical coordinates phi and theta in radians\n \n xdipole = self.alphara*360/24. # in degrees\n ydipole = self.alphadec # in degrees\n distance = calc_spheredist(ra*360/24.,dec,xdipole,ydipole)\n\n return zem,ra,dec,distance\n\n#==================================================================================================================\n\ndef calc_spheredist(ra1,dec1,ra2,dec2):\n \n theta1 = ra1*math.pi/180\n phi1 = math.pi/2-dec1*math.pi/180\n x1 = math.sin(phi1)*math.cos(theta1)\n y1 = math.sin(phi1)*math.sin(theta1) \n z1 = math.cos(phi1)\n theta2 = ra2*math.pi/180\n phi2 = math.pi/2-dec2*math.pi/180\n x2 = math.sin(phi2)*math.cos(theta2)\n y2 = math.sin(phi2)*math.sin(theta2) \n z2 = math.cos(phi2)\n distance = math.acos(x1*x2+y1*y2+z1*z2)\n distance = distance*180/math.pi\n \n return distance\n\n#==================================================================================================================\n","sub_path":"alpha/bak/PyD_alpha_calc.py","file_name":"PyD_alpha_calc.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"129752712","text":"import re\n\nfrom .. import config\nfrom ..h import E, outerHTML\nfrom ..messages import die\nfrom . import steps\n\n\nclass IdlShorthand:\n def __init__(self):\n self.stage = \"start\"\n self.escapedText = None\n self.linkText = []\n self.bsAutolink = \"\"\n self.linkFor = None\n self.lt = None\n self.linkType = None\n\n def respond(self, match, dom=None):\n if self.stage == \"start\":\n return self.respondStart(match)\n elif self.stage == \"link text\":\n return self.respondLinkText(match, dom)\n elif self.stage == \"end\":\n return self.respondEnd()\n\n def respondStart(self, match):\n self.bsAutolink = match.group(0)\n escape, self.linkFor, self.lt, self.linkType, hasLinkText = match.groups()\n if escape:\n self.escapedText = match.group(0)[1:]\n\n if self.linkFor == \"\":\n self.linkFor = \"/\"\n\n if self.linkType is None:\n self.linkType = \"idl\"\n\n if hasLinkText:\n self.stage = \"link text\"\n return steps.NextBody(endRe)\n else:\n self.stage = \"end\"\n return steps.NextLiteral(endRe)\n\n def respondLinkText(self, match, dom): # pylint: disable=unused-argument\n self.linkText = dom\n self.bsAutolink += outerHTML(dom)\n return self.respondEnd()\n\n def respondEnd(self):\n if self.escapedText:\n return steps.Success(\n skips=[\"{\"], nodes=[self.escapedText[1:], *self.linkText, \"}}\"]\n )\n\n self.bsAutolink += \"}}\"\n\n if self.linkType not in config.idlTypes:\n die(\n \"Shorthand {0} gives type as '{1}', but only IDL types are allowed.\",\n self.bsAutolink,\n self.linkType,\n )\n return steps.Success(E.span({}, self.bsAutolink))\n\n if not self.linkText:\n if (\n self.lt.startswith(\"constructor(\")\n and self.linkFor\n and self.linkFor != \"/\"\n ):\n # make {{Foo/constructor()}} output as \"Foo()\" so you know what it's linking to.\n self.linkText = self.linkFor + self.lt[11:]\n else:\n self.linkText = self.lt\n\n attrs = {\n \"data-link-type\": 
self.linkType,\n \"for\": self.linkFor,\n \"lt\": self.lt,\n \"bs-autolink-syntax\": self.bsAutolink,\n }\n return steps.Success(\n E.code({\"class\": \"idl\", \"nohighlight\": \"\"}, E.a(attrs, self.linkText))\n )\n\n\nIdlShorthand.startRe = re.compile(\n r\"\"\"\n(\\\\)?\n{{\n(?:([^}|]*)/)?\n([^}/|]+?)\n(?:!!([\\w-]+))?\n(\\|)?\"\"\",\n re.X,\n)\n\nendRe = re.compile(\"}}\")\n","sub_path":"bikeshed/shorthands/idl.py","file_name":"idl.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"579392760","text":"import numpy as np\n\n\ndef clip_pks(raw, peak_locs, peak_mags, half_length, min_dist=400):\n if min_dist > 800:\n min_dist = 800\n\n p_lc_d = np.diff(peak_locs)\n\n p_pw = np.zeros([len(peak_locs), ])\n for i in range(len(p_pw)):\n p_pw[i] = np.linalg.norm(raw[peak_locs[i] - half_length:peak_locs[i] + half_length]) ** 2\n\n jg_arr = np.ones([len(peak_locs), ], dtype=np.bool)\n for i in range(len(p_lc_d)):\n if (p_lc_d[i]) <= min_dist:\n pw1 = p_pw[i] / np.sum(p_pw[i:i + 2])\n pw2 = p_pw[i + 1] / np.sum([p_pw[i:i + 2]])\n s1 = pw1 + peak_mags[i]\n s2 = pw2 + peak_mags[i + 1]\n if s1 > s2:\n jg_arr[i + 1] = False\n else:\n jg_arr[i] = False\n return peak_locs[jg_arr], peak_mags[jg_arr]\n\n\ndef gaussian_kernel(size):\n size = int(size)\n x = np.mgrid[-size:size+1]\n g = np.exp(-(x**2/(2*(2*size+1)**2/25)))\n return g / g.sum()\n\n\ndef find_env(signal, w_n, pci):\n signal = signal.squeeze()\n abs_sig = np.abs(signal)\n abs_sig = np.r_[np.zeros([w_n, ]), abs_sig, np.zeros([w_n, ])]\n\n int1 = np.zeros([len(signal)])\n for i in range(len(signal)):\n int1[i] = np.sum(abs_sig[w_n + i - (w_n * pci // 100):w_n + i + w_n * (100 - pci) // 100])\n int1 = np.r_[np.zeros([w_n, ]), int1, np.zeros([w_n, ])] / w_n\n\n int2 = np.zeros([len(signal)])\n for i in range(len(signal)):\n int2[i] = np.sum(int1[w_n + i - (w_n * pci // 100):w_n + i + w_n * (100 - pci) // 100])\n int2 = np.r_[np.zeros([w_n, ]), int2, np.zeros([w_n, ])] / w_n\n\n f = np.zeros([len(signal)])\n for i in range(len(signal)):\n f[i] = np.sum(int2[w_n + i - (w_n * pci // 100):w_n + i + w_n * (100 - pci) // 100])\n f = f / w_n\n return f\n","sub_path":"core/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"63536800","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport djangocms_oscar\n\nversion = djangocms_oscar.__version__\n\nsetup(\n name = 'djangocms_oscar',\n packages = ['djangocms_oscar'],\n include_package_data = True,\n version = version,\n description = 'djangocms oscar integration',\n author = 'byteyard',\n author_email = 'info@byteyard.de',\n license='BSD License',\n url = 'https://github.com/byteyard/djangocms-oscar',\n keywords = ['djangocms', 'django', 'oscar',], \n install_requires = ['django-cms>=3.0, <3.1',],\n classifiers = [\n 'Operating System :: OS Independent',\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Framework :: Django',\n 'Programming Language :: Python :: 3.4',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"190814593","text":"# Brute Force Solution\nclass Solution(object):\n\tdef twoSum(list, sum):\n\t\t# First Check\n\t\tfor i in list:\n\t\t\t# Second 
Check\n\t\t\tfor x in list:\n\t\t\t\tif i + x == sum:\n\t\t\t\t\t# Create a new array containing the solution\n\t\t\t\t\treturnList = [list.index(i), list.index(x)]\n\t\t\t\t\tprint(returnList)\n\t\t\t\t\t# Break out of the loop\n\t\t\t\t\treturn returnList\n\t\t# If the loop goes through (solution can't be found)\n\t\tprint(\"There are no elements in the array that give \" + str(sum)+ \" as a sum!\")\n\n# Test the code\nmyList = [12, 14, 18, 21, 55, 32, 421, 42, 32, 5, 99, 21, 623, 325, 56, 86, 4, 31]\nSolution.twoSum(myList, 50)\n","sub_path":"TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"348262347","text":"\n\n#calss header\nclass _BRONZE():\n\tdef __init__(self,): \n\t\tself.name = \"BRONZE\"\n\t\tself.definitions = [u'being dark orange-brown in colour, like the metal bronze']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_bronze.py","file_name":"_bronze.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"57238521","text":"\"\"\"\nThe dataset service.\nIt provides a method for generating records according to the specified dataset configuration.\n\"\"\"\n\nfrom os.path import isdir, join, exists\nfrom os import listdir, stat\nfrom shutil import rmtree\nfrom datetime import datetime\n\nfrom randomizer.pseudo_vets import generate_from_config\nfrom rest.decorators import service\nfrom rest.services import dataset_configuration_service\nfrom rest.errors import EntityNotFoundError\nfrom config import DATASET_PREFIX, GENERATED_DATASETS_DIR\nfrom rest.logger import logger\n\nDATASET_GENERATING = 'Generating'\nDATASET_COMPLETED = 'Completed'\n\n\nclass DataSetManager:\n \"\"\"\n the dataset manager class\n \"\"\"\n\n def __init__(self):\n \"\"\"\n init cache map\n \"\"\"\n self.cache_map = {}\n\n def update_entity(self, name, status, progress):\n \"\"\"\n update dataset entity by name\n :param name: the dataset name\n :param status: the dataset status\n :param progress: the dataset progress\n :return: None\n \"\"\"\n entity = self.cache_map.get(name)\n if entity is not None:\n entity['status'] = status\n entity['progress'] = progress\n logger.info(\"%s status = %s, progress = %.2f%%\" % (name, status, progress))\n if status == DATASET_COMPLETED and progress >= 100:\n entity['completedOn'] = datetime.now().isoformat()\n\n def push_entity(self, name, entity):\n \"\"\"\n push new dataset entity into manager\n :param name: the dataset name\n :param entity: the dataset entity\n :return: None\n \"\"\"\n self.cache_map[name] = entity\n\n def remove_by_configuration_title(self, config_title):\n \"\"\"\n remove datasets when configuration file removed\n :param config_title: the configuration title\n :return: None\n \"\"\"\n pass\n\n def remove_by_name(self, name):\n \"\"\"\n remove dataset entity by name\n :param name: the dataset name\n :return: None\n \"\"\"\n if self.cache_map.get(name) is not None:\n self.cache_map.pop(name, None)\n\n def get_all_keys(self):\n \"\"\"\n get all dataset names\n :return: the dataset names array\n \"\"\"\n return self.cache_map.keys()\n\n def get_by_name(self, 
name):\n \"\"\"\n get dataset by name\n :param name: the dataset name\n :return: the dateset entity\n \"\"\"\n return self.cache_map.get(name)\n\n\n# the global dataset_manager\ndataset_manager = DataSetManager()\n\n\n@service(schema={'title': {'type': 'string', 'required': True}})\ndef generate(title):\n \"\"\"\n Generate a dataset according to dataset configuration file with the specified title.\n It raises EntityNotFoundError if file cannot be found.\n :param title: the dataset configuration title\n :return: the number of generated report files\n \"\"\"\n configurations = dataset_configuration_service.get(title) # the length will be 1\n if len(configurations) <= 0:\n raise EntityNotFoundError('Cannot find configuration with title ' + title)\n\n return generate_from_config(configurations[0], dataset_manager)\n\n\n@service(schema={'title': {'type': 'string', 'required': True}})\ndef delete_dataset_by_title(title):\n \"\"\"\n Delete dateset by title\n It raises EntityNotFoundError if dataset not found\n :param title: the dataset title\n \"\"\"\n\n dataset_path = join(GENERATED_DATASETS_DIR, title)\n if not exists(dataset_path):\n raise EntityNotFoundError(\"Dataset not found where name = \" + title)\n rmtree(dataset_path)\n dataset_manager.remove_by_name(title)\n\n\ndef preload_datasets():\n \"\"\"\n preload all datasets into manager\n Get all dataset by scan output folder, if folder name start begin DATASET_PREFIX, that's mean this dataset generate\n by rest api\n If get configuration by title failed, then this api will skip the dataset\n :return: None\n \"\"\"\n datasets_folders = [f for f in listdir(GENERATED_DATASETS_DIR) if isdir(join(GENERATED_DATASETS_DIR, f))]\n for dataset_name in datasets_folders:\n if not dataset_name.startswith(DATASET_PREFIX): # not generate by rest api\n continue\n dataset_parts = dataset_name.split('.')\n name = len(dataset_parts) > 1 and dataset_parts[1] or 'ERROR TO GET NAME'\n output_format = len(dataset_parts) > 3 and dataset_parts[3] or 'CCDA'\n try:\n configurations = dataset_configuration_service.get_configuration_by_title(name)\n dataset = {\n 'title': name,\n 'completedOn': datetime.fromtimestamp(\n stat(join(GENERATED_DATASETS_DIR, dataset_name)).st_mtime).isoformat(),\n 'configuration': configurations,\n 'status': DATASET_COMPLETED,\n 'progress': 100,\n 'outputFormat': output_format,\n 'datasetName': dataset_name\n }\n dataset_manager.push_entity(dataset_name, dataset)\n logger.info(\"succeed load dataset = \" + dataset_name)\n except Exception as e:\n # if get configuration error, then skip this dataset, so we don't need raise error here\n logger.error(e)\n\n\n@service()\ndef get_all_datasets():\n \"\"\"\n get all datasets from cache\n :return: the rest api generated datasets\n \"\"\"\n datasets = []\n global dataset_manager\n keys = dataset_manager.get_all_keys()\n for dataset_name in keys:\n dataset = dataset_manager.get_by_name(dataset_name)\n if dataset is None:\n continue\n dataset_parts = dataset_name.split('.')\n name = len(dataset_parts) > 1 and dataset_parts[1] or 'ERROR TO GET NAME'\n try:\n configurations = dataset_configuration_service.get_configuration_by_title(name)\n dataset['configuration'] = configurations\n datasets.append(dataset)\n except Exception as e:\n # if get configuration error, then skip this dataset, so we don't need raise error here\n logger.error(e)\n if len(datasets) > 0:\n datasets = sorted(datasets, key=lambda k: k['completedOn'], reverse=True)\n return datasets\n\n\ndef remove_dateset_by_config_title(title):\n 
\"\"\"\n remove dataset by config title from cache\n :param title: the config title\n :return: None\n \"\"\"\n global dataset_manager\n dataset_manager.remove_by_configuration_title(title)\n\n\ndef get(title):\n \"\"\"\n get dataset by title\n :param title: the dataset cache key\n :return: the cached dataset\n \"\"\"\n return dataset_manager.get_by_name(title)\n","sub_path":"pseudoVet-backend/rest/services/dataset_generation_service.py","file_name":"dataset_generation_service.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"385790192","text":"#coding:utf8\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision.models import googlenet\r\nimport torch.nn as nn\r\nimport datetime\r\nimport pandas as pd\r\nfrom load_data import DogCat\r\n\r\n\r\n# 1=dog 0=cat\r\npath = r'C:\\Users\\caeit\\Desktop\\binru7_1\\train'\r\ntrain_data = DogCat(path,train=True)\r\ntrain_dataloader = DataLoader(train_data,batch_size=16,shuffle=True)\r\nmodel = googlenet(pretrained=True)\r\nmodel.fc = nn.Linear(in_features=1024, out_features=2, bias=True)\r\nmodel = model.cuda()\r\ncost = torch.nn.CrossEntropyLoss().cuda()\r\noptimizer = torch.optim.SGD(model.parameters(),lr=1e-4,momentum=0.9)\r\nstart = datetime.datetime.now()\r\nfor i in range(2):\r\n correct = 0.0\r\n total = 25000\r\n running_loss = 0.0\r\n accuracy = 0.0\r\n print('-----epoch', i+1, '-----')\r\n for num, image in enumerate(train_dataloader):\r\n x_train, y_train = image\r\n x_train = x_train.cuda()\r\n y_train = y_train.cuda()\r\n optimizer.zero_grad()\r\n output = model(x_train)\r\n loss = cost(output, y_train)\r\n _, predicted = torch.max(output, 1)\r\n correct += (predicted == y_train).sum().item()\r\n if num%100 == 0:\r\n print(num*16, '/ 25000', 'loss:', running_loss, ',accuracy:{}%'.format((100*correct/25000)),'correct:%s'%correct)\r\n loss.backward()\r\n optimizer.step()\r\n running_loss += loss.item()\r\n print('-----------Epoch:', i+1, ', one_epoch_loss:', running_loss, '-----------',',accuracy:%f'%(100*correct/25000))\r\nprint(datetime.datetime.now()-start)\r\ntorch.save(model, 'fine_tuning_20_epoch_dog_cat_googlenet.pkl')\r\n\r\n#测试\r\nmodel = torch.load('fine_tuning_20_epoch_dog_cat_googlenet.pkl')\r\nmodel = model.cuda()\r\npath = r'C:\\Users\\caeit\\Desktop\\binru7_1\\test1'\r\ntest_data = DogCat(path,train=False,test=True)\r\ntest_dataloader = DataLoader(test_data,batch_size=16,shuffle=False,num_workers=0)\r\nresult = []\r\nstart = datetime.datetime.now()\r\nfor num, image in enumerate(test_dataloader):\r\n x_train, y_train = image\r\n x_train = x_train.cuda()\r\n y_train = y_train.cuda()\r\n output = model(x_train)\r\n _, predicted = torch.max(output, 1)\r\n result.append(predicted)\r\nprint(datetime.datetime.now()-start)\r\nlabel = []\r\nfor i in result:\r\n i = i.cpu()\r\n for j in i.data.numpy():\r\n label.append(j)\r\nfor j,i in enumerate(label):\r\n if i == 0:\r\n label[j] = 0.05\r\n else:\r\n label[j] = 0.95\r\nid = [i for i in range(1,12501)]\r\ndataframe = pd.DataFrame({'id':id,'label':label})\r\ndataframe.to_csv(\"Submission.csv\",index=False, sep=',')","sub_path":"googlenet.py","file_name":"googlenet.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"221927740","text":"def fatorial(numero, show=False):\n \"\"\"\n -> Calcula o Fatorial de um número\n :param numero: O valor do fatorial a ser calculado\n 
:param show: (opcional) Mostrar ou não o calculo\n :return: O valor fatorial da variavel numero \n \"\"\"\n f = 1\n for i in range(numero, 0, -1):\n if show:\n if i == 1:\n print(f'{i} = ', end='')\n else:\n print(f'{i} x ', end='')\n f *= i\n return f\n\n\nprint('--' * 30)\nprint(fatorial(7, True))\nhelp(fatorial)\n","sub_path":"mundo3/exercicios/102.py","file_name":"102.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"322104456","text":"\"\"\"\nThis script runs the conformance tests to validate the implementation.\n\"\"\"\nimport os.path, tempfile\nfrom arelle.tests import TestCntlr, check_and_setup, make_checker\n\ndef xbrl_test():\n dirpath=tempfile.gettempdir()\n short_name = \"XBRL-CONF-CR4-2008-07-02.zip\"\n url = \"http://www.xbrl.org/2008/XBRL-CONF-CR4-2008-07-02.zip\"\n svn_repo = \"http://publicsvn.xbrl.org/svn/public/base-specification-conformance/2008-07-02/\"\n index = \"XBRL-CONF-CR4-2008-07-02/xbrl.xml\"\n file_name = os.path.join(dirpath, short_name)\n check_and_setup(file_name, url, dirpath, short_name)\n for index, test, variation in TestCntlr().run(os.path.join(dirpath, index), False, False, True):\n yield(make_checker(\"XBRL\", test, variation))\n ","sub_path":"arelle/tests/xbrl_conformance_test.py","file_name":"xbrl_conformance_test.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"320141982","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 15:33:07 2019\n\n@author: TAPAN\n\nCode Challenge:\ndataset: BreadBasket_DMS.csv\n\nQ1. In this code challenge, you are given a dataset which has data and time wise transaction on \na bakery retail store.\n1. Draw the pie chart of top 15 selling items.\n2. Find the associations of items where min support should be 0.0025, min_confidence=0.2, min_lift=3.\n3. Out of given results sets, show only names of the associated item from given result row wise.\n\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nbasket = pd.read_csv('BreadBasket_DMS.csv') \n\n# 1.Draw the pie chart of top 15 selling items.\nbasket[basket['Item']=='NONE']=np.nan\n\nvalues=basket['Item'].value_counts()[:15]\nplt.pie(values,labels=values.index,autopct='%1.1f%%',radius=2)\n\n# 2.\ngrpd_items=pd.Series(basket.groupby('Transaction')['Item'])\n\ntransactions=[]\n\nfor i in range(0,9465):\n transactions.append(list(grpd_items[i][1]))\n \nfrom apyori import apriori\n\nrules = apriori(transactions, min_support = 0.0025, min_confidence = 0.2, min_lift = 3)\n\nresults=list(rules)\n\n\nfor item in results:\n\n # first index of the inner list\n # Contains base item and add item\n pair = item[0] \n items = [x for x in pair]\n print(\"Rule: \" + items[0] + \" -> \" + items[1])\n\n #second index of the inner list\n print(\"Support: \" + str(item[1]))\n\n #third index of the list located at 0th\n #of the third index of the inner list\n\n print(\"Confidence: \" + str(item[2][0][2]))\n print(\"Lift: \" + str(item[2][0][3]))\n print(\"=====================================\")\n\n# 3. 
to access seperate items\nIst_item=list(results[0][0])[0]\n\nIIst_item=list(results[0][0])[1]\n","sub_path":"Day 23/Work/breadbasket.py","file_name":"breadbasket.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"334185331","text":"from collections import Counter\nfrom datetime import datetime\nimport re\nimport textwrap\nfrom dateutil.parser import parse\nfrom googleapiclient import discovery\nfrom googleapiclient.http import build_http\nfrom logging import warning\nfrom logging import error\nfrom oauth2client.client import HttpAccessTokenRefreshError\nfrom PIL import Image\nfrom PIL.ImageDraw import Draw\n\nfrom firestore import DataError\nfrom firestore import GoogleCalendarStorage\nfrom graphics import draw_text, calculate_character_widths\nfrom graphics import SUBVARIO_CONDENSED_MEDIUM\nfrom content import ContentError\nfrom content import ImageContent\nfrom local_time import LocalTime\n\n# The name of the Google Calendar API.\nAPI_NAME = 'calendar'\n\n# The Google Calendar API version.\nAPI_VERSION = 'v3'\n\n# The ID of the calendar to show.\nCALENDAR_ID = 'primary'\n\n# The color of the image background.\nBACKGROUND_COLOR = (255, 255, 255)\n\n# The color used for the current day and events.\nBLACK_COLOR = (0, 0, 0)\nRED_COLOR = (255, 0, 0)\n\nLEFT_MARGIN = 60\n\n# The maximum number of events to show.\nMAX_EVENTS = 3\n\nDAY_FONT = SUBVARIO_CONDENSED_MEDIUM.copy()\nDAY_FONT['size'] = 90\nDAY_FONT['height'] = 96\nDAY_FONT['width_overrides'] = {}\n\nCAL_FONT = SUBVARIO_CONDENSED_MEDIUM.copy()\nCAL_FONT['size'] = 72\nCAL_FONT['height'] = 68\nCAL_FONT['width_overrides'] = {}\n\n\nclass GoogleCalendarMeetings(ImageContent):\n \"\"\"A daily calendar backed by the Google Calendar API.\"\"\"\n\n def __init__(self, geocoder):\n self._local_time = LocalTime(geocoder)\n\n def _upcomming_events(self, time, user):\n \"\"\"Retrieves the upcomming events using the Google Calendar API.\"\"\"\n\n # Create an authorized connection to the API.\n storage = GoogleCalendarStorage(user.id)\n credentials = storage.get()\n if not credentials:\n error('No valid Google Calendar credentials.')\n return Counter()\n authed_http = credentials.authorize(http=build_http())\n service = discovery.build(API_NAME, API_VERSION, http=authed_http,\n cache_discovery=False)\n\n # Process calendar events for each day of the current month.\n time_max = time.replace(hour=23, minute=59, second=0,\n microsecond=0)\n # Request this month's events.\n request = service.events().list(calendarId=CALENDAR_ID,\n timeMin=time.isoformat(),\n timeMax=time_max.isoformat(),\n singleEvents=True,\n pageToken=None,\n orderBy=\"startTime\",\n maxResults=10)\n\n try:\n response = request.execute()\n except HttpAccessTokenRefreshError as e:\n warning('Google Calendar request failed: %s' % e)\n return []\n\n # Sort by start time, end time\n response['items'].sort(key=lambda x: (x['start']['dateTime'], x['end']['dateTime']))\n \n return response['items']\n \n def _strip_emojis(self, text):\n regrex_pattern = re.compile(pattern = \"[\"\n \"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n \"\\U0001F600-\\U0001F64F\" # emoticons\n \"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n \"\\U0001F700-\\U0001F77F\" # alchemical symbols\n \"\\U0001F780-\\U0001F7FF\" # Geometric Shapes Extended\n \"\\U0001F800-\\U0001F8FF\" # Supplemental Arrows-C\n \"\\U0001F900-\\U0001F9FF\" # Supplemental Symbols and 
Pictographs\n \"\\U0001FA00-\\U0001FA6F\" # Chess Symbols\n \"\\U0001FA70-\\U0001FAFF\" # Symbols and Pictographs Extended-A\n \"\\U00002702-\\U000027B0\" # Dingbats\n \"\\U000024C2-\\U0001F251\" \n \"]+\", flags = re.UNICODE)\n return regrex_pattern.sub(r'',text)\n\n \n def image(self, user, size):\n \"\"\"Generates an image with a calendar view.\"\"\"\n\n # Show a calendar relative to the current date.\n try:\n time = self._local_time.now(user)\n except DataError as e:\n raise ContentError(e)\n\n # Create a blank image.\n image = Image.new(mode='RGB', size=size,\n color=BACKGROUND_COLOR)\n draw = Draw(image)\n\n\n y = 80 \n\n draw_text(time.strftime('%A, %B %d'), DAY_FONT, BLACK_COLOR,\n xy=(LEFT_MARGIN, y), image=image, align='left')\n \n y += 100\n\n upcomming_events = self._upcomming_events(time, user)\n \n count = 0\n for event in upcomming_events:\n \n declined = False\n for attendee in event.get('attendees', []):\n if attendee.get('self', False) is True \\\n and attendee.get('responseStatus') == 'declined':\n declined = True\n \n if declined:\n continue\n\n event_time = datetime.fromisoformat(event['start']['dateTime'])\n draw_text(event_time.strftime('%H:%M'), CAL_FONT, RED_COLOR,\n xy=(LEFT_MARGIN, y), image=image, align='left')\n \n\n available_width = size[0] - LEFT_MARGIN - 180 - 20\n\n text = self._strip_emojis(event['summary']).strip()\n character_widths = calculate_character_widths(text, CAL_FONT, draw)\n \n textlines = []\n if sum(character_widths) < available_width:\n textlines = [text]\n else:\n num_char_per_line = available_width / (sum(character_widths) / len(character_widths))\n textlines = textwrap.wrap(text, width=num_char_per_line)\n\n\n for i, line in enumerate(textlines[:3]):\n if i == 2 and len(textlines) > 3:\n line += \"...\"\n \n draw_text(line, CAL_FONT, BLACK_COLOR,\n xy=(LEFT_MARGIN + 180, y), image=image, align='left')\n y += CAL_FONT['height']\n\n \n y += 20\n count += 1\n\n if y > size[1] - 100:\n break\n\n if count == MAX_EVENTS:\n break\n \n return image\n","sub_path":"server/google_calendar_meetings.py","file_name":"google_calendar_meetings.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"245388491","text":"# from django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .models import UserIP, UserInterviewInfo\nfrom .serializers import UserIPSerializer\nfrom apps.article.models import Article\n\nfrom collections import Counter\nimport datetime\n# Create your views here.\n\n\n@api_view(http_method_names=['GET'])\ndef interview_count(request):\n \"\"\"\n 访问数量统计,返回网站访问总数量,访问网站的IP总数量\n \"\"\"\n users = UserIP.objects.all()\n \n if not users.exists():\n return Response({'count': 0})\n\n count = sum([user.count for user in users])\n\n return Response({'count': count, \"IP_count\": users.count()})\n\n@api_view(http_method_names=['GET'])\ndef one_day_visits(request):\n \"\"\"\n 一天的访问量\n \"\"\"\n date = request.query_params.get('date')\n\n if not date:\n # 未传入时间,默认为当前时间\n date = datetime.datetime.now()\n date = date.strftime('%Y-%m-%d')\n year, month, day = date.split('-')\n users = UserInterviewInfo.objects.filter(interview_time__year=year, interview_time__month=month, interview_time__day=day)\n ip_count_set = set(users.values_list('ip', flat=True))\n\n return Response({'count': users.count(), \"IP_count\": len(ip_count_set)})\n\n\n@api_view(http_method_names=['GET'])\ndef 
article_read_count(request):\n \"\"\"\n 文章的访问数量\n \"\"\"\n article = request.query_params.get('article', \"\")\n\n if not article:\n return Response(\"为传入文章ID\")\n\n path = \"/api/blogv2/articles/{}/\".format(article)\n\n users = UserInterviewInfo.objects.filter(interview_url=path)\n \n return Response({'count': users.count()})\n\n\n@api_view(http_method_names=['GET'])\ndef today_read_article(request):\n \"\"\"\n 今天被访问的文章\n \"\"\"\n today = datetime.datetime.now()\n year, month, day = today.strftime('%Y-%m-%d').split('-')\n path = r'/api/blogv2/articles/[0-9]*/'\n\n users = UserInterviewInfo.objects.filter(interview_time__year=year, interview_time__month=month, interview_time__day=day)\n urls = users.filter(interview_url__regex=path).values_list('interview_url', flat=True)\n\n # url_dict = dict()\n # for url in urls:\n # if url in url_dict.keys():\n # url_dict[url] += 1\n # else:\n # url_dict[url] = 1\n\n # 使用Counter()也可以实现上述功能\n url_dict = Counter(urls)\n\n url_list = list()\n for k, v in url_dict.items():\n res = dict()\n res['url'] = k\n res['count'] = v\n res['id'] = k.split('/')[-2]\n art = Article.objects.filter(pk=res['id']).values_list('title', flat=True)\n res['title'] = art[0] if art else \"\"\n\n url_list.append(res)\n\n return Response(url_list)\n\n\n@api_view(http_method_names=['GET'])\ndef hot_ip(request):\n \"\"\"\n 访问次数最多的IP\n \"\"\"\n ip = UserIP.objects.all().order_by('-count')\n num = ip.count()\n\n if num > 5:\n num = 5\n\n ser = UserIPSerializer(instance=ip[:num], many=True)\n\n return Response(ser.data)","sub_path":"apps/user_statistics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"371422961","text":"import dbf\nfrom NHDPlus_Extractor_Class import NHDPlusExtractor\nimport os\nfrom shapefile import Reader,Writer\nfrom osgeo import ogr\n### PlusFlowlinVAA files are encoded in cp1252\n###\n\n\n\nx = NHDPlusExtractor(r'C:\\Users\\User\\Data')\n\nb = [21376788, (2012, 7, 12), 4, 2, 2, 720048542, 720102010, 720026398, 720026398, 0.0, 720026398, 22.984, 0, 0, 1, 0, 0, 720026398, 720050315, 0, 0, 0, 0, 0.0, 100.0, '15080303000101', 0.971, 46003, 0, 0, 0, 0, 0, 0.6444, 21.0942, 13.3308, 0, 0.0488124732414, '']\nprint(len(b))\nprint([i for i in range(len(b))])\na=0\nz=0\n\n\nflowattributes = [['COMID', 'N', 9, 0], ['HYDROSEQ', 'N', 11, 0], ['UPHYDROSEQ','N', 11, 0],\n ['DNHYDROSEQ','N', 11, 0], ['REACHCODE', 'C', 14, 0],['AREASQKM', 'N', 15, 6],\n ['TOTDASQKM','N', 15, 6], ['DIVDASQKM','N', 15, 6]]\nslopeattributes = ['COMID', 'MAXELEVSMO', 'MINELEVSMO','SLOPELENKM']\neromattributes = ['COMID', 'Q0001E', 'V0001E', 'SMGAGEID']\n\ndef read_dbf(source, comids=None, attributes = None,verbose=True):\n mydbf = open(source,'rb')\n sf = Reader(dbf=mydbf)\n print(sf.fields)\n w = Writer()\n w.fields = attributes\n fields = sf.fields[1:]\n fields = [item[0].upper() for item in fields]\n fieldsindex = {}\n\n #iterate over the dbf file accessing the records using attributes as a fields\n ##query. 
if attributes is none return all the records for all the fields\n if attributes is None: attributes = fields\n\n\n for attribute in attributes:\n print(attribute)\n y = [pos for pos,j in enumerate(fields) if attribute[0] == j]\n print(y)\n fieldsindex[attribute[0]] = y[0]\n\n for rec in enumerate(sf.records()):\n pass\n\n\n print(fieldsindex.keys())\nread_dbf(r\"C:\\Users\\User\\Data\\NHDPlusCO\\NHDPlus15\\NHDPlusAttributes\\PlusFlowlineVAA.dbf\",attributes=flowattributes)\n\n\n# for DA in x.DA_to_VPU.keys():\n# # print(x.DA_to_VPU[DA])\n# # print(len(x.DA_to_VPU[DA]))\n# for i in range(len(x.DA_to_VPU[DA])):\n# DAPATH = r'{}\\NHDPlus{}'.format(x.destination,DA)\n# VAAPATH = r'{}\\{}{}\\{}\\{}'.format(DAPATH,'NHDPlus',x.DA_to_VPU[DA][i],\n# 'NHDPlusAttributes','PlusFlowlineVAA.dbf')\n# mydbf = open(VAAPATH,'rb')\n# sf = Reader(dbf=mydbf)\n#\n# fields = sf.fields[1:]\n# fields = [item[0].upper() for item in fields]\n#\n# for attribute in flowattributes:\n#\n# y = [pos + 1 for pos,j in enumerate(fields) if attribute == j]\n# print(y, attribute)\n\n\n# mydbf = open(r\"C:\\Users\\User\\Data\\NHDPlusCO\\NHDPlus15\\NHDPlusAttributes\\PlusFlowlineVAA.dbf\",'rb')\n# sf = Reader(dbf=mydbf)\n# w = Writer()\n# w.fields = sf.fields\n# fields = sf.fields[1:]\n# fields = [item[0].upper()for item in fields]\n# print(fields)\n# for attribute in flowattributes:\n#\n# y = [pos + 1 for pos,j in enumerate(fields) if attribute == j ]\n# print(y, attribute)\n#\n# if y is not []:\n\n# for attribute in flowattributes:\n# for pos, field in enumerate(sf.fields)\n\n\n\n# print(sf.fields)\n# print(len(sf.fields))\n# # for b in range(len(sf.fields)):\n# # for attribute in flowattributes:\n# # if attribute == sf.fields[b][0].upper():\n# # y = sf.fields[b]\n#\n#\n#\n# for rec in enumerate(sf.records()):\n# print(rec)\n# print(fields)\n# mydbf.close()\n# def readdbf(source, attributes=None, comids=None, verbose=True):\n# sourcedbf = open(source,'rb')\n# sf = Reader(dbf=sourcedbf)\n# for attribute in range(len(sf.fields)):\n# fields.append(sf.fields[attribute][0].upper())\n#\n# for attribute in fields:\n# if attribute == 'COMID':\n# comid_index = fields.index(attribute)\n# elif attribute == 'HYDROSEQ':\n# hyd\n\n\n\n# for DA in x.DA_to_VPU.keys():\n# # print(x.DA_to_VPU[DA])\n# # print(len(x.DA_to_VPU[DA]))\n# for i in range(len(x.DA_to_VPU[DA])):\n# DAPATH = r'{}\\NHDPlus{}'.format(x.destination,DA)\n# VAAPATH = r'{}\\{}{}\\{}\\{}'.format(DAPATH,'NHDPlus',x.DA_to_VPU[DA][i],\n# 'NHDPlusAttributes','PlusFlowlineVAA.dbf')\n# mydbf = open(VAAPATH,'rb')\n# sf = Reader(dbf=mydbf)\n#\n# for b in range(len(sf.fields)):\n# fields.append(sf.fields[b][0].upper())\n#\n# a+=1\n# print(a)\n# print([x for x in fields])\n#\n# for attribute in flowattributes:\n# if attribute\n# # w.fields = list(sf.fields)\n# # records = sf.records()\n# # for row in records:\n# # args = row\n# # w.record(*args)\n# # w.save(VAAPATH)\n# # mydbf.close()\n#\n#\n#\n#\n# # w = Writer()\n# # for b in range(1,len(sf.fields)):\n# # sf.fields[b][0] = sf.fields[b][0].upper()\n# # w.fields = list(sf.fields)\n# # w.save('VAAPATH')\n# # mydbf.close()\n# # print(w.fields)\n#\n#\n# # for i in range(len(x.DA_to_VPU[DA])):\n# # DAPATH = '{}\\\\NHDPlus{}'.format(x.destination,DA)\n# # ELEVSLOPEVPATH = '{}\\\\{}{}\\\\{}\\\\{}'.format(DAPATH,'NHDPlus',x.DA_to_VPU[DA][i],\n# # 'NHDPlusAttributes','elevslope.dbf')\n# #\n# # table = dbf.Table(ELEVSLOPEVPATH)\n# # print(table)\n# # z+=1\n# # print(z)\n# #\n# # for i in range(len(x.DA_to_VPU[DA])):\n# # DAPATH = 
'{}\\\\NHDPlus{}'.format(x.destination,DA)\n# # EROMPATH = '{}\\\\{}{}\\\\{}\\\\{}'.format(DAPATH,'NHDPlus',x.DA_to_VPU[DA][i],\n# # 'EROMExtension','EROM_MA0001.DBF')\n# # if os.path.isfile(EROMPATH):\n# # table = dbf.Table(EROMPATH)\n# # print(table)\n# # a+=1\n# # print(a)\n","sub_path":"NHDPlus_Extractor/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"210143335","text":"description = [\n '1. Реверс числа: ',\n '2. Перевод из 10чной в любую другую (N, base): ',\n '3. Ход конём (x1, x2, y1, y2): ']\n\nprint(input(description[0])[::-1])\n\nfrom string import printable as alphabet\ndef dec_to_base(N=0, base=100):\n x, y = divmod(N, base)\n return dec_to_base(x, base) + alphabet[y] if x else alphabet[y]\nprint(dec_to_base(*list(map(int, input(description[1]).split()))[:2]))\n\ndef horse(x1=0, x2=0, y1=0, y2=0):\n return \"Y\" if (x1 - x2) ** 2 + (x1 - x2) ** 2 == 5 else \"N\"\nprint(horse(*list(map(int, input(description[2]).split()))[:4]))","sub_path":"AVEENTROPY.py","file_name":"AVEENTROPY.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"410948724","text":"# coding: utf-8\n# Repetições While\n# Validadação de Dados\n\n# 1 modo\ns = \"MmFf\"\nwhile True:\n sexo = input(\"Informe seu sexo [M/F] \")\n if (sexo in s):\n print(\"Sexo {} registrado com sucesso.\".format(sexo.upper()))\n break\n print(\"Dados inválidos. Tente novamente\")\n\n# 2 modo\nsexo = \"\"\nwhile sexo != \"M\" and sexo != \"F\":\n sexo = input(\"Informe seu sexo [M/F]\").upper()\n print(\"Dados inválidos. Tente novamente\")\nprint(\"Sexo {} registrado com sucesso.\".format(sexo.upper()))\n\n","sub_path":"mundo2/exercicio57.py","file_name":"exercicio57.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"500371952","text":"# https://leetcode.com/problems/3sum/\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n res = []\n nums.sort()\n for i, n in enumerate(nums):\n if n > 0:\n break\n if i == 0 or nums[i-1] != nums[i]:\n self.twoSum(i, res, nums)\n return res\n\n def twoSum(self, i, res, nums):\n j = i + 1\n seen = set()\n while j < len(nums):\n c = -nums[j] - nums[i]\n if c in seen:\n res.append([nums[i], nums[j], c])\n while j + 1 < len(nums) and nums[j] == nums[j + 1]:\n j += 1\n seen.add(nums[j])\n j += 1\n","sub_path":"problems/3Sum.py","file_name":"3Sum.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"635013481","text":"'''\nCreated on 19-Dec-2013\n\n@author: ganesh\n'''\n\nimport horizon\n\n# Rename \"User Settings\" to \"User Options\"\nproject = horizon.get_dashboard(\"project\")\nadmin = horizon.get_dashboard(\"admin\")\ncnext = horizon.get_dashboard(\"cnext\")\nwangle = horizon.get_dashboard(\"wangle\")\namazon = horizon.get_dashboard(\"amazon\")\nhpcloud = horizon.get_dashboard(\"hpcloud\") \n\n\npermissions = list(getattr(project, 'permissions', []))\npermissions.append('openstack')\nproject.permissions = tuple(permissions)\n\nvolume_panel = project.get_panel(\"volumes\")\nvolume_panel.permissions = tuple()\n\nnetwork_panel = project.get_panel(\"networks\")\nnetwork_panel.permissions = tuple()\n\npermission = list(getattr(admin, 'permissions', 
[]))\npermission.append('openstack')\nadmin.permissions = tuple(permission)\n\n\npermission = list(getattr(cnext, 'permissions', []))\npermission.append('netjson')\ncnext.permissions = tuple(permission)\n\npermission = list(getattr(amazon, 'permissions', []))\npermission.append('amazon')\namazon.permissions = tuple(permission)\n\npermission = list(getattr(hpcloud, 'permissions', []))\npermission.append('hpcloud')\nhpcloud.permissions = tuple(permission)\n","sub_path":"cloud_mongo/overrides.py","file_name":"overrides.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"650943617","text":"#this script transforms word embeddings between file formats. See CLI arguments list for available formats.\n\n#python ModelTypeTransform.py --input_file_type wordvectors --input_file ..\\Models\\Word2vec_model\\word2vec_5_200_sg.wordvectors --output_file_type txt --output_file ..\\Models\\Word2vec_model\\word2vec_5_200_sg.txt\n#python ModelTypeTransform.py --input_file_type vec --input_file ..\\tf-morphotagger-master\\embeddings\\fasttext_baseline_300.vec --output_file_type wordvectors --output_file ..\\Models\\FastText_model\\fasttext_baseline_300.wordvectors\n#python ModelTypeTransform.py --input_file_type vec --input_file ..\\ngram2vec-master\\outputs\\combined_clean_corpus\\ngram_ngram\\sgns\\ng2v_5_200_sg.output --output_file_type txt --output_file ..\\Models\\Ngram2vec_model\\ngram2vec_5_200_sg.txt\n\nimport argparse\n#import fasttext\nfrom gensim.models import Word2Vec, KeyedVectors\n\ndef main():\n #CLI arguments\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--input_file_type\", type=str, required=True, help=\"[bin|txt|wordvectors|vec|model]\")\n parser.add_argument(\"--input_file\", type=str, default='', help=\"Path to the input file including extention.\")\n parser.add_argument(\"--output_file_type\", type=str, required=True, help=\"[bin|txt|wordvectors]\")\n parser.add_argument(\"--output_file\", type=str, default='', help=\"Path to the output file including extention.\")\n args = parser.parse_args()\n\n word_vectors = None\n\n #load model of file type\n if args.input_file_type == \"bin\":\n word_vectors = KeyedVectors.load_word2vec_format(args.input_file, binary=True, encoding='utf-8')\n elif args.input_file_type == \"txt\":\n word_vectors = KeyedVectors.load_word2vec_format(args.input_file, binary=False, encoding='utf-8')\n elif args.input_file_type == \"wordvectors\":\n word_vectors = KeyedVectors.load(args.input_file)\n elif args.input_file_type == \"vec\": #works for original fasttext vec and bin pretrained. 
Also for ngram2vec .output files\n word_vectors = KeyedVectors.load_word2vec_format(args.model_file, binary=False, encoding='utf-8')\n elif args.input_file_type == \"model\":\n word_vectors = Word2Vec.load(args.input_file).wv\n else:\n print(\"input_file_type not supported!\")\n return\n print(\"model loaded!\")\n\n #save model of file type\n if args.output_file_type == \"bin\":\n word_vectors.save_word2vec_format(args.output_file, binary=True)\n elif args.output_file_type == \"txt\":\n word_vectors.save_word2vec_format(args.output_file, binary=False)\n elif args.output_file_type == \"wordvectors\":\n word_vectors.save(args.output_file)\n else:\n print(\"output_file_type not supported!\")\n return\n print(\"model saved!\")\n\nif __name__ == '__main__':\n main()","sub_path":"ModelTypeTransform.py","file_name":"ModelTypeTransform.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"221224066","text":"# interval list .tsv file column names\ncontig_column_name = \"CONTIG\"\nstart_column_name = \"START\"\nend_column_name = \"END\"\ncount_column_name = \"COUNT\"\n\n# prefix for saving posteriors for multiple samples\nsample_folder_prefix = \"SAMPLE_\"\n\n# log copy number posterior matrix column name prefix for each integer copy number state\ncopy_number_column_prefix = \"COPY_NUMBER_\"\n\n# ploidy prior table header column names\nploidy_prior_contig_name_column = \"CONTIG_NAME\"\nploidy_prior_prefix = \"PLOIDY_PRIOR_\"\n\n# column names for ploidy and depth .tsv outputs\nsample_name_column_name = \"SAMPLE_NAME\"\nglobal_read_depth_column_name = \"GLOBAL_READ_DEPTH\"\naverage_ploidy_column_name = \"AVERAGE_PLOIDY\"\nploidy_column_name = \"PLOIDY\"\nploidy_gq_column_name = \"PLOIDY_GQ\"\n\n# column names for copy-number segments file\ncall_copy_number_column_name = \"CALL_COPY_NUMBER\"\nnum_points_column_name = \"NUM_POINTS\"\nquality_some_called_column_name = \"QUALITY_SOME_CALLED\"\nquality_all_called_column_name = \"QUALITY_ALL_CALLED\"\nquality_start_column_name = \"QUALITY_START\"\nquality_end_column_name = \"QUALITY_END\"\n\n# column name for baseline copy-number file\nbaseline_copy_number_column_name = \"BASELINE_COPY_NUMBER\"\n\n# column name for denoised copy-number files\ndenoised_copy_ratio_mean_column_name = \"DENOISED_COPY_RATIO_MEAN\"\ndenoised_copy_ratio_std_column_name = \"DENOISED_COPY_RATIO_STD\"\n\n# regular expression for matching sample name from header comment line\nsample_name_header_regexp = \"^@RG.*SM:(.*)[\\t]*.*$\"\n\n# prefix for adding sample name as a header comment line\nsample_name_sam_header_prefix = \"RG\\tID:GATKCopyNumber\\tSM:\"\n\ndefault_comment_char = \"@\"\ndefault_delimiter_char = \"\\t\"\n\n# default file names for loading and saving models, posteriors, and configurations\ndefault_sample_read_depth_tsv_filename = \"global_read_depth.tsv\"\ndefault_sample_name_txt_filename = \"sample_name.txt\"\ndefault_sample_contig_ploidy_tsv_filename = \"contig_ploidy.tsv\"\ndefault_copy_number_log_posterior_tsv_filename = \"log_q_c_tc.tsv\"\ndefault_copy_number_log_emission_tsv_filename = \"log_c_emission_tc.tsv\"\ndefault_class_log_posterior_tsv_filename = \"log_q_tau_tk.tsv\"\ndefault_baseline_copy_number_tsv_filename = \"baseline_copy_number_t.tsv\"\ndefault_copy_number_segments_tsv_filename = \"copy_number_segments.tsv\"\ndefault_denoised_copy_ratios_mean_tsv_filename = \"denoised_copy_ratios_mu.tsv\"\ndefault_denoised_copy_ratios_std_tsv_filename = 
\"denoised_copy_ratios_std.tsv\"\n\ndefault_denoising_config_json_filename = \"denoising_config.json\"\ndefault_calling_config_json_filename = \"calling_config.json\"\ndefault_ploidy_config_json_filename = \"ploidy_config.json\"\ndefault_gcnvkernel_version_json_filename = \"gcnvkernel_version.json\"\n\ndefault_interval_list_filename = \"interval_list.tsv\"\ndefault_contig_ploidy_prior_tsv_filename = \"contig_ploidy_prior.tsv\"\n\ndefault_adamax_m_filename = \"adamax_m.npy\"\ndefault_adamax_u_filename = \"adamax_u.npy\"\ndefault_adamax_res_filename = \"adamax_res.npy\"\n","sub_path":"src/main/python/org/broadinstitute/hellbender/gcnvkernel/io/io_consts.py","file_name":"io_consts.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"428415552","text":"'''\n Preprocessing and Transformation\n'''\n\n#앞서 우리는 SVM과 뉴럴 네트워크같은 몇며 알고리즘은 데이터의 스케일링에 민감하다는 것을 보았다.\n#여기서는 간단한 데이터 가공을 살펴볼 것이다.\n\nimport mglearn\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\ncancer = load_breast_cancer()\n\nmglearn.plots.plot_scaling()\nplt.show()\n #그림의 x값은 10~15 사이, y값은 1~9 사이에 있다.\n #그림은 데이터를 변형하는 4가지 방법을 보여준다.\n\n\n#1. Different Kinds of Preprocessing\n\n #1) StandardScaler\n #scikit-learn의 StandardScaler는 각 feature마다 평균이 0이고 분산이 1이 되도록 한다.\n #그리고 모든 feature를 같은 크기로 만든다.\n #하지만 이는 각 feature마다 특정 최소값이나 최대값을 지정하지 않는다.\n\n #2) RobustScaler\n #RobustScaler는 Standard와 비슷하지만, 얘는 median(중위값)과 quartiles(분위값)을 사용한다.\n #이는 특이점(outlier)에 있는 수치들을 무시하도록 만들어준다.\n\n #3) MinMaxScaler\n #MinMaxScaler는 모든 값들이 정확히 0과 1 사이에 있도록 데이터를 움직인다.\n\n #4) Normalizer\n #Normalizer는 약간 다른 방식의 스케일링이다.\n #이는 feature 벡터가 1의 길이(Euclidean)를 갖도록 각 데이터를 재조정한다.\n #다시 말해서, 한 데이터를 반경 1인 원(혹은 구)에 투사한다.\n #이는 각 데이터가 서로 다른 숫자에 의해 스케일 된다는 의미이다.\n #이 정규화는 feature 벡터의 크기가 아닌, 데이터의 방향만이 중요할 때 쓰��다.\n\n\n\n#2. Applying Data Transformations\n #이제 위의 데이터들을 실제 데이터에 적용시켜보자.\n\nX_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, random_state=1)\nprint(X_train.shape)\nprint(X_test.shape)\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\nscaler.fit(X_train)\n\n#transform train data\nX_train_scaled = scaler.transform(X_train)\n\n#print dataset properties before and after scaling\nprint(\"transformed shape: {}\".format(X_train_scaled.shape))\nprint(\"per-feature minimum before scaling:\\n {}\".format(X_train.min(axis=0)))\nprint(\"per-feature maximum before scaling:\\n {}\".format(X_train.max(axis=0)))\nprint(\"per-feature minimum after scaling:\\n {}\".format(X_train_scaled.min(axis=0)))\nprint(\"per-feature maximum after scaling:\\n {}\".format(X_train_scaled.max(axis=0)))\n\n\n\n#transform test data\nX_test_scaled = scaler.transform(X_test)\n#print test data properties after scaling\nprint(\"per-feature minimum after scaling:\\n{}\".format(X_test_scaled.min(axis=0)))\nprint(\"per-feature maximum after scaling:\\n{}\".format(X_test_scaled.max(axis=0)))\n #여기서 값이 0과 1을 벗어나는 것을 알 수 있다.\n #이는 MinMaxScaler가 항상 같은 형태의 변형을 트레이닝과 테스트 셋에 시행하기 때문이다.\n #트레이닝 셋의 min 값을 빼서 범위로 나누기 때문에, 테스트 셋에는 차이가 있다.\n\n\n\n\n#3. 
Scaling Training and Test Data the Same way\n #테스트 셋에 실험하기 위해서는 정확히 같은 방식의 transform을 취하는 게 중요하다.\n #위와 달리 테스트 셋의 방식을 사용했으면 아래와 같은 결과가 나타난다.\n #즉, 트레이닝 셋의 Scaler를 그대로 쓰는게 맞다는 말!!!\n\nfrom sklearn.datasets import make_blobs\n# make synthetic data\nX, _ = make_blobs(n_samples=50, centers=5, random_state=4, cluster_std=2)\n# split it into training and test sets\nX_train, X_test = train_test_split(X, random_state=5, test_size=.1)\n\n#plot the training and test set\nfig, axes = plt.subplots(1, 3, figsize=(13,4))\n\naxes[0].scatter(X_train[:,0], X_train[:,1], c=mglearn.cm2(0), label=\"Training set\", s=60)\naxes[0].scatter(X_test[:,0], X_test[:,1], marker='^', c=mglearn.cm2(1), label=\"Test set\", s=60)\naxes[0].legend(loc='upper left')\naxes[0].set_title(\"Original Data\")\n\n#scale the data using MinMaxScaler\nscaler = MinMaxScaler()\nscaler.fit(X_train)\nX_train_scaled = scaler.transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\n#visualize the properly scaled data\naxes[1].scatter(X_train_scaled[:,0], X_train_scaled[:,1], c=mglearn.cm2(0), label=\"Training set\", s=60)\naxes[1].scatter(X_test_scaled[:,0], X_test_scaled[:,1], marker='^', c=mglearn.cm2(1), label=\"Test set\", s=60)\naxes[1].set_title(\"Scaled Data\")\n\n#rescale the test set separately\n#so test set min is 0 and test set max is 1\n#DO NOT DO THIS! For illustration purposes only.\ntest_scaler = MinMaxScaler()\ntest_scaler.fit(X_test)\nX_test_scaled_badly = test_scaler.transform(X_test)\n\n# visualize wrongly scaled data\naxes[2].scatter(X_train_scaled[:,0], X_train_scaled[:,1], c=mglearn.cm2(0), label=\"training set\", s=60)\naxes[2].scatter(X_test_scaled_badly[:,0], X_test_scaled_badly[:,1], marker='^', c=mglearn.cm2(1), label=\"test set\", s=60)\naxes[2].set_title(\"Improperly Scaled Data\")\n\nfor ax in axes:\n ax.set_xlabel(\"Feature 0\")\n ax.set_ylabel(\"Feature 1\")\n\nplt.show()\n #두 번째와 첫 번째 그림이 완벽히 같은 것을 알 수 있다.(단위를 제외하고)\n #두 번째 그림에서,\n #트레이닝 셋은 0과 1의 최소 최대값을,\n #테스트 셋은 이를 지키지 않는 값을 가지고 있다.\n #세 번째 그림은 테스트 셋의 위치가 잘못된 것을 알 수 있다.\n #그러니까 저렇게 하지마!\n\n'''\nShortCut\nscaler = StandardScaler()\n1) X_scaled = scaler.fit(X).transform(X)\n2) X_scaled_onlyfortraining = scaler.fit_transform(X)\n1은 메서드 체이닝 방식이고, 2는 transform 메서드를 가진 모든 모델이 가지고 있는 메서드이다. 트레이닝 셋을 변형할 때는 2가 좋다.\n'''\n\n\n\n\n#4. Effect of preprocessing on Supervised learning\n #이제 그 차이를 알았으니, 다시 cancer 데이터로 돌아가자.\n #먼저 가공하지 않은 데이터에 SVM을 적용한다.\n\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\n\nX_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, random_state=0)\n\nsvm = SVC(C=100)\nsvm.fit(X_train,y_train)\nprint(\"Test set accuracy: {:.2f}\".format(svm.score(X_test, y_test)))\n\n#MinMaxScaler를 이용해 가공해보자.\nscaler = MinMaxScaler()\nscaler.fit(X_train)\nX_train_scaled = scaler.transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\nsvm.fit(X_train_scaled, y_train)\nprint(\"Scaled test set accuracy: {:.2f}\".format(svm.score(X_test_scaled, y_test)))\n #데이터 가공이 정말 중요하다�� 것을 알 수 있다!\n #모든 스케일러는 같은 인터페이스를 갖고 있기 때문에,\n #그저 scaler = StandardScaler()만 변경해줘도 모두 변경할 수 있다.\n\n\n\n'''\n여기서 우리는 전처리 작업을 위한 간단한 데이터 변형이 어떻게 이루어지는지 보았다.\n다음에는 Transformation의 흥미로운 예제들을 더 살펴보자.\n'''\n","sub_path":"Data_Science/16.Aug-Intro to ML Oreilly/3. 
Unsupervised Learning and Preprocessing/1.Preprocessing and Transformation.py","file_name":"1.Preprocessing and Transformation.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"324338426","text":"\"\"\"\nConstants used in repository\n\"\"\"\n\nALGO_VERSION_NAMES = (\"scan\", \"tree\", \"hash\")\n\nALLOWED_DELIMITERS = (\"\\t\", \",\")\n\n# SAM Format Cigarstring Operators See table in SAMv1 reference documents page 7\nCIGAR_STRING_OPERATIONS = (\"M\", \"I\", \"D\", \"N\", \"S\", \"H\", \"P\", \"=\", \"X\")\nMATCH_TYPES = (\"M\", \"N\", \"=\", \"X\")\nDEL_TYPES = \"D\"\nINS_TYPES = \"I\"\n\n\nTRANSCRIPT_KEYS = [\"transcript\", \"chr\", \"start\", \"cigar\"]\n\nTRANSCRIPT_FILE_COLUMNS = 4\nQUERY_FILE_COLUMNS = 2\n","sub_path":"transcript_converter/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"520789363","text":"import matplotlib.pyplot as plt\r\nimport jieba\r\nfrom wordcloud import WordCloud\r\ntext=open(r'.\\jay.txt','r').read()\r\n#print(text)\r\ncut_text=jieba.cut(text)\r\n# print(type(cut_text))\r\n# print(next(cut_text))\r\n# print(next(cut_text))\r\nresult='/'.join(cut_text)\r\n# print(result)\r\nwc=WordCloud(\r\n font_path=r'.\\simhei.ttf',\r\n background_color='white',\r\n width=1000,\r\n height=500,\r\n max_font_size=300,\r\n min_font_size=10,\r\n)\r\n#插卡\r\nwc.generate(cut_text)\r\nwc.to_file(r'.\\wordcloud.png')","sub_path":"豆瓣爬虫/film_plot.py","file_name":"film_plot.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"284892321","text":"from datetime import datetime\nfrom dateutil import tz\nimport glob\nimport os\nimport pandas as pd\nimport sys\nimport time\n\n\n\ndef coregister_records():\n \"\"\"\n\n \"\"\"\n\n print(\"running coregister records\")\n\n sensors = ['EDA', 'HR', 'TEMP']\n\n path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'ref' )\n save_file = os.path.join(path_folders, 'source_list_02' + '.csv' )\n df = pd.read_csv(save_file)\n del df['Unnamed: 0']\n print(df)\n\n timestamped_path = []\n\n for sensor in sensors:\n for record in df['record']:\n\n\n path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'timestamped', sensor )\n save_file = os.path.join(path_folders, str(str(wearable) + ' ' + str(record).zfill(2) + '.csv'))\n\n\n\n df_record = df[(df['record']==record)]\n print('df_record =')\n print(df_record)\n shared_start = max(list(df_record['starts']))\n shared_end = min(list(df_record['ends']))\n\n for path in df_record['path_long']:\n\n wearable = list(df[(df['path_long']==path)]['wearable'])[0]\n\n source_path = os.path.join(path, sensor + '.csv')\n\n df_source = pd.read_csv(source_path)\n print('df_source = ')\n print(df_source)\n header = list(df_source.columns.values)[0]\n print('header = ')\n print(header)\n information = list(df_source[header])\n freq = information[0]\n print('frequency = ' + str(freq))\n information = information[1:]\n\n record_start = float(header)\n record_length = len(information)/freq\n record_end = record_start + record_length\n print('record start = ' + str(record_start) + ' record end = ' + str(record_end) + ' length = ' + str(record_length/60) )\n\n time_unix = []\n for info in information:\n time_unix.append(record_start + len(time_unix)/freq)\n\n 
df_timestamped = pd.DataFrame()\n df_timestamped[str(str(wearable) + '_time_unix')] = time_unix\n df_timestamped[str(str(wearable) + '_measurements')] = information\n\n\n path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'timestamped', sensor )\n print('path folders = ' + str(path_folders))\n if not os.path.exists(path_folders):\n os.mkdir(path_folders)\n\n save_file = os.path.join(path_folders, str(str(wearable) + ' ' + str(record).zfill(2) + '.csv'))\n # os.mkdir(save_file)\n df_timestamped.to_csv(save_file)\n\n timestamped_path.append(save_file)\n\n\n\n\nif __name__ == \"__main__\":\n coregister_records()\n","sub_path":"code/python/archive/c0104_coregister_records.py","file_name":"c0104_coregister_records.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"456811882","text":"# Exercise 12-6.\n# Here’s another Car Talk Puzzler (http://www.cartalk.com/content/puzzler/transcripts/\n# 200651):\n# What is the longest English word, that remains a valid English word, as you remove its\n# letters one at a time?\n# Now, letters can be removed from either end, or the middle, but you can’t rearrange any\n# of the letters. Every time you drop a letter, you wind up with another English word. If you\n# do that, you’re eventually going to wind up with one letter and that too is going to be an\n# English word—one that’s found in the dictionary. I want to know what’s the longest word\n# and how many letters does it have?\n# I’m going to give you a little modest example: Sprite. Ok? You start off with sprite, you\n# take a letter off, one from the interior of the word, take the r away, and we’re left with the\n# word spite, then we take the e off the end, we’re left with spit, we take the s off, we’re left\n# with pit, it, and I.\n# Write a program to find all words that can be reduced in this way, and then find the\n# longest one.\n# This exercise is a little more challenging than most, so here are some suggestions:\n# 1. You might want to write a function that takes a word and computes a list of all the\n# words that can be formed by removing one letter. These are the “children” of the\n# word.\n# 2. Recursively, a word is reducible if any of its children are reducible. As a base case,\n# you can consider the empty string reducible.\n# 3. The wordlist I provided, words.txt , doesn’t contain single letter words. So you\n# might want to add “I”, “a”, and the empty string.\n# 4. 
To improve the performance of your program, you might want to memoize the\n# words that are known to be reducible.\n\n\ndef is_reducible(word):\n    if len(word) == 0:  # base case '' is reducible\n        return True\n    if len(words[word]) == 0:  # words without children are not reducible\n        return False\n    for child in words[word]:\n        if is_reducible(child):  # reducible if any child is\n            return True\n    return False  # not reducible if no child is\n\n\nwords = {}\nreducible_words = []\nfin = open('words.txt')\nfor line in fin:\n    word = line.strip()\n    words[word] = []\nwords['a'] = []\nwords['i'] = []\nwords[''] = []\n\n# Compute all the children for each word,\n# so that words[word] is a list of all the children of word.\nfor word in words:\n    for i in range(0, len(word)):\n        ith_removed = word[0:i] + word[i + 1:]\n        if ith_removed in words:\n            words[word].append(ith_removed)\n\nn = len(words)\ni = 0\n\nfor word in words:\n    if is_reducible(word):\n        reducible_words.append(word)\n    i += 1\n    if i % 10000 == 0:\n        print(i, 'words processed of', n)\n\nreducible_words.sort(key=len,reverse=True)\nprint(\"The longest reducible word is '\",reducible_words[0],\"'\",sep='')\n\n# => The longest reducible word is \"complecting\", with 11 letters.\n","sub_path":"chapter_12_tuples/12-6_reducible_words.py","file_name":"12-6_reducible_words.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"605519947","text":"import string\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom .GraphManager import GraphManager\n\n\nclass GraphManager3d(GraphManager):\n    words_available_sizes = 3\n\n    def draw_graph(self):\n        fig = plt.figure()\n        ax = fig.add_subplot(111, projection='3d')\n\n        words = super().get_words(word_length=self.words_available_sizes)\n\n        nodes = {\n            (\n                string.ascii_lowercase.index(j[0]),\n                string.ascii_lowercase.index(j[1]),\n                string.ascii_lowercase.index(j[2])\n            ): j for j in words\n        }\n        X, Y, Z = zip(*[(i[0], i[1], i[2]) for i in nodes.keys()])\n\n        ax.scatter(X, Y, Z, c='r', marker='o')\n\n        ages = []\n        for n1 in nodes:\n            for n2 in nodes:\n                if (\n                    (nodes[n1][0] == nodes[n2][0] or\n                     nodes[n1][1] == nodes[n2][1] or\n                     nodes[n1][2] == nodes[n2][2]) and\n                    (n1, n2) not in ages and n1 != n2\n                ):\n                    ages.append((n1, n2))\n\n        for line in ages:\n            x, y, z = zip(*((dot[0], dot[1], dot[2]) for dot in line))\n            ax.plot(x, y, z, color='r')\n\n        for i, txt in enumerate(words):\n            ax.text(X[i], Y[i], Z[i], txt, size=10)\n\n        plt.show()\n","sub_path":"src/graphs/GraphManager3d.py","file_name":"GraphManager3d.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"291953947","text":"\"\"\"Home page shown when the user enters the application\"\"\"\nimport streamlit as st\n\ndef write(sesh=None):\n    \"\"\"Used to write the page in the app.py file\"\"\"\n    with st.spinner(\"Loading Home ...\"):\n        st.write(\n            \"\"\"\n# Home page\n\nThis app helps you cluster and filter a list of ligands from virtual screening.\nFollow the buttons in the panel on the left hand side:\n- First you will need to upload a CSV with, at minimum, a column with the \nSMILES codes of the molecules. \n- Second step is to featurize with some fingerprint, i.e. 
Morgan or MACCS keys\n- Third step is to choose a clustering algorithm \n- Fourth set sliders\n\n\n\n \"\"\"\n )\n\n# if st.button('move on:'):\n# sesh.curr_page+=1\n# fun(fun)\n","sub_path":"src/steps/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"178788389","text":"\r\nclass complex_number:\r\n\r\n\r\n\r\n def __init__(self,i=0,r=0):\r\n self.Im=i\r\n self.Re=r\r\n \r\n\r\n\r\n\r\n def show(self):\r\n if self.Re<0:\r\n print(self.Im,self.Re,\"i\")\r\n else:\r\n print(self.Im,\"+\",self.Re,\"i\")\r\n \r\n\r\n\r\n\r\n def __add__(self,other):\r\n result=complex_number()\r\n result.Im=self.Im+other.Im\r\n result.Re=self.Re+other.Re\r\n return(result)\r\n\r\n\r\n\r\n\r\n def __sub__(self,other):\r\n result=complex_number()\r\n result.Im=self.Im-other.Im\r\n result.Re=self.Re-other.Re\r\n return(result)\r\n \r\n\r\n\r\n\r\n def __mul__(self,other):\r\n result=complex_number()\r\n result.Im=(self.Im*other.Im)-(self.Re*other.Re)\r\n result.Re=(self.Re*other.Im)+(self.Im*other.Re)\r\n return(result)\r\n \r\n\r\n\r\n\r\na=complex_number(9,6)\r\n\r\nb=complex_number(5,8)\r\n\r\nc=a+b\r\nc.show()\r\n\r\nc=a-b\r\nc.show()\r\n\r\nc=a*b\r\nc.show()","sub_path":"copmlex.py","file_name":"copmlex.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"560519967","text":"from djsocial_core.tests import common\nfrom djsocial_core.models import User, Favorite\nfrom djsocial_tools.models import Location, LocationType\nfrom djsocial_notifications.models import create_notification_types, ActionNotificationType\nfrom django.contrib.contenttypes.models import ContentType\n\n\nclass LocationTests(common.BaseTests):\n\n def setUp(self):\n super(LocationTests, self).setUp()\n self.loc_type = LocationType.objects.create(name='Test location type')\n create_notification_types()\n\n def test_add_to_favorites(self):\n user1 = User.objects.get(username='user1')\n location = Location.objects.create(\tuser=user1,\n location='Test Location',\n address='Test address',\n location_type=self.loc_type)\n\n user2 = User.objects.get(username='user2')\n content_type = ContentType.objects.get_for_model(Location)\n Favorite.objects.create(user=user2, object_id=location.id, content_type=content_type)\n self.assertTrue(Favorite.objects.is_favorite(user2, location))\n location.delete()\n\n def tearDown(self):\n ActionNotificationType.objects.all().delete()\n self.loc_type.delete()\n super(LocationTests, self).tearDown()","sub_path":"djsocial_tools/tests/location_tests.py","file_name":"location_tests.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"109611751","text":"names = ['james', 'john', 'jack']\r\nemail_domains = ['gamil', 'hotmail', 'yahoo']\r\n\r\n# use zip to iterate more than one list at a time\r\nfor i, j in zip(names, email_domains):\r\n\tprint(i, j)\r\n\r\n\r\n'''\r\nzip(...)\r\n zip(seq1 [, seq2 [...]]) -> [(seq1[0], seq2[0] ...), (...)]\r\n\r\n Return a list of tuples, where each tuple contains the i-th element\r\n from each of the argument sequences. 
The returned list is truncated\r\n    in length to the length of the shortest argument sequence.\r\n'''\r\n","sub_path":"PythonBasics/for_loops.py","file_name":"for_loops.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"}
+{"seq_id":"173247876","text":"\"\"\"\n\n.. module:: site\n   :synopsis: Restful User-Service outgoing point\n.. moduleauthor:: Ecevit Emre Okan \n\n\n\n\"\"\"\nfrom flask import Flask,jsonify\nfrom flask_cors import CORS\nfrom flask_restful import reqparse, abort, Api, Resource\n\n# import jreader\n\napp = Flask(__name__)\napi = Api(app)\n# data = jreader\n\nCORS(app)\n\n\"\"\"\n\n.. module:: CRUD\n    :synopsis: Restful User-Service CRUD for the JSON-File\n.. moduleauthor:: Ecevit Emre Okan \n\n\"\"\"\nimport json\n\nusers = ''\n\n\"\"\"\nReads the data out of the JSON file\n\"\"\"\nwith open('user.json', 'r') as f:\n    users = json.load(f)\n\n\ndef reader():\n    \"\"\"\n    Returns the User in the file\n    :return: the list of the user in the user.json file\n    \"\"\"\n    return users\n\n\ndef delete(name):\n    \"\"\"\n    Deletes a specific user, first check if exists, then pop it out, then execute writer()\n    :param name: the username\n    \"\"\"\n    if name in users:\n        users.pop(name)\n        writer()\n\n\ndef create(name, id, email):\n    \"\"\"\n    Adds the user in the users dict and executes writer()\n    :param name: username\n    :param id: autogenerated id\n    :param email: emailaddress\n    \"\"\"\n    users.update({name: {'id': id, 'username': name, 'email': email}})\n    writer()\n\n\ndef writer(user=None):\n    \"\"\"\n    Writes the users into the JSON File\n    :param user: the users dict (defaults to the module-level users)\n    \"\"\"\n    data = users if user is None else user\n    with open('user.json', 'w') as f:\n        f.write(json.dumps(data))\n\n\nUSERS = reader()\n\n\ndef abort_if_user_doesnt_exist(username):\n    \"\"\"\n    Check if user exists\n    :param username: username\n    :return: User doesn't exist error in JSONs\n    \"\"\"\n    position = 0\n    for user in USERS:\n        if username in user['username']:\n            return position\n        position += 1\n    abort(404, message=\"User {} doesn't exist\".format(username))\n\nparser = reqparse.RequestParser()\nparser.add_argument('user')\n\n\nclass User(Resource):\n\n    def get(self, username):\n        \"\"\"\n        **Get information of a specific user**\n\n        This function allows user to get a specific user's information through their username.\n        :param username: username of the user\n        :return: user's information accessed by user in json and http status code\n        - Example::\n            curl http://127.0.0.1:5000/user/eecevit\n        - Expected Success Response::\n            {\n            \"id\": \"1\",\n            \"username\": \"eecevit\",\n            \"email\": \"eecevit@student.tgm.ac.at\",\n            \"picture\":\"...\"\n            }\n        - Expected Fail Response::\n            HTTP Status Code: 404\n\n        \"\"\"\n        pos = abort_if_user_doesnt_exist(username)\n\n        return [USERS[pos]]\n\n    def delete(self, username):\n        \"\"\"\n        **Delete User Record**\n\n        This function allows user to delete a user record.\n\n        :param username: name of the user\n        :return: delete status in json and http status code\n\n        - Example::\n            curl http://127.0.0.1:5000/user/eecevit -X DELETE -v\n\n        - Expected Success Response::\n            HTTP Status Code: 204\n\n        - Expected Fail Response::\n            HTTP Status Code: 404\n\n        \"\"\"\n        abort_if_user_doesnt_exist(username)\n        del USERS[username]\n        delete(username)\n        return '', 204\n\n    def put(self, username):\n        \"\"\"\n        **Update Information of a Specific User Record**\n        This function allows user to update a specific user's information through their username.\n        :param username: name of the 
user\n :return: users's information updated\n - Example::\n curl http://127.0.0.1:5000/user/eecevit -d \"name=newName,mail@mail.com, eecevit.jpg\" -X PUT -v\n - Expected Success Response::\n HTTP Status Code: 201\n {\n \"username\": \"newName\",\n \"email\": \"mail@mail.com\",\n \"picture\": \".....\"\n }\n - Expected Fail Response::\n HTTP Status Code: 404\n \"\"\"\n args = parser.parse_args()\n name = args['user'].split(\",\")\n user = {'username': name[0], 'email': name[1]}\n USERS[username] = user\n return user, 201\n\n\n# Userlist\n# shows a list of all users, and lets you POST to add new user\nclass UserList(Resource):\n def get(self):\n \"\"\"\n **Get List of Users**\n This function allows users to get a list of users and their id, username, email and image.\n :return: user's information in json and http status code\n - Example::\n curl http://localhost:5000/users -X GET -v\n - Expected Success Response::\n HTTP Status Code: 200\n {\n \"eecevit\": {\"id\": \"1\", \"username\": \"eecevit\", \"email\": \"eecevit@student.tgm.ac.at\", \"picture\": \"....\"},\n \"danho\": {\"id\": \"2\", \"username\": \"eecevit\", \"email\": \"eecevit@student.tgm.ac.at\", \"picture\": \"....\"},\n \"dsunaric\": {\"id\": \"3\", \"username\": \"eecevit\", \"email\": \"eecevit@student.tgm.ac.at\", \"picture\": \"....\"},\n \"elshal\": {\"id\": \"4\", \"username\": \"eecevit\", \"email\": \"eecevit@student.tgm.ac.at\", \"picture\": \"....\"}\n }\n \"\"\"\n return USERS\n\n def post(self):\n \"\"\"\n **Create User Record**\n\n This function allows user to create(post) a user record.\n\n :return: user's information added by the user in json\n - Example::\n curl http://localhost:5000/user -d \"name=newUser,newUser@mail.at,eecevit.jpg\" -X POST -v\n - Expected Success Response::\n HTTP Status Code: 201\n {\n \"username\": \"newUser\",\n \"email\": \"newUser@mail.at\",\n \"picture\": \".....\"\n }\n - Expected Fail Response::\n HTTP Status Code: 400\n \"\"\"\n args = parser.parse_args()\n id = len(USERS) + 1\n name = args['user'].split(\",\")\n # image = str(encoder.encode(name[2]))\n image = name[2]\n USERS[name[0]] = {'id': id, 'username': name[0], 'email': name[1], 'picture': image}\n writer(USERS)\n return USERS[name[0]], 201\n\n\n##\n## Actually setup the Api resource routing here\n##\napi.add_resource(UserList, '/user')\n# api.add_resource(UserList, '/')\napi.add_resource(User, '/user/')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"src/main/python/server/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"63888741","text":"# -*- coding: utf-8 -*-\nimport os, sys\nfrom bson.objectid import ObjectId\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../util'))\nimport loghelper, db, util\n\n#logger\nloghelper.init_logger(\"migrate_news_categories\", stream=True)\nlogger = loghelper.get_logger(\"migrate_news_categories\")\n\n# task.news categories 两个删除的标签迁移\n# article.news.categories还原\nmongo = db.connect_mongo()\n\ndef main():\n # step1()\n # step2()\n step3()\n\ndef step1():\n REPLACEMENT = [\n # 汇总性新闻->其他新闻类型\n (578358, 578359),\n # 竞争格局->行业研究\n (578355, 578356)\n ]\n for old_tag_id, new_tag_id in REPLACEMENT:\n items = list(mongo.task.news.find({\"categories\": old_tag_id}))\n for item in items:\n logger.info(\"%s, %s\", item[\"_id\"], item[\"categories\"])\n if new_tag_id not in 
item[\"categories\"]:\n mongo.task.news.update({\"_id\":item[\"_id\"]},{\"$addToSet\":{\"categories\": new_tag_id}})\n #exit()\n mongo.task.news.update({\"_id\":item[\"_id\"]},{\"$pull\":{\"categories\": old_tag_id}})\n # exit()\n\n\ndef step3():\n REPLACEMENT = [\n # 汇总性新闻->其他新闻类型\n (578358, 578359),\n # 竞争格局->行业研究\n (578355, 578356),\n # 电子商务->消费\n (8,758),\n # 游戏->文化娱乐\n (40,1713),\n # 广告营销->企业服务\n (133,81),\n # 招聘->企业服务\n (578,81)\n ]\n for old_tag_id, new_tag_id in REPLACEMENT:\n items = list(mongo.article.news.find({\"features\": old_tag_id}))\n for item in items:\n logger.info(\"%s, %s\", item[\"_id\"], item[\"features\"])\n if new_tag_id not in item[\"features\"]:\n mongo.article.news.update({\"_id\": item[\"_id\"]}, {\"$addToSet\": {\"features\": new_tag_id}})\n mongo.article.news.update({\"_id\": item[\"_id\"]}, {\"$pull\": {\"features\": old_tag_id}})\n # exit()\n\n\ndef step2():\n item = mongo.task.news.find_one({},limit=1,sort=[(\"_id\", 1)])\n _id = item[\"_id\"]\n dup(item)\n #logger.info(\"_id: %s\", _id)\n\n cnt = 0\n while True:\n items = list(mongo.task.news.find({\"_id\":{\"$gt\": _id}},limit=100,sort=[(\"_id\", 1)]))\n if len(items) == 0:\n break\n for item in items:\n _id = item[\"_id\"]\n # logger.info(\"%s\", _id)\n dup(item)\n cnt += 1\n logger.info(\"total: %s\", cnt)\n\n\ndef dup(item):\n if item.get(\"news_id\") is None:\n return\n\n news_id = item[\"news_id\"]\n logger.info(\"newsId: %s\", news_id)\n # news = mongo.article.news.find_one({\"_id\": ObjectId(news_id)})\n # if news is not None:\n # logger.info(news[\"title\"])\n if item.get(\"section\") is None:\n mongo.task.news.update({\"_id\": item[\"_id\"]}, {\"$set\":{\"section\": \"step1\"}})\n\n if item.get(\"categories\") is None:\n mongo.article.news.update({\"_id\": ObjectId(news_id)}, {\"$set\": {\"categories\": []}})\n else:\n mongo.article.news.update({\"_id\": ObjectId(news_id)}, {\"$set\":{\"categories\":item[\"categories\"]}})\n # exit()\n\n\nif __name__ == '__main__':\n main()","sub_path":"data/migrate/migrate_news_categories.py","file_name":"migrate_news_categories.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"93806063","text":"# -*-coding:utf-8 -*-\nimport sys\n#print(sys.executable)\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nimport stock_sql\nfrom abc import ABCMeta, abstractmethod\nfrom multiprocessing import cpu_count\nfrom concurrent.futures import ProcessPoolExecutor,as_completed\nimport time\nimport get_info\nimport datetime\n\n\n#pd.show_versions()\ndaily = {\n \"ts_code\": \"股票代码\",\n \"trade_date\": \"交易日期\",\n \"open\": \"开盘价\",\n \"high\": \"最高价\",\n \"low\": \"最低价\",\n \"close\": \"收盘价\",\n \"pre_close\": \"昨收价\",\n \"change\": \"涨跌额\",\n \"pct_chg\": \"涨跌幅(未复权)\",\n \"vol\": \"成交量(手)\",\n \"amount\": \"成交额(千元)\",\n}\n\n\nclass stockInfo:\n def __init__(self,symbol:str =\"000001\",name:str=\"平安银行\"):\n self.symbol=symbol\n self.name=name\n\n\nSEQUENCE_LEN=20\nRREDICT_LEN=30\n\ndef parseUntrainedData(symbol,df):\n \"\"\"\n 解析未训练用的数据\n :return:\n \"\"\"\n index =0\n dateInfo =stock_sql.getLastDate(symbol)\n lastDate = int(dateInfo[1])\n for i,row in df.iterrows():\n if row[\"trade_date\"] >= lastDate:\n index = i\n #df= df.iloc[:index+ SEQUENCE_LEN]\n break\n if index < SEQUENCE_LEN -1 : #数据从未训练过 应该从0开始训练\n index =0\n elif SEQUENCE_LEN -1 <= index: # 数据训练过:\n index = index+1 -(SEQUENCE_LEN -1)\n return index\n\ndef 
getTrianColnum(stockinfo:stockInfo,length=RREDICT_LEN):\n symbol = stockinfo.symbol\n df = stock_sql.getDailyFrame(symbol)\n # 对数据进行训了测试的分离处理\n x, y = prepareLogistics().dataProcess(df)\n col =[\"close\"]\n close = np.array(x[col])[-length:]\n return np.array( close)\n\ndef columnSplit(verify=False):\n \"\"\"\n 将数据按照列的方式拆分成 x和y\n verify :验证y 的日期是否正确 开启时 将带上y对应的日期\n :return:\n \"\"\"\n all = list(daily.keys());\n column_y=['ts_code']\n column_x = [x for x in all if x not in column_y]\n if verify:# 带上日期 验证 日期是否对应\n column_y = [\"close\",'trade_date']\n else:\n column_y = [\"close\"]\n return column_x ,column_y\n\n\ndef getPredictTrainnData( stockInfo,length =RREDICT_LEN):\n #获取stock 原始信息 ,去重 、预测数据对其\n symbol = stockInfo[\"symbol\"]\n df = stock_sql.getDailyFrame(symbol)\n column_x, column_y = columnSplit()\n x = np.array(df[column_x])\n x= np.around(x,decimals=2)\n return x[-length:]\n\n\ndef dataSplit(npArr, split=0.9):\n \"\"\"\n :param npArr:\n :param split:\n :return:\n \"\"\"\n #对数据进行拆分 获得验证数据集\n split_boundary = int(npArr.shape[0] * split)\n train_x = npArr[: split_boundary]\n test_x = npArr[split_boundary:]\n return train_x, test_x\n\n\nclass prepareBase:\n def __init__(self):\n print(str( type(self) )+\" __build __\")\n self.verify = False\n self.only_untrain = False\n self.batch_size = 200\n self.SEQUENCE_LEN = 50\n self.RREDICT_LEN=30\n self.tuple_x=1\n pass\n\n def getInputShape(self):\n \"\"\"\n 获取训练数据的shape\n :return: shape\n \"\"\"\n column_x, _ = columnSplit()\n return (self.SEQUENCE_LEN, len(column_x))\n\n\n def dataProcess(self,df:pd.DataFrame)->(np.array,np.array):\n \"\"\"\n :param df:\n :return:\n \"\"\"\n # 4.对数据按照表格拆分\n column_x, column_y = columnSplit(self.verify)\n x, y = df[column_x], df[column_y]\n # 5拼凑数据 x的最后一行没有预测值 y的第一行没有 训练值\n x, y = x.drop(len(x) - 1, axis=0), y.drop(0, axis=0)\n y.reset_index(drop=True, inplace=True)\n return x, y\n\n def dataSequence(self,x:pd.DataFrame,nor:bool=True)->np.array:\n \"\"\"\n 将dataFrame数据 组成训练序列\n :param df: 原始dataFrame数据\n :param nor: 是否对数据标准化\n :param len: 序列长度\n :return: 序列化数据以及 标准化的Scaler\n \"\"\"\n scaler = MinMaxScaler()\n if nor :\n data_all = np.array(x).astype(\"float64\")\n data_all = scaler.fit_transform(data_all)\n else:\n data_all = np.array(x)\n data = []\n for i in range(len(data_all) - self.SEQUENCE_LEN + 1):\n data.append(data_all[i: i + self.SEQUENCE_LEN])\n x = np.array(data).astype('float64')\n\n return x, scaler\n\n def dataSequence_y(self,y:pd.DataFrame,nor:bool=True)->np.array:\n \"\"\"\n 将dataFrame数据 组成训练序列\n :param df: 原始dataFrame数据\n :param nor: 是否对数据标准化\n :param len: 序列长度\n :return: 序列化数据以及 标准化的Scaler\n \"\"\"\n scaler = MinMaxScaler()\n if nor :\n data_all = np.array(y).astype(\"float64\")\n data_all = scaler.fit_transform(data_all)\n else:\n data_all = np.array(y)\n data = []\n for i in range(len(data_all) - self.SEQUENCE_LEN + 1):\n data.append(data_all[i + self.SEQUENCE_LEN - 1])\n x = np.array(data)\n\n return x, scaler\n\n def test(self):\n print(\"+++++++++++++++++++++++\")\n self.verify = True\n self.only_untrain = True\n df = stock_sql.getStockFrame()\n df.sort_values(by=[\"symbol\"], inplace=True, ignore_index=True)\n stock_list = np.array(df)\n for info in stock_list:\n data = self.getTrainData(stockInfo(symbol=info[0]))\n data_x, data_y = data[0], data[1]\n for i in range(len(data_x)):\n print(data_x[i][self.SEQUENCE_LEN - 1])\n print(data_y[i])\n\n def getTrainData(self, stockinfo: stockInfo) -> (np.array, np.array):\n \"\"\"\n 获取单只股票的训练数据\n :param stockInfo: stock 信息\n :param 
only_untrain: 只使用未训练的数据\n :return: 如果数据不够返回None\n \"\"\"\n symbol = stockinfo.symbol\n df = stock_sql.getDailyFrame(symbol)\n if len(df) <= self.SEQUENCE_LEN:\n return None\n # 对数据进行训了测试的分离处理\n x, y = self.dataProcess(df)\n train_x, scaler_x = self.dataSequence(x, nor= not self.verify)\n train_y, scaler_y = self.dataSequence_y(y,nor =not self.verify)\n if self.only_untrain:\n index = parseUntrainedData(symbol, df)\n if index >= len(train_x):\n # 起始训练数据超出是长度,没有数据\n return None\n train_x, train_y = train_x[index:], train_y[index:]\n return train_x, train_y,scaler_x,scaler_y\n\n def getTestData(self, stockInfo:stockInfo):\n data =self.getTrainData(stockInfo)\n if data is not None and len(data[0])>=0:\n train_x, train_y =data[0],data[1]\n return train_x[-self.RREDICT_LEN:], train_y[-self.RREDICT_LEN:],data[2],data[3]\n else:\n return None\n\n def dataGenerator(self, index: int = 0):\n df = stock_sql.getStockFrame()\n df.sort_values(by=[\"symbol\"], inplace=True, ignore_index=True)\n # 截取index以后的数据\n stock_array = np.array(df)[index:]\n batch_array = stock_sql.arr_split(stock_array, self.batch_size)\n\n for s_array in batch_array:\n # 多进程\n list_x, list_y = self.dataConcat(s_array)\n # list_x,list_y =dataConcat(s_array,only_untrain)\n if len(list_x[0]) == 0 or len(list_y) == 0: continue\n data_x = []\n for i in range(self.tuple_x):\n data_x.append( np.concatenate(list_x[i], axis=0))\n data_y = np.concatenate(list_y, axis=0)\n yield data_x, data_y, s_array\n # yield (getTrainData({\"symbol\":info[0]},only_untrain),info)\n\n def dataConcat(self,stock_list):\n \"\"\"\n 批量处理stock list 数据 将每只处理的stock数据放到 listx 和 listy中\n :param stock_list: 待处理的stock list 包含symbol name 信息\n :param only_untrain: 只处理未使用的数据\n :return: list_x,list_x\n \"\"\"\n list_x, list_y = [], []\n for i in range(self.tuple_x):\n list_x.append([])\n\n for info in stock_list:\n data =self.getTrainData(stockInfo(symbol=info[0]))\n print(\"stock info :\" + info + \"\\r\\n\")\n if data == None: continue\n for i in range(self.tuple_x):\n list_x[i].append(data[0][i])\n list_y.append(data[1])\n return list_x, list_y\n\n def dataConcatMultiple(self, stock_array, only_untrain=False):\n list_x, list_y = [], []\n for i in range(self.tuple_x):\n list_x.append([])\n split_array = np.array_split(stock_array, 4, axis=0)\n executor = ProcessPoolExecutor(max_workers=4)\n\n all_task = [executor.submit(self.dataConcat, list_a) for list_a in split_array]\n for future in as_completed(all_task):\n res = future.result()\n for i in range(self.tuple_x):\n list_x[i].extend(res[i][0])\n list_y.extend(res[1])\n return list_x, list_y\n\nclass logisticsAllScaler(prepareBase):\n def __init__(self):\n super(logisticsAllScaler,self).__init__()\n self.SEQUENCE_LEN=50\n self.list_x= None\n time.sleep(10)\n pass\n\n def getDataWithIndex(self, stock_arr, only_untrain=False):\n list_x, list_y, index_list = [], [], []\n trian_map = {}\n index = 0\n for item in stock_arr:\n print(item[1])\n df = stock_sql.getDailyFrame(item[0])\n if only_untrain:\n index = parseUntrainedData(item[0], df)\n # df.drop(columns=[\"ts_code\"],inplace=True)\n x, y = self.dataProcess(df)\n trian_map[item[0]] = index\n list_x.append(x)\n list_y.append(y)\n index_list.append(len(x))\n return list_x, list_y, index_list, trian_map\n\n def getDataWithIndexMultiple(self,stock_arr, only_untrain=False):\n list_x, list_y, index_list = [], [], []\n trian_map = {}\n a_split = np.array_split(stock_arr, 4, axis=0)\n executor = ProcessPoolExecutor(max_workers=4)\n all_tasks = 
[executor.submit(self.getDataWithIndex, stockS_arr, only_untrain) for stockS_arr in a_split]\n for furture in as_completed(all_tasks):\n res = furture.result();\n list_xt, list_yt, index_listt, trian_mapt = res[0], res[1], res[2], res[3]\n list_x.extend(list_xt)\n list_y.extend(list_yt)\n index_list.extend(index_listt)\n trian_map.update(trian_mapt)\n return list_x, list_y, index_list, trian_map\n\n def allScaler(self):\n if self.list_x is not None:\n return self.list_x, self.list_y, self.scaler_x, self.scaler_y, self.train_map\n\n df = stock_sql.getStockFrame()\n df.sort_values(by=[\"symbol\"], inplace=True, ignore_index=True)\n arr = np.array(df)\n data_new_x, data_new_y, list_index, self.train_map = self.getDataWithIndexMultiple(arr, self.only_untrain)\n data_new_x = np.concatenate(data_new_x, axis=0)\n data_new_y = np.concatenate(data_new_y, axis=0)\n self.scaler_x, self.scaler_y = MinMaxScaler(), MinMaxScaler()\n data_new_x, data_new_y = self.scaler_x.fit_transform(data_new_x), self.scaler_y.fit_transform(data_new_y)\n self.list_x, self.list_y = {}, {}\n list_symbols = list(self.train_map.keys())\n for i in range(len(list_index)):\n data_a, data_b = data_new_x[:list_index[i]], data_new_y[:list_index[i]]\n data_new_x, data_new_y = data_new_x[list_index[i]:], data_new_y[list_index[i]:]\n self.list_x[list_symbols[i]] = data_a\n self.list_y[list_symbols[i]] = data_b\n\n return self.list_x, self.list_y, self.scaler_x, self.scaler_y, self.train_map\n\n def getTrainData(self, stockinfo: stockInfo) -> (np.array, np.array):\n self.allScaler()\n symbol= stockinfo.symbol\n x, y = self.list_x[symbol], self.list_y[symbol]\n train_x, _ = self.dataSequence(x, nor=False)\n train_y, _ = self.dataSequence_y(y, nor=False)\n if self.only_untrain:\n index = self. trian_map[symbol]\n if index >= len(train_x): # 起始训练数据超出是长度,没有数据\n return None\n train_x, train_y = train_x[index:], train_y[index:]\n return train_x,train_y,self.scaler_x,self.scaler_y\n\n\nclass prepareLogistics(prepareBase):\n def __init__(self):\n super(prepareLogistics,self).__init__()\n self.SEQUENCE_LEN=50\n time.sleep(10)\n pass\n\n\nclass prepareClassify(prepareBase):\n\n def __init__(self):\n super(prepareClassify,self).__init__()\n time.sleep(10)\n pass\n\n def dataProcess(self,df:pd.DataFrame)-> (np.array,np.array):\n \"\"\"\n :param df: 包含stock信息的 dataframe\n :return:\n \"\"\"\n # 4.对数据按照表格拆分\n column_x, column_y = columnSplit(self.verify)\n x, y_t = np.array( df[column_x]),np.array( df[column_y])\n # 5拼凑数据 x的最后一行没有预测值 y的第一行没有 训练值\n y_value= [ y_t[i+1][0]-y_t[i][0] for i in range(len(y_t)-1)]\n y_value= np.int32( np.array( y_value)> 0).reshape(-1, 1)\n if self.verify:\n y_date= [str(y_t[i][1]) + \"->\" + str(y_t[i + 1][1]) + \" :\" + str(y_t[i + 1][0]) + \"-\" + str(y_t[i][0]) for i in range(len(y_t)-1)]\n y_date = np.array(y_date).reshape(-1, 1)\n y_value = np.concatenate(( y_date ,y_value),axis=1)\n return x[: len(x)-1], y_value\n\n def dataSequence_y(self,y:pd.DataFrame,nor :bool =False )-> np.array:\n data_all = np.array(y)\n data = []\n for i in range(len(data_all) -self.SEQUENCE_LEN + 1):\n data.append(data_all[i + self.SEQUENCE_LEN - 1])\n return np.array(data) ,None\n\n\nclass prepareLogistics_Ex(prepareBase):\n def __init__(self):\n super(prepareLogistics_Ex,self).__init__()\n self.SEQUENCE_LEN=50\n self.tuple_x=3\n stock_base = get_info.stock_basic()\n self.area_vec,self.indu_vec ,self.area_shape,self.indu_shape= stock_base.getExVec()\n time.sleep(10)\n pass\n\n def getInputShape(self):\n daily_shape = 
super().getInputShape()\n area_shape = (self.area_shape[1],)\n indu_shape = (self.indu_shape[1],)\n return daily_shape,area_shape,indu_shape\n\n def getTrainData(self, stockinfo: stockInfo) -> (np.array, np.array):\n data= super().getTrainData(stockinfo)\n if data is None:\n return None\n train_x, train_y, self.scaler_x, self.scaler_y = data\n sample_len = train_x.shape[0]\n ex_area ,ex_indu=self.area_vec[stockinfo.symbol],self.indu_vec[stockinfo.symbol]\n ex_area,ex_indu = np.tile(ex_area,(sample_len,1)), np.tile(ex_indu,(sample_len,1))\n\n train_x =train_x,ex_area,ex_indu\n\n return train_x,train_y,self.scaler_x,self.scaler_y\n\n\nif __name__ == \"__main__\":\n # getTrainData(1)\n # allScaler()\n # for data in dataGenerator(0,True):\n # print(\"data\")\n pre =prepareClassify()\n pre.test()\n\n","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":15452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"103671873","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer as Vectorizer # changed from sub 2\nfrom nltk.stem import SnowballStemmer\nimport pickle\n\n# ******************* import data ***************************\n# 159571 data predictors from wikipedia comments\n# 15294 toxic\n# 1595 severe_toxic\n# 8449 obscene\n# 478 threat\n# 7877 insult\n# 1405 identity_hate # print(train_data.groupby('identity_hate').count())\n\ntotal_number_of_predictors = 1000 # total number of rows = 159571\ntotal_number_of_predictions = 1000 # total number of rows = 153164\nnumber_of_most_informative_words = 189460 # cant be larger than total number of words (189460 words in \"train\" predictors and 353652 words in \"test+train\" predictors)\npredictor_variables = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\ntrain_data = pd.read_csv('train.csv', nrows=total_number_of_predictors, usecols=predictor_variables + ['comment_text'])\ntest_data = pd.read_csv('test.csv', nrows=total_number_of_predictions, usecols=['id', 'comment_text'])\n\n# ************************ Construct a bag of words matrix and write a csv file\n# This will lowercase everything, and ignore all punctuation by default.\n# It will also remove stop words.\nStemmer = SnowballStemmer('english')\nanalyzer = Vectorizer().build_analyzer()\n\n\n# create an stemmer function to be used in CountVectorizer + removing words with <= 2 characters\ndef stemmed_words(doc):\n return (Stemmer.stem(w) for w in analyzer(doc) if len(w) > 2)\n\nvectorizer = Vectorizer(lowercase=True, stop_words='english', analyzer=stemmed_words,\n max_features=number_of_most_informative_words, max_df=0.85, min_df=3)\ntraining_full_bow_matrix = vectorizer.fit_transform(train_data.loc[:, 'comment_text'].values) # for the test data set use only .transform() method to have the same order of words (same vocabulary with the same order)\nprint('full train BOW matrix size: {}'.format(training_full_bow_matrix.shape))\n\n# pickle the full bow matrix file for further use\npickle.dump(training_full_bow_matrix, open('pickled_training_bow.p', 'wb'))\nprint('training BOW is completed')\n\n# ********************** do the same for test data\ntesting_full_bow_matrix = vectorizer.transform(test_data.loc[:, 'comment_text'].values)\nprint('full test BOW matrix size: {}'.format(testing_full_bow_matrix.shape))\n\npickle.dump(testing_full_bow_matrix, open('pickled_test_bow.p', 'wb'))\nprint('test BOW is completed')\n\n\n\n\n\n\n\n\n# ADD 
META FEATURES (such as count of caps letters) TO INCREASE ACCURACY!!!\n# other predictive algorithms such as random forrest or NN might give a better result\n# I can also use n-grams\n# tf-idf transformation can also increase accuracy\n\n","sub_path":"submission_3_preprocessing.py","file_name":"submission_3_preprocessing.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"398721975","text":"from Manager import Manager\nfrom Dropout import Dropout\nfrom UnitLayer import UnitLayer\nfrom Channel import Channel\nfrom .common import unimplemented\n\nimport scipy as sp\nimport numpy as np\nimport inspect\nimport math\nimport scipy.special\nimport scipy.stats as st\n\nclass UnitDropout(UnitLayer):\n def __init__(self, *args, **kwargs):\n UnitLayer.__init__(self, *args, **kwargs)\n self.prop_dropout = 0.5\n self.shape = (100,100)\n self.num_inst = 1\n\n def setUp(self):\n super(UnitDropout, self).setUp()\n\n self.input_channel.shape = self.shape\n\n self.layer = Dropout(\n self.manager,\n input_channel = self.input_channel.name,\n prob_dropout = self.prob_dropout\n )\n self.layer.build()\n\n self.output_channel = self.layer.output_channel\n\n self.lower = self.rand_normal_like(self.input_channel.array, loc=10.0)\n self.upper = self.rand_normal_like(self.input_channel.array, loc=-10.0)\n\n def tearDown(self):\n self.layer.unregister()\n super(UnitDropout, self).tearDown()\n\n def test_forward(self):\n self.input_channel.phase = \"forward\"\n\n self.input_channel.array.set(self.lower)\n self.output_channel.array.set(self.upper)\n\n self.layer.forward()\n self.manager.compute_queue.finish()\n\n self.lower = self.input_channel.array.get()\n self.upper = self.output_channel.array.get()\n\n self.assertTrue(np.all(np.logical_or((self.lower - self.upper) == self.lower, self.lower == self.upper)))\n\n z_sum = float(np.sum((np.absolute(self.upper) < 1e-6).astype(int)))\n z_prob = float(z_sum) / float(self.input_channel.size * self.input_channel.num_inst)\n z_stddev = math.sqrt(self.prob_dropout * (1 - self.prob_dropout) / self.upper.size)\n z_score = np.absolute(z_prob - self.prob_dropout) / z_stddev\n self.assertTrue(sp.special.ndtr(z_score) < 0.999)\n\n def test_backward(self):\n pass\n\ndef main():\n UnitDropout.run_parameterized_tests(\n default_condition=\n {'prob_dropout':0.75, 'num_inst':10, 'shape':(100,100)},\n condition_list=[\n {'prob_dropout':0.2, 'num_inst':1},\n {'prob_dropout':0.5, 'num_inst':1},\n {'prob_dropout':0.9, 'num_inst':1},\n\n {'prob_dropout':0.2, 'num_inst':10},\n {'prob_dropout':0.5, 'num_inst':10},\n {'prob_dropout':0.9, 'num_inst':10},\n\n {'prob_dropout':0.2, 'num_inst':100},\n {'prob_dropout':0.5, 'num_inst':100},\n {'prob_dropout':0.9, 'num_inst':100},\n ])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Axono/UnitDropout.py","file_name":"UnitDropout.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"638955149","text":"# 14.1 - Use SQLite\n# Solutions to review exercises\n\nimport sqlite3\n\n# Create a temporary database connection in RAM\nwith sqlite3.connect(\":memory:\") as connection:\n c = connection.cursor()\n\n # Exercise 1\n # Create a \"Roster\" table with Name, Species and IQ fields\n c.execute(\"CREATE TABLE Roster(Name TEXT, Species TEXT, IQ INT)\")\n\n # Exercise 2\n # Add some data into the database\n roster_data = (\n (\"Jean-Baptiste Zorg\", \"Human\", 
122),\n (\"Korben Dallas\", \"Meat Popsicle\", 100),\n (\"Ak'not\", \"Mangalore\", -5),\n )\n c.executemany(\"INSERT INTO Roster VALUES(?, ?, ?)\", roster_data)\n\n # Exercise 3\n # Update the Species of Korben Dallas to \"Human\"\n c.execute(\n \"UPDATE Roster SET Species=? WHERE Name=?\", (\"Human\", \"Korben Dallas\")\n )\n\n # Exercise 4\n # Display the names and IQs of everyone classified as Human\n c.execute(\"SELECT Name, IQ FROM Roster WHERE Species = 'Human'\")\n for row in c.fetchall():\n print(row)\n","sub_path":"ch14-sql-database-connections/1-use-sqlite.py","file_name":"1-use-sqlite.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"297726855","text":"from array import array\nfrom random import randint\n\n\ndef line_search(array_a, array_b):\n key = 3409\n value_a = len(array_a)\n value_b = len(array_b)\n if value_a <= 1 or value_b <= 1:\n print(\"\\nError!\")\n print(\"\\n\\nSearching elements:\")\n for i in range(value_a - 1):\n for j in range(i + 1, value_a):\n if array_a[i] == array_a[j]:\n for k in range(value_b):\n if array_a[i] == array_b[k]: break\n if k == value_b - 1:\n if array_a[i] != key:\n print(f\"{array_a[i]} | \")\n key = array_a[i]\n\ndef binary(array, key, first, last):\n if first > last: return -78\n middle = (first + last) // 2\n middle_value = array[middle]\n if middle_value == key: return middle\n else:\n if middle_value > key:\n return binary(array, key, first, middle - 1)\n else:\n return binary(array, key, middle + 1, last)\n\ndef binary_search(array_a, array_b):\n key = 8898\n array_a.sort()\n array_b.sort()\n print(\"\\n\\nSearching elements:\")\n for i in range(len(array_a) - 1):\n if array_a[i] == key: continue\n for j in range(i + 1, len(array_a)):\n if array_a[i] == array_a[j]:\n if binary(array_b, array_a[i], 0, len(array_b) - 1) == -78:\n print(f\"{array_a[i]} | \")\n key = array_a[i]\n break\n\n# def long_com_substr(a, b):\n# x = ' '.join(str(e) for e in a)\n# y = ' '.join(str(e) for e in b)\n# m = len(x)\n# n = len(y)\n# counter = [[0]*(n+1) for x in range(m+1)]\n# longest = 0\n# lcs_set = set()\n# for i in range(m):\n# for j in range(n):\n# if x[i] == y[j]:\n# c = counter[i][j] + 1\n# counter[i+1][j+1] = c\n# if c > longest:\n# lcs_set = set()\n# longest = c\n# lcs_set.add(x[i-c+1:i+1])\n# elif c == longest:\n# lcs_set.add(x[i-c+1:i+1])\n\n# return lcs_set\ndef long_com_substr(a, b):\n x = ' '.join(str(e) for e in a)\n y = ' '.join(str(e) for e in b)\n m = len(x)\n n = len(y)\n counter = [[0]*(n+1) for x in range(m+1)]\n longest = 0\n lcs_set = []\n for i in range(m):\n for j in range(n):\n if x[i] == y[j]:\n c = counter[i][j] + 1\n counter[i+1][j+1] = c\n if c > longest:\n lcs_set = []\n longest = c\n lcs_set.append(x[i-c+1:i+1])\n elif c == longest:\n lcs_set.append(x[i-c+1:i+1])\n\n return lcs_set\n\n# def search_lcs(a, b):\n \n\n# x = ' '.join(str(e) for e in a)\n# y = ' '.join(str(e) for e in b)\n# flag = True\n# while flag:\n# if (long_com_substr == []): flag = False\n# else:\n# kk = long_com_substr(x, y)\n# k = ''.join(str(e) for e in kk)\n# print(k)\n# yy = y.replace(k, \"\")\n# print(yy)\n# print(yy)\n \n\n\nVALUE = 20\narray_a = [randint(0, 10) for item in range(VALUE)]\narray_b = [randint(0, 10) for item in range(VALUE)]\nprint(\"*** Array A ***\")\nprint(array_a)\nprint(\"\\n\\n*** Array B ***\")\nprint(array_b)\nprint(\"\\n_______________________________________________\\n*** Sorted array A 
***\")\narray_a.sort()\nprint(array_a)\nprint(\"\\n_______________________________________________\\n*** Sorted array B ***\")\narray_b.sort()\nprint(array_b)\nline_search(array_a, array_b)\nbinary_search(array_a, array_b)\nprint(\"\\n\\nThe biggest coincidence:\\t\")\nprint(*long_com_substr(array_a, array_b))\n","sub_path":"python/Lab7/Lab7.py","file_name":"Lab7.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"389478470","text":"import json\nimport os\nimport sqlalchemy\nfrom database import Location\nfrom wunderground import weather_for_url, autocomplete_user_input\n\nAPI_KEY = os.environ.get('WUNDERGROUND_KEY', 'development')\nSQLALCHEMY_DATABASE_URI = 'sqlite:///db.db'\n\ndef get_db_session():\n engine = sqlalchemy.create_engine(SQLALCHEMY_DATABASE_URI, echo=True)\n return sqlalchemy.orm.sessionmaker(bind=engine)()\n\nif __name__ == '__main__':\n session = get_db_session()\n zipcodes = {'10003', '12180', '11105', '11215'}\n for z in zipcodes:\n url, name = autocomplete_user_input(z)\n cache = json.dumps(weather_for_url(url, API_KEY))\n session.merge(Location(url, name=name, cache=cache))\n session.commit()\n","sub_path":"auto_fetch_data.py","file_name":"auto_fetch_data.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"628618116","text":"# -*- coding: utf-8 -*-\n#################################################################################\n# Author : Acespritech Solutions Pvt. Ltd. ()\n# Copyright(c): 2012-Present Acespritech Solutions Pvt. Ltd.\n# All Rights Reserved..\n#\n# This program is copyright property of the author mentioned above.\n# You can`t redistribute it and/or modify it.\n#\n#################################################################################\n\nfrom odoo import fields,api,models,_\nfrom odoo.tools import float_is_zero, float_compare, pycompat\n\n\nclass InvoiceInfo(models.Model):\n _inherit = 'account.invoice'\n\n @api.model\n def get_outstanding_info(self,vals):\n if(vals):\n partner_id = self.env['res.partner'].browse(vals);\n account_id = partner_id.property_account_receivable_id\n comp_id = self.env['res.partner']._find_accounting_partner(partner_id).id;\n domain = [('account_id', '=', account_id.id),\n ('partner_id', '=', self.env['res.partner']._find_accounting_partner(partner_id).id),\n ('reconciled', '=', False), '|', ('amount_residual', '!=', 0.0),\n ('amount_residual_currency', '!=', 0.0)]\n domain.extend([('credit', '>', 0), ('debit', '=', 0)])\n type_payment = _('Outstanding credits')\n lines = self.env['account.move.line'].search(domain)\n info = {'title': '', 'outstanding': True, 'content': [], 'invoice_id': self.id}\n if len(lines) != 0:\n for line in lines:\n if line.currency_id and line.currency_id == self.currency_id:\n amount_to_show = abs(line.amount_residual_currency)\n else:\n amount_to_show = line.company_id.currency_id.with_context(date=line.date).compute(abs(line.amount_residual), self.currency_id)\n if float_is_zero(amount_to_show, precision_rounding=self.currency_id.rounding):\n continue\n info['content'].append({\n 'journal_name': line.ref or line.move_id.name,\n 'amount': amount_to_show,\n 'id': line.id,\n })\n info['title'] = type_payment\n return info\n\n @api.model\n def get_credit_info(self,vals):\n lines_info = []\n move_line_obj = self.env['account.move.line']\n if vals:\n for each in vals:\n if each['partner_id']:\n 
partner_id = self.env['res.partner'].browse(each['partner_id']);\n credit_aml = self.env['account.move.line'].browse(each['journal_id'])\n move_line_obj |= credit_aml\n credit_journal_id = credit_aml.journal_id.default_credit_account_id\n debit_account_id = credit_aml.journal_id.default_debit_account_id\n account_id = partner_id.property_account_receivable_id\n lines_info.append((0, 0, {'account_id': account_id.id,\n 'debit': each['amount'],\n 'partner_id': partner_id.id,\n }))\n lines_info.append((0, 0, {'account_id': credit_journal_id.id,\n 'credit': each['amount'],\n 'partner_id': partner_id.id,\n }))\n\n move = self.env['account.move'].create({'ref':'',\n 'journal_id':credit_aml.payment_id.journal_id.id,\n 'line_ids':lines_info,\n })\n lines_info = []\n line_id = move.line_ids.filtered(lambda l:l.account_id.id==account_id.id and l.partner_id.id == partner_id.id)\n self.env['account.partial.reconcile'].create(\n {'credit_move_id': credit_aml.id, 'debit_move_id': line_id.id,\n 'amount': line_id.debit,\n })\n move.post()\n return True\n\n#vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"flexiretail_ee_advance/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"652980275","text":"\"\"\"\nProblem: 763. Partition Labels\nUrl: https://leetcode.com/problems/partition-labels/ \nAuthor: David Wang\nDate: 11/15/2018\n\"\"\"\nclass Solution(object):\n def partitionLabels(self, S):\n \"\"\"\n :type S: str\n :rtype: List[int]\n \"\"\"\n # index of last occurence of a particular letter\n ll_map = {l: i for i, l in enumerate(S)}\n part = []\n last = 0\n length = 0\n for i, s in enumerate(S):\n last = max(last, ll_map[s])\n # only one occurence of the letter\n if i == last:\n part.append(i - length + 1)\n length = i + 1\n return part\n","sub_path":"763_Partition_Labels/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"99947007","text":"# coding: utf-8\n\nfrom sympy import Symbol\nfrom sympy.core.containers import Tuple\nfrom sympy import symbols\nfrom sympy import pi, cos, sin\nfrom sympy import srepr\nfrom sympy import I\n\nfrom sympde.core import Constant\nfrom sympde.calculus import grad, dot, inner, cross, rot, curl, div\nfrom sympde.calculus import laplace, hessian, bracket, convect\nfrom sympde.topology import (dx, dy, dz)\nfrom sympde.topology import ScalarFunctionSpace, VectorFunctionSpace\nfrom sympde.topology import Domain\nfrom sympde.topology import Mapping\nfrom sympde.topology import elements_of\nfrom sympde.expr import BilinearForm\nfrom sympde.expr import integral\n\nfrom gelato import gelatize\n\nDIM = 2\n\n#==============================================================================\ndef test_bilinear_2d_1():\n domain = Domain('Omega', dim=DIM)\n\n V = VectorFunctionSpace('V', domain)\n\n u,v = elements_of(V, names='u,v')\n\n c = Constant('c')\n\n expr = c * div(v) * div(u) + curl(v) * curl(u)\n expr = BilinearForm((u,v), integral(domain, expr))\n\n print('> input >>> {0}'.format(expr))\n print('> gelatized >>> {0}'.format(gelatize(expr)))\n\n#==============================================================================\ndef test_bilinear_2d_2():\n domain = Domain('Omega', dim=DIM)\n\n V = ScalarFunctionSpace('V', domain)\n\n u,v = elements_of(V, names='u,v')\n\n expr = 
dot(grad(v), grad(u))\n expr = BilinearForm((u,v), integral(domain, expr))\n\n print('> input >>> {0}'.format(expr))\n print('> gelatized >>> {0}'.format(gelatize(expr)))\n\n#==============================================================================\ndef test_bilinear_2d_3():\n domain = Domain('Omega', dim=DIM)\n\n V = ScalarFunctionSpace('V', domain)\n\n u,v = elements_of(V, names='u,v')\n\n c = Constant('c')\n\n expr = dot(grad(v), grad(u)) + c*v*u\n expr = BilinearForm((u,v), integral(domain, expr))\n\n print('> input >>> {0}'.format(expr))\n print('> gelatized >>> {0}'.format(gelatize(expr)))\n\n#==============================================================================\ndef test_bilinear_2d_mapping_1():\n domain = Domain('Omega', dim=DIM)\n\n M = Mapping('M', DIM)\n\n mapped_domain = M(domain)\n\n V = ScalarFunctionSpace('V', mapped_domain)\n\n u,v = elements_of(V, names='u,v')\n\n c = Constant('c')\n\n expr = dot(grad(v), grad(u)) + c*v*u\n expr = BilinearForm((u,v), integral(mapped_domain, expr))\n\n print('> input >>> {0}'.format(expr))\n print('> gelatized >>> {0}'.format(gelatize(expr, mapping=M, human=True)))\n\n#==============================================================================\ndef test_bilinear_2d_mapping_2():\n domain = Domain('Omega', dim=DIM)\n\n M = Mapping('M', DIM)\n\n mapped_domain = M(domain)\n\n V = VectorFunctionSpace('V', mapped_domain)\n\n u,v = elements_of(V, names='u,v')\n\n c = Constant('c')\n\n expr = c * div(v) * div(u) + curl(v) * curl(u)\n expr = BilinearForm((u,v), integral(mapped_domain, expr))\n\n print('> input >>> {0}'.format(expr))\n print('> gelatized >>> {0}'.format(gelatize(expr, mapping=M, human=True)))\n\n#==============================================================================\n# CLEAN UP SYMPY NAMESPACE\n#==============================================================================\n\ndef teardown_module():\n from sympy import cache\n cache.clear_cache()\n\ndef teardown_function():\n from sympy import cache\n cache.clear_cache()\n","sub_path":"gelato/tests/test_bilinear_2d.py","file_name":"test_bilinear_2d.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"17148564","text":"import os\nimport operator\nimport random\nimport time\nimport io\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom audio_reader import AudioReader\nfrom file_logger import FileLogger\nfrom utils import FIRST_INDEX, sparse_tuple_from\nfrom utils import convert_inputs_to_ctc_format\n\n\nsample_rate = 16000\n# Some configs\nnum_features = 26 # log filter bank or MFCC features\n# Accounting the 0th index + space + blank label = 28 characters\nnum_classes = ord('z') - ord('a') + 1 + 1 + 1\n\n# Hyper-parameters\nnum_epochs = 1\nnum_hidden = 256\nbatch_size = 346\n\nnum_examples = 1\nnum_batches_per_epoch = 10\n\naudio = AudioReader(audio_dir='test', cache_dir='test_cache', sample_rate=sample_rate)\n\nfile_logger = FileLogger('out_test.tsv', ['curr_epoch', 'train_cost', 'train_ler', 'val_cost', 'val_ler'])\n\ndef next_batch(bs=batch_size, train=True):\n x_batch = []\n y_batch = []\n seq_len_batch = []\n original_batch = []\n i=0\n for k in range(bs):\n ut_length_dict = dict([(k, len(v['target'])) for (k, v) in audio.cache.items()])\n utterances = sorted(ut_length_dict.items(), key=operator.itemgetter(0))\n test_index = 346\n if train:\n utterances = [a[0] for a in utterances[test_index:]]\n else:\n utterances = [a[0] for a in utterances[:test_index]]\n 
training_element = audio.cache[utterances[i]]\n target_text = training_element['target']\n audio_buffer = training_element['audio']\n x, y, seq_len, original = convert_inputs_to_ctc_format(audio_buffer,\n sample_rate,\n 'whatever',\n num_features)\n x_batch.append(x)\n y_batch.append(y)\n seq_len_batch.append(seq_len)\n original_batch.append(original)\n i+=1\n\n y_batch = sparse_tuple_from(y_batch)\n seq_len_batch = np.array(seq_len_batch)[:, 0]\n for i, pad in enumerate(np.max(seq_len_batch) - seq_len_batch):\n x_batch[i] = np.pad(x_batch[i], ((0, 0), (0, pad), (0, 0)), mode='constant', constant_values=0)\n\n x_batch = np.concatenate(x_batch, axis=0)\n return x_batch, y_batch, seq_len_batch, original_batch\n\n\ndef decode_batch(d, original, phase='training'):\n for jj in range(batch_size): # just for visualisation purposes. we display only 2.\n values = d.values[np.where(d.indices[:, 0] == jj)]\n str_decoded = ''.join([chr(x) for x in np.asarray(values) + FIRST_INDEX])\n # Replacing blank label to none\n str_decoded = str_decoded.replace(chr(ord('z') + 1), '')\n # Replacing space label to space\n str_decoded = str_decoded.replace(chr(ord('a') - 1), ' ')\n print(str_decoded)\n output_txt = io.open(\"output.txt\", \"a\", encoding=\"utf-8\")\n result = str(jj+1) + ' ' + str_decoded + '\\n'\n output_txt.writelines(result)\n output_txt.close()\n\n\ndef run_ctc():\n # make sure the values match the ones in generate_audio_cache.py\n merged = tf.summary.merge_all()\n with tf.Session() as session:\n saver = tf.train.import_meta_graph('model/ctc-5615-236.meta')\n saver.restore(session, tf.train.latest_checkpoint('model/'))\n \n graph = tf.get_default_graph()\n inputs, targets, seq_len, original = next_batch(train=False)\n input_x = graph.get_operation_by_name(\"inputs\").outputs[0]\n val_feed = {\"inputs:0\": inputs, \"seq_len:0\": seq_len}\n logits = graph.get_tensor_by_name(\"Reshape_1:0\")\n \n decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, seq_len)\n \n d = session.run(decoded[0], feed_dict=val_feed)\n decode_batch(d, original, phase='validation')\n\n\nif __name__ == '__main__':\n run_ctc()\n","sub_path":"test/ctc_tensorflow_test.py","file_name":"ctc_tensorflow_test.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"320252938","text":"from thespian.actors import *\n\nfrom datastructs.entities import Entity\nfrom utilities.logger import log\nfrom utilities.random_generator import RANDOM\nfrom datastructs.events import Event\n\nclass Entity_System(Actor):\n def __init__(self):\n self.identity = \"Entity_System\"\n self.parent = None\n self.entity_loader = None\n self.geometry_system = None\n self.main_event_loop = None\n self.event_system = None\n self.entity_types = {}\n self.entity_set = set()\n self.current_id = 0\n self.current_events = set()\n self.geom_proccessing = set()\n\n\n def get_entity_loader(self):\n self.send(self.parent, (\"actor_request\",\"entity_loader\"))\n\n def get_geometry_system(self):\n self.send(self.parent, (\"actor_request\",\"geometry_system\"))\n\n def get_event_system(self):\n self.send(self.parent, (\"actor_request\",\"event_system\"))\n\n def get_entity_data(self):\n self.send(self.entity_loader, \"entity_request\")\n \n def print_status(self):\n return str(len(self.entity_types)) + \" Entity types\"\n \n def construct_entities_types(self, entity_data):\n #construct dummy entities out of each entity type\n #this ensures that each entity type can be instantiated 
into a valid entity\n for entity in entity_data:\n self.current_id = self.current_id + 1\n e = Entity(self.current_id, entity_data[entity])\n self.entity_types.update({e.name: e})\n log.INFO(\"Loaded \" + self.print_status(),self.identity)\n self.send(self.parent,\"TASK_COMPLETE\")\n\n\n #this throws a request for entity state initialization into the queue\n def request_initialize_state(self,triggerEvent,entity):\n #print(\"requesting init of state for \" + str(entity))\n e = Event({\"time\": triggerEvent.time,\n \"event\": \"InitializeState()\",\n \"source\": entity})\n self.current_events.add(e)\n self.send(self.event_system,(\"add_event\",e))\n\n #handles simple spawning at a random location\n def get_spawn_location(self,args):\n ######if an exact coordinate is specified for spawning#####\n if \"x\" in args and \"y\" in args:\n if not alt in args: args[\"alt\"] = 0#default altitude to 0\n ####if no location is specified at all#########\n else:\n #throw warning\n log.WARN(\"No location specified for spawning event with args \" + str(args) + \" entity will spawn at a random location.\")\n args[\"x\"] = RANDOM.next_float(180,-180)\n args[\"y\"] = RANDOM.next_float(90,-90)\n args[\"alt\"] = 0\n return args\n\n\n\n #handles simple spawning at random or pre-defined coords\n def spawn(self,event):\n #log.EVENT(event)\n if not \"location\" in event.args:\n self.current_id = self.current_id + 1\n event.args = self.get_spawn_location(event.args)\n ent = Entity.create_entity_of_type(self.entity_types[event.args[\"entity\"]],self.current_id,event.args)\n self.entity_set.add(ent)\n self.current_events.discard(event)\n self.request_initialize_state(event,ent)\n self.check_events_finished()\n else:\n #if we need to spawn the entity at a site by tags\n #request a site from the geometry system.\n #The response is handled in spawn_at_site\n self.geom_proccessing.add(event)\n self.send(self.geometry_system,(\"site_by_tags\",event))\n\n\n #handles spawning at a site chosen by tags\n def spawn_at_site(self,event,site):\n self.current_id = self.current_id + 1\n current_pos = site.get_pos_in_site()\n (event.args[\"x\"],event.args[\"y\"],event.args[\"alt\"]) = current_pos\n event.args[\"x\"] = current_pos[0]\n\n ent = Entity.create_entity_of_type(self.entity_types[event.args[\"entity\"]],self.current_id,event.args)\n self.entity_set.add(ent)\n self.current_events.discard(event)\n self.geom_proccessing.discard(event)\n log.EVENT(event)\n\n #initialize the entity's state and then check if we're done\n self.request_initialize_state(event,ent)\n self.check_events_finished()\n\n #discard the older version of the entity, update to the new version\n #this works because entities are compared by UUID, not object values\n #This method is the last to be called when initializing an entity\n def update_entity_state(self,event,entity):\n log.EVENT(event)\n self.entity_set.discard(entity)\n self.entity_set.add(entity)\n self.current_events.discard(event)\n self.check_events_finished()\n\n def process_event(self,event):\n self.current_events.add(event)\n if event.event_type == \"Spawn\":\n self.spawn(event)\n\n def check_events_finished(self):\n if not self.current_events:\n self.send(self.main_event_loop,(\"finished\",self.identity.lower()))\n\n def receiveMessage(self, message, sender):\n context = None\n #print(message)\n if isinstance(message, tuple):\n context,message = message\n if message == \"init\":\n self.parent = sender\n self.get_entity_loader()\n self.get_geometry_system()\n self.get_event_system()\n elif context 
== \"event\":\n self.main_event_loop = sender\n self.process_event(message)\n elif context == \"entity_loader\":\n self.entity_loader = message\n self.get_entity_data()\n elif context == \"geometry_system\":\n self.geometry_system = message\n elif context == \"event_system\":\n self.event_system = message\n elif context == \"entity_data\":\n self.construct_entities_types(message)\n elif context == \"update_entity_state\":\n #message should be a tuple of (event, entity)\n self.update_entity_state(*message)\n elif context == \"found_site\":\n #message should be a tuple of (event, site)\n self.spawn_at_site(*message)","sub_path":"systems/entity_system.py","file_name":"entity_system.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"438104370","text":"from tkinter import filedialog\nfrom tkinter import *\nfrom PIL import ImageTk,Image\nimport numpy as np\nfrom stl import mesh\nfrom mpl_toolkits import mplot3d\nfrom matplotlib import pyplot\n\nmodelX = 100.0\nmodelY = 100.0\nmodelZ = 1.4\nbottomLayerHeight = 0.14\nlayerHeight = 0.035\n\nclass App:\n\n def upload_button():\n global tkImg\n global img\n global pixelsGrayVal\n root.filename = filedialog.askopenfilename(initialdir = \"/home/james/Pictures\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\")))\n\n try:\n img = Image.open(root.filename).convert(\"LA\")\n tkImg = ImageTk.PhotoImage(Image.open(root.filename).convert(\"LA\")) \n except:\n print(\"something went wrong\")\n\n width, height = img.size\n imRender = Label(root, image=tkImg)\n imRender.place(x=300,y=10)\n pixels = img.load()\n pixelsGrayVal = []\n for x in range(width):\n for y in range(height):\n pixelsGrayVal.append(pixels[x,y][0])\n \n maxHeight = max(pixelsGrayVal)\n\n for x in range(len(pixelsGrayVal)):\n pixelsGrayVal[x] = ((((pixelsGrayVal[x]) / maxHeight) * modelZ ) + bottomLayerHeight)\n\n numBins = int(modelZ / layerHeight) + 1\n hist = np.histogram(pixelsGrayVal, bins=numBins)\n #print(hist)\n\n def render_mesh():\n #print(\"render\")\n #modelX = float(xLenEntry.get())\n #print(modelX)\n \n # Or creating a new mesh (make sure not to overwrite the `mesh` import by\n # naming it `mesh`):\n VERTICE_COUNT = 100\n data = np.zeros(VERTICE_COUNT, dtype=mesh.Mesh.dtype)\n your_mesh = mesh.Mesh(data, remove_empty_areas=False)\n\n # The mesh normals (calculated automatically)\n your_mesh.normals\n # The mesh vectors\n your_mesh.v0, your_mesh.v1, your_mesh.v2\n # Accessing individual points (concatenation of v0, v1 and v2 in triplets)\n assert (your_mesh.points[0][0:3] == your_mesh.v0[0]).all()\n assert (your_mesh.points[0][3:6] == your_mesh.v1[0]).all()\n assert (your_mesh.points[0][6:9] == your_mesh.v2[0]).all()\n assert (your_mesh.points[1][0:3] == your_mesh.v0[1]).all()\n\n your_mesh.save('new_stl_file.stl')\n\n def __init__(self, master):\n frame = Frame(master, width=1000, height=500)\n frame.pack()\n\n self.uploadButton = Button(frame, text=\"Upload Photo\", command=App.upload_button)\n self.uploadButton.place(x=10, y=10)\n\n self.render3DButton = Button(frame, text=\"Render Mesh\", command=App.render_mesh)\n self.render3DButton.place(x=10, y=60)\n\n\nroot = Tk()\napp = App(root)\n\nxLenEntry = Entry(root)\nxLenEntry.place(x=10,y=100,width=80)\nxLabel = Label(root, text=\"X\").place(x=95,y=100)\n\nyLenEntry = Entry(root)\nyLenEntry.place(x=10,y=120,width=80)\nyLabel = Label(root, text=\"Y\").place(x=95,y=120)\n\nzLenEntry = 
Entry(root)\nzLenEntry.place(x=10,y=140,width=80)\nzLabel = Label(root, text=\"Z\").place(x=95,y=140)\n\nlayerHeightEntry = Entry(root)\nlayerHeightEntry.place(x=10,y=160,width=80)\nlayerHeightLabel = Label(root, text=\"layer height\").place(x=95,y=160)\n\ninitialHeightEntry = Entry(root)\ninitialHeightEntry.place(x=10,y=180,width=80)\ninitialHeightLabel = Label(root, text=\"initial height\").place(x=95,y=180)\n\n\nroot.mainloop()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"631183974","text":"import os\r\nimport pandas as pd\r\n\r\nROOT_DIR = \"C:\\\\Users\\\\cy0sh\\\\work\\\\100Knocks\\\\datas\\\\Section1\"\r\nfiles = os.listdir(ROOT_DIR)\r\nfor file in files :\r\n _, ext = os.path.splitext(file)\r\n if ext != \".csv\" : continue \r\n file_contents = pd.read_csv( ROOT_DIR + \"\\\\\" + file )\r\n print( \"===========\" + file + \"===========\" )\r\n print( file_contents.head() )\r\n\r\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"572258068","text":"# -*- coding=utf8 -*-\nimport sqlite3\nimport os\n\n\nclass SQLite3Helper:\n __db = ''\n\n def __init__(self):\n str_path = os.path.dirname(os.path.realpath(__file__))+os.path.sep+\"..\"\n self.__db = str_path + \"/DB/info.db\"\n self.con = sqlite3.connect(self.__db)\n self.cur = self.con.cursor()\n\n def insert(self, sql):\n self.cur.execute(sql)\n self.con.commit()\n return 1\n\n def __del__(self):\n self.cur.close()\n self.con.close()\n\n\nif __name__ == '__main__':\n sh = SQLite3Helper()\n str_sql = \"insert into newsinfo(NEWS,DEL) values('中文信息数据存储及显示',0)\"\n i = sh.insert(str_sql)\n if i == 1:\n print(\"Success.\")\n else:\n print(\"Failed.\")\n\n","sub_path":"News/SQLHelper/SQLite3_helper.py","file_name":"SQLite3_helper.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"546984628","text":"from operator import attrgetter\nfrom typing import Dict, List, Tuple\n\nfrom rich import box\nfrom rich.color import ANSI_COLOR_NAMES\nfrom rich.console import RenderGroup\nfrom rich.layout import Layout\nfrom rich.panel import Panel\nfrom rich.progress import BarColumn, Progress, SpinnerColumn, TaskID, TextColumn\nfrom rich.table import Table\nfrom rich.tree import Tree\n\nfrom ..config import settings\nfrom ..source_control import PullRequest\n\n\ndef get_label_colour_map() -> Dict[str, str]:\n \"\"\"converts a comma separated list of organizations/repositories into a list\n of tuples.\n \"\"\"\n\n def _preproc(label_colour: str) -> List[str]:\n return label_colour.lower().split(sep=\"/\")\n\n return {\n label: f\"[{colour}]\"\n for label, colour in map(_preproc, settings.REVIEWS_LABEL_CONFIGURATION)\n if colour in ANSI_COLOR_NAMES\n }\n\n\ndef render_repository_does_not_exist(\n title: str,\n link: str,\n) -> Table:\n \"\"\"Renders a list of pull requests as a table\"\"\"\n table = Table(show_header=True, header_style=\"bold white\")\n table.add_column(\"#\", style=\"dim\", width=7)\n table.add_column(\n f\"[link={link}]{title}[/link]\",\n width=160,\n )\n\n table.add_row(\n \"\",\n \"Please confirm this repository exists and that you can access it before attempting to use it.\",\n )\n\n return table\n\n\ndef render_pull_request_table(\n title: str,\n pull_requests: 
List[PullRequest],\n) -> Table:\n \"\"\"Renders a list of pull requests as a table\"\"\"\n\n show_diff = False\n\n if pull_requests and pull_requests[0].repository_url:\n link = f\"[link={pull_requests[0].repository_url}]{title}[/link]\"\n else:\n link = f\"{title}\"\n\n table = Table(show_header=True, header_style=\"bold white\")\n table.add_column(\"#\", style=\"dim\", width=5)\n table.add_column(link, width=75)\n table.add_column(\"Labels\", width=30)\n table.add_column(\"Diff +/-\", width=10)\n table.add_column(\"Activity\", width=15)\n table.add_column(\"Approved\", width=10)\n table.add_column(\"Mergeable\", width=10)\n\n label_colour_map = get_label_colour_map()\n\n for pr in sorted(pull_requests, key=attrgetter(\"updated_at\"), reverse=True):\n\n row = [\n f\"[white]{pr.number} \",\n pr.render_title(),\n pr.render_labels(label_colour_map),\n ]\n\n if show_diff:\n row.append(pr.render_diff())\n else:\n row.append(\" - \")\n\n row.append(pr.render_updated_at())\n row.append(pr.render_approved())\n row.append(pr.render_approved_by_others())\n\n table.add_row(*row)\n\n return table\n\n\ndef generate_layout(log: bool = True, footer: bool = True) -> Layout:\n \"\"\"Define the layout for the terminal UI.\"\"\"\n layout = Layout(name=\"root\")\n\n sections = [Layout(name=\"header\", size=3), Layout(name=\"main\", ratio=1)]\n if footer:\n sections.append(Layout(name=\"footer\", size=7))\n layout.split(*sections)\n\n layout[\"main\"].split_row( # type: ignore\n Layout(name=\"left_side\", size=40),\n Layout(name=\"body\", ratio=2, minimum_size=90),\n )\n\n nav_sections = [Layout(name=\"configuration\")]\n if log:\n nav_sections.append(Layout(name=\"log\"))\n\n layout[\"left_side\"].split(*nav_sections)\n\n return layout\n\n\ndef generate_tree_layout(configuration: List[Tuple[str, str]]) -> RenderGroup:\n \"\"\"Generates a tree layout for the settings configuration\"\"\"\n organization_tree_mapping: Dict[str, Tree] = {}\n for (org, repo) in configuration:\n tree = organization_tree_mapping.get(f\"{org}\", Tree(f\"[white]{org}\"))\n tree.add(f\"[link=https://www.github.com/{org}/{repo}]{repo}[/link]\")\n organization_tree_mapping[org] = tree\n\n return RenderGroup(*organization_tree_mapping.values())\n\n\ndef generate_log_table(logs: List[Tuple[str, str]]) -> Table:\n \"\"\"Generetes a table for logging activity\"\"\"\n table = Table(\"Time\", \"Message\", box=box.SIMPLE)\n\n if logs:\n for log in logs:\n time, message = log\n table.add_row(time, message)\n\n return table\n\n\ndef generate_progress_tracker() -> Tuple[Progress, Progress, TaskID, Table]:\n \"\"\"Tracks the progress of tasks\"\"\"\n progress = Progress(\n \"{task.description}\",\n SpinnerColumn(),\n BarColumn(),\n TextColumn(\"[progress.percentage]{task.percentage:>3.0f}%\"),\n )\n progress.add_task(\"[white]Pull Requests\", total=100)\n\n total = sum(task.total for task in progress.tasks)\n overall_progress = Progress()\n overall_task = overall_progress.add_task(description=\"All\", total=int(total))\n\n progress_table = Table.grid(expand=True)\n progress_table.add_row(\n Panel(\n renderable=overall_progress, # type: ignore\n title=\"Next Refresh\",\n border_style=\"blue\",\n ),\n Panel(\n renderable=progress, # type: ignore\n title=\"[b]Next fetch for:\",\n border_style=\"blue\",\n padding=(1, 2),\n ),\n )\n\n return progress, overall_progress, overall_task, 
progress_table\n","sub_path":"reviews/layout/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"638459844","text":"# app/models.py\n\nfrom app import db\nfrom typing import List, Dict, Union\nfrom datetime import date\n\nbibliographies = db.Table(\n 'bibliographies',\n db.Column('book_id', db.Integer, db.ForeignKey('book.id'), primary_key=True),\n db.Column('author_id', db.Integer, db.ForeignKey('author.id'), primary_key=True)\n)\n\n\nclass Author(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), index=True)\n lastname = db.Column(db.String(30), index=True)\n bibliographies = db.relationship('Book', secondary=bibliographies, backref=\"authors\", lazy='subquery')\n\n def is_in_base(self, author_name: str, author_lastname: str) -> List[int]:\n \"\"\"checks if the author is in the database\"\"\"\n _authors = self.query.filter_by(name=author_name).all()\n if _authors is not None:\n author_id = [author.id for author in _authors if author.lastname == author_lastname]\n return author_id\n\n def add_author(self, author_name: str, author_lastname: str) -> object:\n \"\"\"adds author to database and returns it, if author exists in database returns existing author\"\"\"\n _id = self.is_in_base(author_name=author_name, author_lastname=author_lastname)\n if not bool(_id):\n author = Author(name=author_name, lastname=author_lastname)\n db.session.add(author)\n db.session.commit()\n return author\n return self.query.get(_id[0])\n\n def update(self, author_id: int, author_name: str, author_lastname: str) -> object:\n \"\"\"changes author's data and returns it, if author has more than one book, creates new author and returns him\"\"\"\n author = self.query.get(author_id)\n author_in_base_id = self.is_in_base(author_name=author_name, author_lastname=author_lastname)\n if len(author.bibliographies) > 1 and any([\n author.name != author_name,\n author.lastname != author_lastname\n ]):\n author = Author(name=author_name, lastname=author_lastname)\n db.session.add(author)\n db.session.commit()\n return author\n if bool(author_in_base_id):\n return self.query.get(author_in_base_id)\n author.name = author_name\n author.lastname = author_lastname\n db.session.commit()\n return author\n\n def delete(self, author_id: int) -> None:\n \"\"\"removes the author\"\"\"\n author = self.query.get(author_id)\n db.session.delete(author)\n db.session.commit()\n\n def __str__(self):\n return f\"Author <{self.name} {self.lastname}, id: {self.id}>\"\n\n\nclass Book(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(100), index=True, unique=True)\n genre_id = db.Column(db.Integer, db.ForeignKey('genre.id'))\n publisher_id = db.Column(db.Integer, db.ForeignKey('publisher.id'))\n rating = db.Column(db.Integer)\n description = db.Column(db.Text)\n borrowed_book_card_id = db.Column(db.Integer, db.ForeignKey('borrowed_book_card.id'))\n\n def __str__(self):\n return f\"Book \"\n\n def is_title_in_base(self, title: str) -> int:\n t = self.query.filter_by(title=title).first()\n if t is not None:\n return t.id\n\n def get_authors(self, book_id: int) -> List[Dict[str, str]]:\n \"\"\"gets the authors of the book and returns a list of authors\"\"\"\n book = self.query.get(book_id)\n author = book.authors\n authors = [{'name': names.name.title(), 'lastname': names.lastname.title()} for names in author]\n return authors\n\n def 
get_one(self, book_id: int) -> object:\n \"\"\"gets the details of the book\"\"\"\n book = self.query.get(book_id)\n author = book.get_authors(book.id)\n genre = Genre.query.get(book.genre_id)\n publisher = Publisher.query.get(book.publisher_id)\n status = BorrowedBookCard().get_status(book_id)\n book = {\n 'id': book.id,\n 'title': book.title,\n 'author_name': author[0]['name'],\n 'author_lastname': author[0]['lastname'],\n 'genre': genre.genre,\n 'publisher': publisher.name,\n 'rating': book.rating,\n 'description': book.description,\n 'status': status\n }\n print(author)\n return book\n\n def get_all(self):\n \"\"\"downloads books from database and returns book list\"\"\"\n books = []\n\n for book in self.query.all():\n authors = self.get_authors(book.id)\n author = [f\"{name['name']} {name['lastname']}\" for name in authors]\n authors = ', '.join(author)\n genre = Genre.query.get(book.genre_id).genre\n publisher = Publisher.query.get(book.publisher_id).name\n status = BorrowedBookCard().get_status(book.id)\n books.append({\n 'id': book.id,\n 'title': book.title,\n 'author': authors,\n 'genre': genre,\n 'publisher': publisher,\n 'rating': book.rating,\n 'description': book.description,\n 'status': status\n })\n return books\n\n def add_title(self, title: str, rating: int, description: str) -> object:\n \"\"\"adds a new title to the database and returns it,\n if the title is already in the database returns the existing title\"\"\"\n _id = self.is_title_in_base(title=title)\n if _id is None:\n book = Book(\n title=title.title(),\n rating=rating,\n description=description\n )\n db.session.add(book)\n db.session.commit()\n return book\n return self.query.get(_id)\n\n def add_book(self, details) -> None:\n \"\"\"adds a new book to the database\"\"\"\n book = self.add_title(\n title=details['title'].title(),\n rating=details['rating'],\n description=details['description']\n )\n author = Author().add_author(\n author_name=details['author_name'].title(),\n author_lastname=details['author_lastname'].title()\n )\n book.authors.append(author)\n genre = Genre().add_genre(genre=details['genre'].capitalize())\n genre.books.append(book)\n publisher = Publisher().add_publisher(publisher=details['publisher'].title())\n publisher.books.append(book)\n db.session.commit()\n\n def update(self, book_id: int, details: Dict[str, Union[str, int]]) -> None:\n book = self.query.get(book_id)\n genre = Genre().update(book.genre_id, details['genre'].capitalize())\n publisher = Publisher().update(book.publisher_id, details['publisher'].title())\n author = Author().update(\n book.authors[0].id,\n details['author_name'].title(),\n details['author_lastname'].title())\n book.authors.clear()\n book.authors.append(author)\n book.publisher_id = publisher.id\n book.genre_id = genre.id\n book.title = details['title'].title()\n book.rating = details['rating']\n book.description = details['description']\n db.session.commit()\n\n def delete(self, book_id: int) -> None:\n \"\"\"removes the book from the database\"\"\"\n book = self.query.get(book_id)\n db.session.delete(book)\n db.session.commit()\n\n\nclass Borrower(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), index=True)\n lastname = db.Column(db.String(30), index=True)\n borrow = db.relationship(\"BorrowedBookCard\", backref=\"borrow\", lazy='subquery')\n\n def is_in_base(self, borrower_name: str, borrower_lastname: str) -> List[int]:\n \"\"\"checks if the borrower is in the database and returns its id\"\"\"\n borrower = 
self.query.filter_by(lastname=borrower_lastname).all()\n if borrower is not None:\n borrower_id = [name.id for name in borrower if name.name == borrower_name]\n return borrower_id\n\n def add_borrower(self, borrower_name: str, borrower_lastname: str) -> object:\n \"\"\"adds borrower to database and returns it, if borrower is in database it returns existing one\"\"\"\n _id = self.is_in_base(borrower_name=borrower_name.title(), borrower_lastname=borrower_lastname.title())\n if not bool(_id):\n borrower = Borrower(name=borrower_name.title(), lastname=borrower_lastname.title())\n db.session.add(borrower)\n db.session.commit()\n return borrower\n return self.query.get(_id)\n\n def __str__(self):\n return f\"Borrower <{self.name} {self.lastname}>\"\n\n\nclass BorrowedBookCard(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n book_id = db.Column(db.Integer)\n borrower_id = db.Column(db.Integer, db.ForeignKey('borrower.id'))\n date_of_loan = db.Column(db.Date)\n date_of_borrow = db.Column(db.Date)\n date_of_return = db.Column(db.Date)\n borrowed = db.Column(db.Boolean)\n lend = db.relationship(\"Book\", backref=\"lend\", lazy='subquery')\n\n def is_in_base(self, book_id: int) -> int:\n \"\"\"checks if the card is in the database and returns the card id\"\"\"\n card = self.query.filter_by(book_id=book_id).first()\n if card is not None:\n return card.id\n\n def add_card(self, book_id: int) -> object:\n \"\"\"adds a card to the database and returns it, if the card exists, it returns the existing one\"\"\"\n _id = self.is_in_base(book_id)\n if _id is None:\n card = BorrowedBookCard(borrowed=False)\n db.session.add(card)\n db.session.commit()\n return card\n return self.query.get(_id)\n\n def borrow_book(self, book_id: int, borrower_name: str, borrower_lastname: str) -> None:\n \"\"\"sets borrowed to True\"\"\"\n book = Book().query.get(book_id)\n card = self.add_card(book_id)\n borrower = Borrower().add_borrower(borrower_name, borrower_lastname)\n borrower.borrow.append(card)\n card.book_id = book_id\n book.borrowed_book_card_id = card.id\n card.borrowed = True\n db.session.commit()\n\n def give_back_book(self, book_id: int) -> None:\n \"\"\"sets borrowed to False\"\"\"\n card = self.add_card(book_id)\n card.borrowed = False\n card.borrower_id = None\n db.session.commit()\n\n def get_status(self, book_id: int) -> str:\n \"\"\"checks if the book is on loan and returns its status\"\"\"\n book = Book().query.get(book_id)\n card_id = book.borrowed_book_card_id\n if card_id:\n card = self.query.get(card_id)\n if card.borrowed is True:\n return 'pożyczona'\n return 'na półce'\n\n def __str__(self):\n return f\"Borrow <{self.borrowed}>\"\n\n\nclass Publisher(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), index=True, unique=True)\n books = db.relationship(\"Book\", backref=\"publish\", lazy='subquery')\n\n def is_in_base(self, publisher_name: str) -> int:\n \"\"\"checks if the publisher is in the database and returns its id\"\"\"\n publisher = self.query.filter_by(name=publisher_name).first()\n if publisher is not None:\n return publisher.id\n\n def add_publisher(self, publisher: str):\n \"\"\"adds the publisher to the database and returns it,\n if the publisher is in the database, it returns the existing one\"\"\"\n _id = self.is_in_base(publisher_name=publisher)\n if _id is None:\n publisher = Publisher(name=publisher)\n db.session.add(publisher)\n db.session.commit()\n return publisher\n return self.query.get(_id)\n\n def delete(self, publisher_id: 
int) -> None:\n \"\"\"removes the publisher from the database\"\"\"\n publisher = self.query.get(publisher_id)\n db.session.delete(publisher)\n db.session.commit()\n\n def update(self, publisher_id: int, name: str) -> object:\n \"\"\"changes the publisher's data and returns the publisher,\n if the publisher has more than one book, add a new publisher to the database and return it\"\"\"\n publisher = self.query.get(publisher_id)\n publisher_in_base_id = self.is_in_base(publisher_name=name)\n if len(publisher.books) > 1 and publisher.name != name:\n publisher = Publisher(name=name)\n db.session.add(publisher)\n db.session.commit()\n return publisher\n if bool(publisher_in_base_id):\n return self.query.get(publisher_in_base_id)\n publisher.name = name\n db.session.commit()\n return publisher\n\n def __str__(self):\n return f\"Publisher <{self.name}, id: {self.id}>\"\n\n\nclass Genre(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n genre = db.Column(db.String(50), index=True, unique=True)\n books = db.relationship(\"Book\", backref=\"genre\", lazy='subquery')\n\n def is_in_base(self, genre_name: str) -> int:\n \"\"\"checks if genre is in the database and returns the genre id\"\"\"\n genre = self.query.filter_by(genre=genre_name).first()\n if genre is not None:\n return genre.id\n\n def add_genre(self, genre: str) -> object:\n \"\"\"adds genre to database and returns it, if genre is in database returns existing one\"\"\"\n _id = self.is_in_base(genre_name=genre)\n if _id is None:\n genre = Genre(genre=genre)\n db.session.add(genre)\n db.session.commit()\n return genre\n return self.query.get(_id)\n\n def delete(self, genre_id: int) -> None:\n \"\"\"removes genre from the database\"\"\"\n genre = self.query.get(genre_id)\n db.session.delete(genre)\n db.session.commit()\n\n def update(self, genre_id: int, name: str):\n \"\"\"renames genre and returns them, if there are more books in the genre, creates new ones and returns them\"\"\"\n genre = self.query.get(genre_id)\n genre_is_in_base_id = self.is_in_base(genre_name=name)\n if len(genre.books) > 1 and genre.genre != name:\n genre = Genre(genre=name)\n db.session.add(genre)\n db.session.commit()\n return genre\n if bool(genre_is_in_base_id):\n return self.query.get(genre_is_in_base_id)\n genre.genre = name\n db.session.commit()\n return genre\n\n def __str__(self):\n return f\"Genre <{self.genre}, id: {self.id}>\"\n","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"323294491","text":"# -*- coding: utf-8 -*-\n__author__ = \"hulinjun\"\n\nfrom django.conf.urls import url,include\nfrom rest_framework import routers\nfrom .views import TestcaseListViewSet,SubTestcaseListViewSet,ProjectListViewSet,TestEnvironmentViewSet,TestplanViewSet\n\n\nrouter = routers.DefaultRouter()\n#获取测试用例数据\nrouter.register(r'testcases', TestcaseListViewSet,base_name=\"testcases\")\nrouter.register(r'subtestcases',SubTestcaseListViewSet,base_name=\"subtestcases\")\nrouter.register(r'projects',ProjectListViewSet,base_name=\"projects\")\nrouter.register(r'testEnvironments',TestEnvironmentViewSet,base_name='testEnvironments')\nrouter.register(r'testplans',TestplanViewSet,base_name='testplans')\n\n\nurlpatterns = [\n 
url(r'^',include(router.urls)),\n]","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"183037506","text":"# -*- coding:utf-8 -*-\n'''\nCreated on Sep 2, 2014\n\n@author: xchliu\n\n@module:mega_web.resource.vip_manage\n'''\n\nfrom mega_web.entity.models import Vip\nfrom conf.GlobalConf import MSG_ERR_IP\n\n\n\nclass VipManage():\n    '''\n    '''\n    def __init__(self,request):\n        self.vip=request.get('vip')\n        self.domain=request.get('domain')\n        self.type=request.get('type')\n        self.stat=request.get('stat')\n        self.plant=request.get('plant')\n        self.id=request.get('id')\n    \n    def _data_check(self): \n        if not self.vip :\n            return False\n        return True\n    \n    def add_vip(self):\n        if not self._data_check():\n            return False,MSG_ERR_IP\n        v=Vip()\n        v.vip=self.vip\n        v.domain=self.domain\n        v.type=self.type\n        v.stat=self.stat\n        v.plant=self.plant \n        v.save()\n        return True,''\n    \n    def mod_vip(self):\n        if not self._data_check():\n            return False,MSG_ERR_IP\n        v=Vip.objects.get(id=self.id)\n        v.vip=self.vip\n        v.domain=self.domain\n        v.type=self.type\n        v.stat=self.stat\n        v.plant=self.plant \n        v.save()\n        return True,''\n    \n    \nclass VipGet():\n    '''\n    '''\n    def __init__(self):\n        self.vip=Vip\n    \n    def get_vip_list(self,type=1,count=10):\n        if count == 0:\n            vip_list=self.vip.objects.filter(type=type).order_by('stat','vip').values()\n        else:\n            vip_list=self.vip.objects.filter(type=type).order_by('stat','vip')[:count].values()\n        return vip_list\n    \n    def get_vip_by_ip(self,vip):\n        vip_list=self.vip.objects.filter(vip=vip).values()\n        return vip_list\n\ndef main():\n    return\nif __name__ == \"__main__\":\n    main()","sub_path":"src/mega_web/resource/vip_manage.py","file_name":"vip_manage.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"653927004","text":"# -*- coding: utf-8 -*-\nfrom kinopoisk.items import KinopoiskItem\nfrom scrapy import Spider\nfrom scrapy.http import Request\n\n\nclass KinopoiskSpider(Spider):\n    name = 'Kinopoisk'\n    allowed_domains = ['films.ru']\n    start_urls = ['https://www.films.ru/top/lists/322/filtr/all/sort/votes/']\n\n    def parse(self, response):\n        films = response.xpath(\"*//div[@class='poster']/a/@href\").extract()\n        for film in films:\n            absolute_url = 'https://www.films.ru' + film\n            yield Request(absolute_url, callback=self.parse_film)\n        if not response.xpath(\"//div[@class='navigator']//li[@class='arr'][3]/a/@href\"):\n            next_page_url = response.xpath(\"//div[@class='navigator']//li[@class='arr'][1]/a/@href\").extract_first()\n            # process next page\n        else:\n            next_page_url = response.xpath(\"//div[@class='navigator']//li[@class='arr'][3]/a/@href\").extract_first()\n        absolute_next_page_url = 'https://www.films.ru' + str(next_page_url)\n        yield Request(absolute_next_page_url)\n\n\n    def parse_film(self, response):\n        item = KinopoiskItem()\n        item['platform'] = 'Kinopoisk'\n        item['title'] = response.xpath('//h1[@class=\"moviename-big\"]/text()').extract_first()\n        item['director'] = response.xpath('//td[@itemprop=\"director\"]//a/text()').extract_first()\n        item['genre'] = response.xpath('//span[@itemprop=\"genre\"]//a/text()').extract_first()\n        temp_dict = response.url.split('/')\n        item['movie_id'] = '/' + temp_dict[-3] + '/' + temp_dict[-2] + '/'\n        item['date'] = response.xpath('//meta[@itemprop=\"dateCreated\"]/@content').extract_first()\n        item['country'] = 
response.xpath('//table[@class=\"info\"]//tr[2]//a/text()').extract_first()\n yield Request(url=response.url + 'like/', callback=self.parse_like, meta={'item': item})\n\n ## THIS NEEDS TO BE REWORKED TO ONLY GET LIKE ITEMS\n def parse_like(self, response):\n old_item = response.request.meta['item']\n old_item['recommended'] = response.xpath('//a[@class=\" b_gray i_orig\"]').xpath('@href').extract()\n return [old_item]","sub_path":"films/films/spiders/Kinopoisk.py","file_name":"Kinopoisk.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"374064344","text":"from numpy import loadtxt\nfrom numpy.fft import rfft\nfrom pylab import plot,show,figure\n\nfreq = arange(0,44100,1)\n\n#piano\npiano = loadtxt('ExamplesAndUseful\\piano.txt',float)\ncpiano = rfft(piano)\nfigure(1)\nplot(piano)\nfigure(2)\nplot(abs(cpiano[:10000]))\n\n#trumpet\ntrumpet = loadtxt('ExamplesAndUseful/trumpet.txt')\nctrumpet = rfft(trumpet)\nfigure(3)\nplot(trumpet)\nfigure(4)\nplot(abs(ctrumpet[:10000])**2)\nshow()\n\n","sub_path":"Chapter7/Ex7_3.py","file_name":"Ex7_3.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"312898040","text":"from pusion.core.combiner import *\nfrom pusion.util.constants import *\nfrom pusion.util.transformer import *\n\n\nclass BehaviourKnowledgeSpaceCombiner(TrainableCombiner):\n \"\"\"\n The :class:`BehaviourKnowledgeSpaceCombiner` (BKS) is adopted from the decision fusion method originally proposed by\n Huang, Suen et al. :footcite:`huang1993behavior`. BKS analyses the behaviour of multiple classifiers based on their\n classification outputs with respect to each available class.\n This behaviour is recorded by means of a lookup table, which is used for final combination of multiple\n classification outputs for a sample.\n\n .. footbibliography::\n\n \"\"\"\n\n _SUPPORTED_PAC = [\n (Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.REDUNDANT),\n ]\n\n SHORT_NAME = 'BKS'\n\n def __init__(self):\n super().__init__()\n self.unique_configs = None\n self.config_class_distribution = None\n self.n_classes = None\n\n def train(self, decision_tensor, true_assignments):\n \"\"\"\n Train the Behaviour Knowledge Space model (BKS) by extracting the classification configuration from all\n classifiers and summarizing samples of each true class that leads to that configuration. This relationship is\n recorded in a lookup table. 
Only crisp classification outputs are supported.\n\n :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.\n Tensor of crisp decision outputs by different classifiers per sample.\n\n :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.\n Matrix of crisp class assignments which are considered true for each sample during\n the training procedure.\n \"\"\"\n configs = decision_tensor_to_configs(decision_tensor)\n unique_configs = np.unique(configs, axis=0)\n self.n_classes = np.shape(true_assignments)[1]\n n_unique_configs = np.shape(unique_configs)[0]\n config_class_distribution = np.zeros((n_unique_configs, self.n_classes), dtype=int)\n\n for i in range(n_unique_configs):\n unique_config = unique_configs[i]\n # Determine identical classification configurations for each of which\n # the number of samples is accumulated per true class assignment.\n b = np.array([np.all(unique_config == configs, axis=1)] * self.n_classes).transpose()\n config_class_distribution[i] = np.sum(true_assignments, axis=0, where=b)\n\n self.unique_configs = unique_configs\n self.config_class_distribution = np.array(config_class_distribution)\n\n def combine(self, decision_tensor):\n \"\"\"\n Combine decision outputs by the Behaviour Knowledge Space (BKS) method. This procedure involves looking up the\n most representative class for a given classification output regarding the behaviour of all classifiers in the\n ensemble. Only crisp classification outputs are supported. If a trained lookup entry is not present for a\n certain classification configuration, no decision fusion can be made for the sample, which led to that\n configuration. In this case, the decision fusion is a zero vector.\n\n :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.\n Tensor of crisp decision outputs by different classifiers per sample.\n\n :return: A matrix (`numpy.array`) of crisp class assignments which are obtained by the best representative class\n for a certain classifier's behaviour per sample. Axis 0 represents samples and axis 1 the class labels\n which are aligned with axis 2 in ``decision_tensor`` input tensor.\n \"\"\"\n configs = decision_tensor_to_configs(decision_tensor)\n fused_decisions = np.zeros((len(decision_tensor[0]), self.n_classes))\n\n for i in range(len(configs)):\n # perform a lookup in unique_configs\n lookup = np.where(np.all(configs[i] == self.unique_configs, axis=1))[0]\n if lookup.size > 0:\n uc_index = lookup[0]\n # set the class decision according to the maximum sample numbers for this config\n fused_decisions[i, self.config_class_distribution[uc_index].argmax()] = 1\n return fused_decisions\n\n\nclass CRBehaviourKnowledgeSpaceCombiner(BehaviourKnowledgeSpaceCombiner):\n \"\"\"\n The :class:`CRBehaviourKnowledgeSpaceCombiner` is a modification of :class:`BehaviourKnowledgeSpaceCombiner` that\n also supports complementary-redundant decision outputs. Therefore the input is transformed, such that all missing\n classification assignments are considered as a constant, respectively. 
To use methods :meth:`train` and\n :meth:`combine` a coverage needs to be set first by the inherited :meth:`set_coverage` method.\n \"\"\"\n\n _SUPPORTED_PAC = [\n (Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.COMPLEMENTARY_REDUNDANT),\n ]\n\n def __init__(self):\n super().__init__()\n\n def train(self, decision_outputs, true_assignments):\n \"\"\"\n Train the Behaviour Knowledge Space model (BKS) by extracting the classification configuration from all\n classifiers and summarizing samples of each true class that leads to that configuration. This relationship is\n recorded in a lookup table. Only crisp classification outputs are supported.\n\n :param decision_outputs: `list` of `numpy.array` matrices, each of shape `(n_samples, n_classes')`,\n where `n_classes'` is classifier-specific and described by the coverage.\n Each matrix corresponds to one of `n_classifiers` classifiers and contains crisp decision outputs\n per sample.\n\n :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.\n Matrix of crisp class assignments which is considered true for each sample during\n the training procedure.\n \"\"\"\n t_decision_outputs = self.__transform_to_uniform_decision_tensor(decision_outputs, self.coverage)\n super().train(t_decision_outputs, true_assignments)\n\n def combine(self, decision_outputs):\n \"\"\"\n Combine decision outputs by the Behaviour Knowledge Space (BKS) method. This procedure involves looking up the\n most representative class for a given classification output regarding the behaviour of all classifiers in the\n ensemble. Only crisp classification outputs are supported. If a trained lookup entry is not present for a\n certain classification configuration, no decision fusion can be made for the sample, which led to that\n configuration. In this case, the decision fusion is a zero vector.\n\n :param decision_outputs: `list` of `numpy.array` matrices, each of shape `(n_samples, n_classes')`,\n where `n_classes'` is classifier-specific and described by the coverage.\n Each matrix corresponds to one of `n_classifiers` classifiers and contains crisp decision outputs\n per sample.\n\n :return: A matrix (`numpy.array`) of crisp class assignments which are obtained by the best representative class\n for a certain classifier's behaviour per sample. 
Axis 0 represents samples and axis 1 all the class\n labels which are provided by the coverage.\n\n \"\"\"\n t_decision_outputs = self.__transform_to_uniform_decision_tensor(decision_outputs, self.coverage)\n return super().combine(t_decision_outputs)\n\n @staticmethod\n def __transform_to_uniform_decision_tensor(decision_outputs, coverage):\n n_classifiers = len(decision_outputs)\n n_decisions = len(decision_outputs[0])\n n_classes = len(np.unique(np.concatenate(coverage)))\n # tensor for transformed decision outputs\n t_decision_outputs = np.negative(np.ones((n_classifiers, n_decisions, n_classes)))\n for i in range(n_classifiers):\n t_decision_outputs[i, :, coverage[i]] = decision_outputs[i].T\n return t_decision_outputs\n","sub_path":"pusion/core/behaviour_knowledge_space_combiner.py","file_name":"behaviour_knowledge_space_combiner.py","file_ext":"py","file_size_in_byte":8108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"227937401","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Normal, kl_divergence\n\nfrom torch.distributions import Normal\nfrom ..models.modules import Encoder, GaussianDecoder , GaussianLinearDecoder\n\nfrom typing import Tuple, Dict\n\ntorch.backends.cudnn.benchmark = True\n\n\n# Gaussian VAE model\nclass GaussianVAE(nn.Module):\n \"\"\"Variational auto-encoder model.\n\n Parameters\n ----------\n n_input\n Number of input genes\n n_labels\n Number of labels\n n_hidden\n Number of nodes per hidden layer\n n_latent\n Dimensionality of the latent space\n n_layers\n Number of hidden layers used for encoder and decoder NNs\n dropout_rate\n Dropout rate for neural networks\n\n \"\"\"\n\n def __init__(\n self,\n n_input: int,\n n_labels: int = 0,\n n_hidden: int = 128,\n n_latent: int = 10,\n n_layers: int = 1,\n dropout_rate: float = 0.1,\n latent_distribution: str = \"normal\",\n sigma_ldvae: float = None,\n ):\n super().__init__()\n self.n_latent = n_latent\n self.n_labels = n_labels\n self.latent_distribution = latent_distribution\n\n # z encoder goes from the n_input-dimensional data to an n_latent-d\n # latent space representation\n self.z_encoder = Encoder(\n n_input,\n n_latent,\n n_layers=n_layers,\n n_hidden=n_hidden,\n dropout_rate=dropout_rate,\n distribution=latent_distribution,\n )\n\n # decoder goes from n_latent-dimensional space to n_input-d data\n if not sigma_ldvae:\n self.decoder = GaussianDecoder(\n n_latent,\n n_input,\n n_layers=n_layers,\n n_hidden=n_hidden,\n )\n else:\n self.decoder = GaussianLinearDecoder(\n n_input=n_latent,\n n_output=n_input,\n sigma=sigma_ldvae,\n use_batch_norm=False\n )\n\n\n def get_latents(self, x):\n \"\"\"Returns the result of ``sample_from_posterior_z`` inside a list\n\n Parameters\n ----------\n x\n tensor of values with shape ``(batch_size, n_input)``\n\n Returns\n -------\n type\n one element list of tensor\n\n \"\"\"\n return [self.sample_from_posterior_z(x)]\n\n def sample_from_posterior_z(self, x, give_mean=False, give_cov=False, n_samples=5000):\n \"\"\"Samples the tensor of latent values from the posterior\n\n Parameters\n ----------\n x\n tensor of values with shape ``(batch_size, n_input)``\n give_mean\n is True when we want the mean of the posterior distribution rather than sampling (Default value = False)\n n_samples\n how many MC samples to average over for transformed mean (Default value = 5000)\n\n Returns\n -------\n type\n tensor of shape ``(batch_size, 
n_latent)``\n\n \"\"\"\n qz_m, qz_v, z = self.z_encoder(x)\n if not give_mean:\n samples = Normal(qz_m, qz_v.sqrt()).sample([n_samples])\n z = torch.mean(samples, dim=0)\n else:\n z = qz_m\n if give_cov:\n return z, qz_v\n else:\n return z\n\n def inference(self, x, n_samples=1):\n \"\"\"Helper function used in forward pass\"\"\"\n x_ = x\n # Sampling\n qz_m, qz_v, z = self.z_encoder(x_)\n\n if n_samples > 1:\n qz_m = qz_m.unsqueeze(0).expand((n_samples, qz_m.size(0), qz_m.size(1)))\n qz_v = qz_v.unsqueeze(0).expand((n_samples, qz_v.size(0), qz_v.size(1)))\n z = Normal(qz_m, qz_v.sqrt()).sample()\n\n px_m, px_v = self.decoder(z)\n\n return dict(\n px_m=px_m,\n px_v=px_v,\n qz_m=qz_m,\n qz_v=qz_v,\n z=z\n )\n\n def forward(self, x):\n \"\"\" Returns the reconstruction loss\n\n :param x: tensor of values with shape (batch_size, n_input)\n\n :return: the reconstruction loss and the Kullback divergences\n :rtype: 2-tuple of :py:class:`torch.FloatTensor`\n \"\"\"\n # Parameters for z latent distribution\n outputs = self.inference(x)\n qz_m = outputs[\"qz_m\"]\n qz_v = outputs[\"qz_v\"]\n px_m = outputs[\"px_m\"]\n px_v = outputs[\"px_v\"]\n z = outputs[\"z\"]\n \n # KL divergence\n mean = torch.zeros_like(qz_m)\n scale = torch.ones_like(qz_v)\n\n kl = kl_divergence(Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)).sum(dim=1)\n\n # Reconstruction loss\n reconst_loss = -Normal(px_m, torch.sqrt(px_v)).log_prob(x).sum(dim=-1)\n\n return reconst_loss, kl, 0.0\n\n\n\n\n\n\n","sub_path":"scvi/external/models/gaussian_vae.py","file_name":"gaussian_vae.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"35607858","text":"from django.core.urlresolvers import reverse\n\nfrom profile.eig_apps import views\n\n\nSIGN_OUT_LABEL = '>Sign out<'\n\n\ndef test_about_view_exposes_context_and_template(client):\n response = client.get(reverse('about'))\n\n assert response.context_data['about_tab_classes'] == 'active'\n assert response.template_name == [views.AboutView.template_name]\n\n\ndef test_not_signed_in_does_not_display_email(client):\n response = client.get(reverse('about'))\n\n assert 'You are signed in as' not in str(response.content)\n assert SIGN_OUT_LABEL not in str(response.content)\n","sub_path":"profile/eig_apps/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"288970505","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nVERSION = (0, 2, 0)\n\n__version__ = \".\".join(map(str, VERSION))\n__status__ = \"Alpha\"\n__description__ = \"Flexible & modular CMS powered by Flask and MongoDB\"\n__author__ = \"Bruno Rocha \"\n__email__ = \"quokka-developers@googlegroups.com\"\n__license__ = \"MIT License\"\n__copyright__ = \"Copyright 2013, Quokka Project / PythonHub.com\"\n\nimport os\ntry:\n from .core.admin import create_admin\n from .core.app import QuokkaApp\n # from .core.middleware import HTTPMethodOverrideMiddleware\n\n admin = create_admin() # 创建管理模块\nexcept:\n # Fix setup install:\n # If new environment not return error\n pass\n\n\ndef create_app(config=None, test=False, admin_instance=None, **settings):\n app = QuokkaApp('quokka') # 创建一个flask app\n app.config.from_object(config or 'quokka.settings')\n mode = os.environ.get('MODE', 'local')\n if test:\n mode = 'test'\n try:\n app.config.from_object('quokka.%s_settings' % mode)\n except ImportError:\n 
pass\n\n app.config.update(settings)\n\n if not test:\n app.config.from_envvar(\"QUOKKA_SETTINGS\", silent=True)\n else:\n app.config.from_envvar(\"QUOKKATEST_SETTINGS\", silent=True)\n\n # testing trick\n # with app.test_request_context():\n from .ext import configure_extensions\n configure_extensions(app, admin_instance or admin) # 配置拓展\n\n # app.wsgi_app = HTTPMethodOverrideMiddleware(app.wsgi_app)\n return app\n\n\ndef create_api(config=None, **settings):\n return None\n\n\ndef create_celery_app(app=None):\n from celery import Celery\n app = app or create_app()\n celery = Celery(__name__, broker=app.config['CELERY_BROKER_URL'])\n celery.conf.update(app.config)\n TaskBase = celery.Task\n\n class ContextTask(TaskBase):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n","sub_path":"quokka/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"393875541","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport glob\r\nimport logging\r\nfrom os import path\r\n\r\nimport numpy\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\nclass Character(object):\r\n def __init__(self, char, data):\r\n self.char = char\r\n self.data = data\r\n\r\n def cal_similarity(self, data):\r\n x, y = data.shape\r\n similarity = 1 - numpy.sum((self.data - data) ** 2) * 0.1 / (x * y)\r\n return self.char, similarity\r\n\r\n\r\nclass CharacterIdentify(object):\r\n def __init__(self, train_dir):\r\n self.train_data = self._load_train_data(train_dir)\r\n\r\n def _load_train_data(self, train_dir):\r\n train_data = []\r\n for fp in glob.glob(path.join(train_dir, '*', '*.txt')):\r\n char = path.basename(path.dirname(fp))\r\n with open(fp) as f:\r\n data = []\r\n for line in f.readlines():\r\n line_array = line.strip().split(' ')\r\n data.append(map(int, line_array))\r\n train_data.append(Character(char, numpy.array(data)))\r\n return train_data\r\n\r\n def identify(self, data, knn=3):\r\n if not self.train_data:\r\n return '?', 0.0\r\n result = [c.cal_similarity(data) for c in self.train_data]\r\n valid_result = sorted(result, key=lambda x: x[1], reverse=True)[0:knn]\r\n valid_chars, accuracy = zip(*valid_result)\r\n char_count = [valid_chars.count(char) for char in valid_chars]\r\n index = char_count.index(max(char_count))\r\n # 精确度为 该字符单个最高精确度 * 字符出现个数 / 有效字符个数\r\n return valid_chars[index], accuracy[index] * char_count[index] / knn\r\n","sub_path":"imagecode/imageIdentify.py","file_name":"imageIdentify.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"625880234","text":"from Cimpl import *\r\nimport Cimpl\r\n\r\nfile = choose_file()\r\noriginal_image = load_image(file)\r\n\r\n\r\ndef blue_channel(image: Cimpl.Image) -> Cimpl.Image:\r\n \"\"\"Function takes an image and applies a blue channel filter over the image \r\n without affecting the original image and returns a filtered new image.\r\n -Function written by Nathan Gomes - 101143780\r\n \r\n >>> blue_channel(original_image)\r\n \r\n \"\"\"\r\n\r\n new_image = copy(image)\r\n for pixel in image:\r\n x, y, (r, g, b) = pixel\r\n blue_increased = create_color(r - r, g - g, b)\r\n set_color(new_image, x, y, blue_increased)\r\n\r\n return new_image\r\n\r\n\r\ndef blue_channel_test(image: Cimpl.Image) -> 
str:\r\n \"\"\"Function accepts an image and calls blue_channel function on the image\r\n and checks whether the filter has been properly applied for each pixel, \r\n printing the result and showing the picture.\r\n -Function written by Nathan Gomes - 101143780\r\n \r\n >>>blue_channel_test(original_image)\r\n Test passed. Channel filter works as expected.\r\n \"\"\"\r\n test = blue_channel(image)\r\n red_or_green = 0\r\n for pixel in test:\r\n x, y, (r, g, b) = pixel\r\n if r > 0 or g > 0:\r\n red_or_green += 1\r\n if red_or_green > 0:\r\n show(test)\r\n print(\"Test failed. Filter does not work properly.\")\r\n else:\r\n show(test)\r\n print(\"Test passed. Channel filter works as expected.\")\r\n\r\n\r\nblue_image = blue_channel(original_image)\r\n","sub_path":"Module 2/Part 1/L5_6_P2_blue.py","file_name":"L5_6_P2_blue.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"242709844","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom .models import Auction\nfrom user_profile.models import Profile\nfrom .form import AuctionForm\nfrom item.models import Item\nimport json\n\ndef all_auction_json(request):\n # get the last 10 auction records\n # if the number of records are less than 10, get them all\n obj_dict = {'auctions' : []}\n objects = Auction.objects.all()\n if Auction.objects.count() >= 10:\n objects = objects.order_by('-id')[:10]\n \n for obj in objects:\n obj_dict['auctions'].append(obj.to_dict())\n \n obj_json = json.loads(\n json.dumps(obj_dict)\n )\n\n return JsonResponse(obj_json)\n\ndef home_view(request):\n obj = Auction.objects.filter(active = True)\n \n if obj.count() >= 10: \n obj = obj.order_by('-id')[:10]\n\n context = {\n 'obj' : obj\n }\n return render(request, 'auction/home_view.html', context)\n\n\ndef auction_json(request, id):\n obj = get_object_or_404(Auction, id=id)\n obj.update_active()\n\n obj_dict = obj.to_dict()\n obj_json = json.loads(\n json.dumps(obj_dict)\n )\n return JsonResponse(obj_json)\n\n\ndef auction_view(request, id):\n # The parameter of the URL is passed into html file via context.\n # ID will be used for dynamic url.\n object = get_object_or_404(Auction, id=id)\n object.update_active()\n context = {\n 'obj' : object,\n 'id' : id,\n\n }\n return render(request, 'auction/view_auction_listing.html', context)\n\ndef all_auctions(request):\n obj = Auction.objects.all()\n context = {\n 'obj' : obj\n }\n\n return render(request, 'auction/view_all_auctions.html', context)\n\n@login_required\ndef add_auction_view(request):\n current_profile = get_object_or_404(Profile, user=request.user)\n form = AuctionForm(user=current_profile)\n if request.method == 'POST':\n form = AuctionForm(request.POST, user=current_profile)\n if form.is_valid():\n form.cleaned_data['seller'] = current_profile\n obj = Auction.objects.create(**form.cleaned_data)\n return redirect('auction_view', id=obj.id)\n\n\n context = {\n 'form' : form\n }\n return render(request, 'auction/add_auction.html', context)\n\n@login_required\ndef set_bid(request, id):\n obj = get_object_or_404(Auction, id=id)\n obj.update_active()\n if request.method =='POST':\n user_bid = float(request.POST['current_bid'])\n user_profile = Profile.objects.filter(user__username=request.POST['username'])[0]\n \n if user_bid <= obj.item.starting_price or user_bid <= obj.current_bid:\n return 
JsonResponse({\n 'success' : False,\n 'message' : 'The bid is less than the minimum bid'\n })\n \n obj.current_bid = user_bid\n obj.highest_bidder = user_profile\n obj.save()\n\n return JsonResponse({\n 'success' : True,\n 'message' : 'Current bid price updated in the server'\n })\n\n\ndef search(request):\n print(\"query started\")\n if request.method=='POST':\n res_dict = {'results' : []}\n query = request.POST['query']\n results = Auction.objects.filter(item__title__contains=query)\n if not results:\n return JsonResponse({'results' : []})\n else:\n query = ''\n \n results = Auction.objects.filter(item__title__contains=query)\n\n context = {\n 'obj' : results\n }\n for result in results:\n result.update_active()\n res_dict['results'].append(result.to_dict())\n\n res_json = json.loads(\n json.dumps(res_dict)\n )\n \n return JsonResponse(res_json)\n\n\n\n\n","sub_path":"auction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"174213198","text":"def changeChar(c, word, case):\n\tw = [i for i in word]\n\tvowel = ['a', 'e', 'i', 'o', 'u']\n\tif(case=='vowel'):\n\t\tfor i in range (len(w)):\n\t\t\tif w[i].lower() in vowel:\n\t\t\t\tw[i] = c\n\telse:\n\t\tfor i in range (len(w)):\n\t\t\tif not w[i].lower() in vowel:\n\t\t\t\tw[i] = c\n\treturn \"\".join(w)\ndef stringTransformation(array):\n\tarray[0] = changeChar('$', array[0], 'vowel')\n\tarray[1] = changeChar('#', array[1], 'cons')\n\tarray[2] = array[2].upper()\n\treturn \"\".join(array)\n","sub_path":"a1/StringTransformation.py","file_name":"StringTransformation.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"456674101","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport djangocms_text_ckeditor.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('domecek', '0015_contact_public'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='clubjournalentry',\n name='agenda',\n field=djangocms_text_ckeditor.fields.HTMLField(default='

pou\\u010den\\xed o BOZP

', verbose_name='agenda'),\n preserve_default=True,\n ),\n ]\n","sub_path":"domecek/migrations/0016_agenda_default.py","file_name":"0016_agenda_default.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"318903304","text":"def profile_assetrank(header_name, sheet, range_expr, cursor):\n\tcursor.execute(\"SELECT asr_asset_rank_description FROM asm_assetrank ORDER BY 1;\") \n\trow = cursor.fetchone()\n\tqdata = [] \n\twhile row: \n\t\tqdata.append(row[0]) \n\t\trow = cursor.fetchone()\n \n\trow_number = 1\n\tresult = []\n\tfor row in sheet.iter_rows(range_string=range_expr):\n\t\terror_msg = \"\"\n\t\trow_number += 1\n\t\tfor cell in row:\n\t\t\tif cell.value == None:\n\t\t\t\terror_msg += \"Asset rank required; \"\n\t\t\telse:\n\t\t\t\tif cell.value not in qdata:\n\t\t\t\t\terror_msg += \"Asset rank not found; \"\n\t\t\t\tif len(str(cell.value)) > 50:\n\t\t\t\t\terror_msg += \"Value is longer than 50 characters; \"\n \t\n\t\t\t#create row if errors found\n\t\t\tif error_msg != \"\":\n\t\t\t\tresult.append([cell.value, row_number, error_msg])\n\treturn result","sub_path":"profile_assetrank.py","file_name":"profile_assetrank.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"422219012","text":"from sklearn.base import ClassifierMixin, BaseEstimator\r\nfrom sklearn.exceptions import NotFittedError\r\nimport xgboost as xgb\r\nimport numpy as np\r\n\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nclass ClassifierMixinAuc(ClassifierMixin):\r\n def score(self, X, y, sample_weight=None):\r\n return roc_auc_score(y,self.predict(X))\r\n\r\nclass RandomForestClassifierAuc(RandomForestClassifier,ClassifierMixinAuc):\r\n pass\r\n\r\n\r\nclass XGBRegressor(BaseEstimator,ClassifierMixinAuc):\r\n def __init__(self,colsample_bytree=1,subsample=1,max_depth=3,min_child_weight=6,gamma=0,eta=0.3,num_round = 20): #Important to put the parameters of the base estimator here !\r\n self.colsample_bytree = colsample_bytree\r\n # Subsample ratio of columns when constructing each tree.\r\n self.subsample = subsample\r\n # Subsample ratio of the training instance.\r\n # Setting it to 0.5 means that XGBoost randomly collected half of the data instances\r\n # to grow trees and this will prevent overfitting.\r\n self.eta = eta\r\n # Step size shrinkage used in update to prevents overfitting.\r\n # After each boosting step, we can directly get the weights of new features.\r\n # And eta actually shrinks the feature weights to make the boosting process more conservative.\r\n self.gamma = gamma\r\n # Minimum loss reduction required to make a further partition on a leaf node of the tree.\r\n # The larger, the more conservative the algorithm will be.\r\n self.min_child_weight = min_child_weight\r\n # Minimum sum of instance weight (hessian) needed in a child.\r\n self.max_depth = max_depth\r\n # Maximum depth of a tree\r\n self.num_round = num_round\r\n\r\n def fit(self,X,y):\r\n params = {\"colsample_bytree\": self.colsample_bytree,\r\n \"subsample\":self.subsample,\r\n \"eta\": self.eta,\r\n \"gamma\" : self.gamma,\r\n \"min_child_weight\": self.min_child_weight,\r\n \"max_depth\":self.max_depth,\r\n \"silent\":1,\r\n 'objective': 'binary:logistic'}\r\n self.regressor = xgb.train(params,xgb.DMatrix(X,y),num_boost_round=self.num_round)\r\n self.length = X.shape[1]\r\n\r\n 
@property\r\n def feature_importances_(self):\r\n if self.regressor is None:\r\n raise NotFittedError(\"Estimator not fitted, call `fit` before\"\r\n \" `feature_importances_`.\")\r\n fscore = self.regressor.get_fscore()\r\n findexes = np.sort([int(s[1:]) for s in fscore.keys()])\r\n findexes_null = [i for i in range(0, self.length) if not any(findexes == i)]\r\n for findindex in findexes_null:\r\n fscore['f'+str(findindex)] = 0\r\n importance = np.array([fscore['f' + str(i)] for i in range(0, self.length)])\r\n importance = importance/importance.sum()\r\n return importance\r\n\r\n def predict(self, X):\r\n pred = self.regressor.predict(xgb.DMatrix(X))\r\n return (np.sign(pred - 0.5)) / 2 + 0.5\r\n\r\n def predict_proba(self, X):\r\n pred = self.regressor.predict(xgb.DMatrix(X))\r\n return np.array([1 - pred, pred]).T","sub_path":"regressor.py","file_name":"regressor.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"77033233","text":"import collections\n\nimport amo\nfrom applications.models import AppVersion\n\n\ndef es_dict(items):\n if not items:\n return {}\n if hasattr(items, 'items'):\n items = items.items()\n return [{'k': key, 'v': value} for key, value in items]\n\n# We index all the key/value pairs as lists of {'k': key, 'v': value} dicts\n# so that ES doesn't include every single key in the update_counts mapping.\n\"\"\"\n{'addon': addon id,\n 'date': date,\n 'count': total count,\n 'id': some unique id,\n 'versions': [{'k': addon version, 'v': count}]\n 'os': [{'k': amo.PLATFORM.name, 'v': count}]\n 'locales': [{'k': locale, 'v': count} # (all locales lower case)\n 'apps': {amo.APP.guid: [{'k': app version, 'v': count}}]\n 'status': [{'k': status, 'v': count}\n\"\"\"\ndef extract_update_count(update, all_apps=None):\n doc = {'addon': update.addon_id,\n 'date': update.date,\n 'count': update.count,\n 'id': update.id,\n 'versions': es_dict(update.versions),\n 'os': [],\n 'locales': [],\n 'apps': [],\n 'status': []}\n\n # Only count platforms we know about.\n if update.oses:\n os = collections.defaultdict(int)\n for key, count in update.oses.items():\n if key.lower() in amo.PLATFORM_DICT:\n os[amo.PLATFORM_DICT[key.lower()].name] += count\n doc['os'] = es_dict((unicode(k), v) for k, v in os.items())\n\n # Case-normalize locales.\n if update.locales:\n locales = collections.defaultdict(int)\n for locale, count in update.locales.items():\n try:\n locales[locale.lower()] += int(count)\n except ValueError:\n pass\n doc['locales'] = es_dict(locales)\n\n # Only count app/version combos we know about.\n if update.applications:\n if all_apps is None:\n all_apps = get_all_app_versions()\n apps = collections.defaultdict(dict)\n for guid, version_counts in update.applications.items():\n if guid not in amo.APP_GUIDS:\n continue\n app = amo.APP_GUIDS[guid]\n app_versions = all_apps[app.id]\n for version, count in version_counts.items():\n if version in app_versions:\n try:\n apps[app.guid][version] = int(count)\n except ValueError:\n pass\n doc['apps'] = dict((app, es_dict(vals)) for app, vals in apps.items())\n\n if update.statuses:\n doc['status'] = es_dict((k, v) for k, v in update.statuses.items()\n if k != 'null')\n return doc\n\n\ndef extract_download_count(dl):\n return {'addon': dl.addon_id,\n 'date': dl.date,\n 'count': dl.count,\n 'sources': es_dict(dl.sources) if dl.sources else {},\n 'id': dl.id}\n\n\ndef get_all_app_versions():\n vals = AppVersion.objects.values_list('application', 
'version')\n rv = collections.defaultdict(list)\n for app, version in vals:\n rv[app].append(version)\n return dict(rv)\n","sub_path":"apps/stats/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"202273348","text":"\"\"\"This is a script to update a DNS record hosted on Dreamhost's servers.\n\nThis script requires a Dreamhost API key to work.\nSee README.md for an explanation of the required config file.\n\"\"\"\n\nimport json\nimport uuid\nimport requests\n\n\ndef load_config_file(filename):\n \"\"\"Return dictionary of parameters from config file.\"\"\"\n try:\n fp = open(filename)\n except IOError:\n print(\"Error opening file \" + filename)\n raise\n\n try:\n params = json.load(fp)\n return params\n except ValueError:\n print(\"Config file malformed.\")\n raise\n finally:\n fp.close()\n\n\ndef get_public_ip():\n \"\"\"Return the current public IP from https://ipify.org.\"\"\"\n r = requests.get('https://api.ipify.org?format=json')\n r.raise_for_status()\n ip_dict = r.json()\n if \"ip\" in ip_dict:\n return ip_dict[\"ip\"]\n else:\n raise ValueError(\"Error getting public IP.\")\n\n\ndef get_dns_records(params):\n \"\"\"Return dictionary of all DNS records in Dreamhost account.\"\"\"\n url = \"https://api.dreamhost.com/\" + \\\n \"?key=\" + str(params[\"api_key\"]) + \\\n \"&cmd=dns-list_records\" + \\\n \"&unique_id=\" + str(uuid.uuid4()) + \\\n \"&format=json\"\n r = requests.get(url)\n r.raise_for_status()\n records = r.json()\n if \"data\" in records:\n return records[\"data\"]\n else:\n raise ValueError(\"Error getting records from Dreamhost.\")\n\n\ndef get_record_if_exists(dh_records, params):\n \"\"\"Checks to see if record specified in config.json exists in current Dreamhost records.\"\"\"\n for record in dh_records:\n if record[\"record\"] == params[\"record\"] and record[\"type\"] == params[\"type\"]:\n # Return Dreamhost record if record does currently exist.\n return record\n # Return empty dictionary if record does not currently exist on Dreamhost.\n return {}\n\n\ndef remove_record(dh_record, params):\n \"\"\"Remove the given record from Dreamhost's servers.\"\"\"\n url = \"https://api.dreamhost.com/\" + \\\n \"?key=\" + str(params[\"api_key\"]) + \\\n \"&cmd=dns-remove_record\" + \\\n \"&record=\" + str(dh_record[\"record\"]) + \\\n \"&type=\" + str(dh_record[\"type\"]) + \\\n \"&value=\" + str(dh_record[\"value\"]) + \\\n \"&unique_id=\" + str(uuid.uuid4()) + \\\n \"&format=json\"\n r = requests.get(url)\n r.raise_for_status()\n status = r.json()\n if \"result\" in status:\n if status[\"result\"] != \"success\":\n raise ValueError(\"Unsuccessful in removing DNS record from Dreamhost.\")\n else:\n raise ValueError(\"Unknown response from Dreamhost API.\")\n\n\ndef add_record(params):\n \"\"\"Add the given record to Dreamhost's servers.\"\"\"\n value = get_public_ip() if params[\"value\"] == \"public ip\" else params[\"value\"]\n url = \"https://api.dreamhost.com/\" + \\\n \"?key=\" + str(params[\"api_key\"]) + \\\n \"&cmd=dns-add_record\" + \\\n \"&record=\" + str(params[\"record\"]) + \\\n \"&type=\" + str(params[\"type\"]) + \\\n \"&value=\" + str(value) + \\\n \"&unique_id=\" + str(uuid.uuid4()) + \\\n \"&format=json\"\n r = requests.get(url)\n r.raise_for_status()\n status = r.json()\n if \"result\" in status:\n if status[\"result\"] != \"success\":\n raise ValueError(\"Unsuccessful in adding new DNS record to Dreamhost.\")\n else:\n 
raise ValueError(\"Unknown response from Dreamhost API.\")\n\n\ndef main():\n \"\"\"Update the given DNS record on Dreamhost's servers with the new value.\n\n If the record already exists, delete the record, and then re-add the record with the new value.\n If the record does not already exist, add the record with the new value.\n \"\"\"\n params = load_config_file(\"config.json\")\n updated = False\n\n records = get_dns_records(params)\n record_exists = get_record_if_exists(records, params)\n if record_exists:\n if not (record_exists[\"value\"] == params[\"value\"] and record_exists[\"type\"] == params[\"type\"]):\n remove_record(record_exists, params)\n add_record(params)\n updated = True\n else:\n add_record(params)\n updated = True\n\n # If record was updated, check that update was successful.\n if updated:\n records = get_dns_records(params)\n success = get_record_if_exists(records, params)\n if success:\n print(\"Successfully updated DNS record.\")\n else:\n print(\"Failed to update DNS record.\")\n else:\n print(\"DNS record already exists on Dreamhost.\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"update_dns.py","file_name":"update_dns.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"543842953","text":"# -*- coding: UTF-8 -*-\nimport os\nimport time\nimport pymysql\nimport scrapy\n\nfrom twisted_IO import mains\n\nsql_list = []\nstart_time = time.time()\nroot_dir = r'C:\\Users\\Administrator.DESKTOP-DV7S27B\\Desktop\\IO\\行业复测'\nlists = os.listdir(root_dir) # 列出文件夹下所有的目录与文件\nfor i in range(0, len(lists)):\n path = os.path.join(root_dir, lists[i])\n if os.path.isfile(path): # #判断路径是否为文件\n with open(path, 'r', encoding='utf-8') as f: # 要读取非UTF-8编码的文本文件,需要给open()函数传入encoding参数\n page_source = f.read()\n page_sel = scrapy.Selector(text=page_source)\n result = page_sel.xpath('//*[@class=\"jsx-1039848762 table-section\"]')\n # result = page_sel.xpath('//*[@class=\"jsx-732187119 table-body\"]')\n # result = page_sel.xpath('//*[@class=\"jsx-2833694364 table-section\"]')\n # print('打印第一次解析的结果', result)\n for i in result:\n # print(i.extract())\n page_sel = scrapy.Selector(text=i.extract())\n name = page_sel.xpath('/html/body/div/div/div[1]/div/div[2]/div[1]/a/text()').extract_first() # 公司名\n brief = page_sel.xpath('/html/body/div/div/div[1]/div/div[2]/div[2]/text()').extract_first() # 简介\n industry = page_sel.xpath('/html/body/div/div/div[2]//text()').extract_first() # 行业\n city = page_sel.xpath('/html/body/div/div/div[3]/text()').extract_first() # 城市\n times = page_sel.xpath('/html/body/div/div/div[4]/text()').extract_first() # 融资时间\n finance = page_sel.xpath('/html/body/div/div/div[5]/text()').extract_first() # 融资时间\n money = page_sel.xpath('/html/body/div/div/div[6]/text()').extract_first() # 融资金额\n agencys = page_sel.xpath('/html/body/div/div/div[7]//text()').extract() # 融资机构\n agency = ''.join([agency for agency in agencys])\n localtime = time.localtime(time.time())\n str_time = time.strftime(\"%Y-%m-%d\", localtime)\n db = pymysql.connect(\"192.168.103.31\", \"root\", \"adminadmin\", \"company\")\n # print(name, brief, industry, city, times, finance, money, agency)\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # SQL 插入语句\n sql = \"\"\"INSERT INTO XINIU1203(project_name,brief,industry,city,finance_time,finance,money,agency,legal_person,legal_name,registered_capital,competing_product,past_financing,teams,types,insert_times) VALUES 
(\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%d\",\"%s\")\"\"\" % (\n name, brief, industry, city, times, finance, money, agency, '', '', '', '', '', '', 1, str_time)\n try:\n # 执行sql语句\n cursor.execute(sql)\n # 提交到数据库执行\n db.commit()\n except Exception as e:\n print(e)\n # 如果发生错误则回滚\n db.rollback()\n print(name, brief, industry, city, times, finance, money, agency)\n db.close()\nend_time = time.time()\nprint('花费时间', end_time - start_time)\n","sub_path":"IDGdemo/IO/行业复测IO.py","file_name":"行业复测IO.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"579582809","text":"import copy\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torchvision.models import resnet50\nfrom torchvision.models.resnet import Bottleneck\n\nfrom models.adapter import Adapter\nfrom models.matching_module import MatchingModule\nfrom models.variational_autoencoders.conv_vae import ConvVAE\n\n\nclass GMNETCNet(nn.Module):\n \"\"\"\n The GMNETCNet is a variant of the Generic Matching Network (GMN) that combines adapter\n connections and Variational Autoencoders.\n \"\"\"\n\n def __init__(self, output_matching_size: Tuple[int, int] = None):\n super(GMNETCNet, self).__init__()\n\n self.vae = ConvVAE()\n for p in self.vae.parameters():\n p.requires_grad = False\n self.vae.train(False)\n\n resnet_model = resnet50(pretrained=True)\n self.cut_resnet = nn.Sequential(*(list(resnet_model.children())[:6]))\n for module in self.cut_resnet.modules():\n if isinstance(module, Bottleneck) and isinstance(module.conv2, nn.Conv2d):\n module.conv2 = Adapter(module.conv2)\n\n self.cut_resnet_template = copy.deepcopy(self.cut_resnet)\n\n self.adapt_pool = nn.AdaptiveAvgPool2d(1)\n self.batch_norm = nn.BatchNorm2d(512)\n\n self.matching_model = MatchingModule(1024, output_matching_size)\n self.output_conv = nn.Conv2d(256, 1, 3, stride=1, padding=1)\n\n def forward(self, x: Tensor, x_object: Tensor, template: Tensor) -> Tensor:\n\n mu, logvar = self.vae.encoder(template)\n template_weights = self.vae.reparametrize(mu, logvar)\n template_weights = template_weights[0].repeat(3, 1, 1, 1)\n template_weights = template_weights.permute(1, 0, 2, 3)\n for module in self.cut_resnet.modules():\n if isinstance(module, nn.Conv2d):\n module.weight = torch.nn.Parameter(template_weights, requires_grad=False)\n break\n\n for module in self.cut_resnet_template.modules():\n if isinstance(module, nn.Conv2d):\n module.weight = torch.nn.Parameter(template_weights, requires_grad=False)\n break\n\n x_object = self.cut_resnet_template(x_object)\n x_object = self.adapt_pool(x_object)\n x_object = F.normalize(x_object, p=2, dim=-1)\n\n x = self.batch_norm(self.cut_resnet(x))\n\n x_object = x_object.repeat(1, 1, x.shape[-2], x.shape[-1])\n outputs = torch.cat((x, x_object), 1)\n\n outputs = self.matching_model(outputs)\n outputs = self.output_conv(outputs)\n return outputs\n\n def load_vae(self, path: str):\n self.vae.load_state_dict(torch.load(path))\n","sub_path":"src/models/density_counting/gmn_etcnet.py","file_name":"gmn_etcnet.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"290740398","text":"'''\r\n617. 
Merge Two Binary Trees [Easy]\r\nGiven two binary trees and imagine that when you put one of them to cover the other,\r\nsome nodes of the two trees are overlapped while the others are not.\r\n\r\nYou need to merge them into a new binary tree.\r\nThe merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node.\r\nOtherwise, the NOT null node will be used as the node of new tree.\r\n\r\n给定两个二叉树,想象当你将它们中的一个覆盖到另一个上时,两个二叉树的一些节点便会重叠。\r\n\r\n你需要将他们合并为一个新的二叉树。合并的规则是如果两个节点重叠,那么将他们的值相加作为节点合并后的新值,\r\n否则不为 NULL 的节点将直接作为新二叉树的节点。\r\n\r\n\r\n\r\nExample 1:\r\nInput:\r\n\tTree 1 Tree 2\r\n 1 2\r\n / \\ / \\\r\n 3 2 1 3\r\n / \\ \\\r\n 5 4 7\r\nOutput:\r\n\r\nMerged tree:\r\n\t 3\r\n\t / \\\r\n\t 4 5\r\n\t / \\ \\\r\n\t 5 4 7\r\n\r\n\r\nNote: The merging process must start from the root nodes of both trees.\r\n\r\n\r\n[Method 1]: Recursion\r\n[Time]: O(m), a total of m nodes need to be traversed.\r\nHere, m represents the minimum number of nodes from the two given trees.\r\n[Space]: O(m). The depth of the recursion tree can go upto m in the case of a skewed tree.\r\nIn average case, depth will be O(logm).\r\nRuntime: 88 ms, faster than 67.38% of Python3 online submissions for Merge Two Binary Trees.\r\nMemory Usage: 14.7 MB, less than 5.72% of Python3 online submissions for Merge Two Binary Trees.\r\n'''\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:\r\n if not t1 or not t2: return t2 or t1\r\n root = TreeNode(t1.val + t2.val)\r\n root.left = self.mergeTrees(t1.left, t2.left)\r\n root.right = self.mergeTrees(t1.right, t2.right)\r\n return root\r\n\r\n'''\r\n#Or:\r\n[Space]: O(m)\r\n'''\r\ndef mergeTrees2(self, t1: TreeNode, t2: TreeNode) -> TreeNode:\r\n if not t1:\r\n return t2\r\n if not t2:\r\n return t1\r\n t1.val += t2.val\r\n t1.left = self.mergeTrees2(t1.left, t2.left)\r\n t1.right = self.mergeTrees2(t1.right, t2.right)\r\n return t1\r\n\r\n'''\r\n[Method 2]: Iteration + Stack (dfs)\r\n如果两树相应位置都不为None, 则一起入栈。\r\n[Time]: O(m),worst case为当两树都平衡时,则要traverse一个树的所有节点m;\r\n[Space]: O(m/2),worst case为当两树都平衡时,stack最多储存left或者right上的节点总数。\r\nRuntime: 92 ms, faster than 39.02% of Python3 online submissions for Merge Two Binary Trees.\r\nMemory Usage: 14.2 MB, less than 20.00% of Python3 online submissions for Merge Two Binary Trees.\r\n'''\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:\r\n if not t1 or not t2: return t2 or t1\r\n stack1, stack2 = [t1], [t2]\r\n while stack1: #入栈是两个栈一起入\r\n node1, node2 = stack1.pop(), stack2.pop()\r\n node1.val += node2.val\r\n if node1.left and node2.left:\r\n stack1.append(node1.left)\r\n stack2.append(node2.left)\r\n elif node2.left: #若t1左树为空,则把t2左树全部移过来\r\n node1.left = node2.left\r\n if node1.right and node2.right:\r\n stack1.append(node1.right)\r\n stack2.append(node2.right)\r\n elif node2.right:\r\n node1.right = node2.right\r\n return t1\r\n","sub_path":"ByTags/Tree/BT/Merge_Two_Binary_Trees.py","file_name":"Merge_Two_Binary_Trees.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"340965958","text":"\n# 
https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot\n\n\nfig = matplotlib.pyplot.figure()\nax = fig.add_subplot(111, projection='3d')\n\n\nxOrigo = 0\nyOrigo = 0\nzOrigo = 0\nax.scatter(xOrigo, yOrigo, zOrigo, c='black', marker='x')\n\nx = 10\ny = 20\nz = 30\nax.scatter(x, y, z, c='red', marker='o')\n\nx = 10\ny = 70\nz = 40\nax.scatter(x, y, z, c='red', marker='o')\n\n\n\nax.set_xlim(-100, 100)\nax.set_ylim(-100, 100)\nax.set_zlim(-100, 100)\n\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\n\nmatplotlib.pyplot.show()","sub_path":"TestMatPlotLib3D.py","file_name":"TestMatPlotLib3D.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"212062350","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport os\nimport platform\n\n\ndef delayFechar(tempo):\n for ind in range(tempo, 0, -1):\n print(\"Browser fechando em \" + str(ind) + \" segundos\")\n time.sleep(1)\n\n\ndef renomeador(links, palavra_chave):\n i = 1\n for link in links:\n nome_do_arquivo = link[38:]\n os.rename(\"PDFs/\" + nome_do_arquivo, \"PDFs/\" + palavra_chave + \" \" + str(i) + \".pdf\")\n i += 1\n\n\nprint(\"Entre com as palavras chave para a pesquisa:\")\npalavras_chave = str(input())\n\nprint(\"Quantas paginas de resultados deseja fazer o download?\")\npaginas = int(input())\n\ninicio_link = 'https://www.semanticscholar.org/search?q='\nfim_link = '&sort=relevance&pdf=true'\nfim_link_outras_comeco = '&sort=relevance&page='\nfim_link_outras_fim = '&pdf=true'\nmeio_link = ''\npalavras_separadas = palavras_chave.split()\n\nfor palavra in palavras_separadas:\n meio_link += palavra\n if palavra is not palavras_separadas[-1]:\n meio_link += '%20'\n\nurl1 = inicio_link + meio_link + fim_link\nlista_de_urls = [url1]\n\nfor i in range(2, paginas+1):\n temp = inicio_link + meio_link + fim_link_outras_comeco + str(i) + fim_link_outras_fim\n lista_de_urls.append(temp)\n\noptions = webdriver.ChromeOptions()\n\ndiretorio_atual = os.getcwd()\n\nplataforma = platform.system()\nif plataforma == 'Darwin':\n diretorio_chromedriver = diretorio_atual + '/ChromeDriver/ChromeDriverMac'\nelif plataforma == 'Windows':\n diretorio_chromedriver = diretorio_atual + '/ChromeDriver/ChromeDriverWin.exe'\nelse:\n diretorio_chromedriver = diretorio_atual + '/ChromeDriver/ChromeDriverLin'\n\ndiretorio_pdf = diretorio_atual + '/PDFs'\n\nif os.path.exists(diretorio_pdf):\n pass\nelse:\n os.mkdir('PDFs')\n\noptions.add_experimental_option(\"prefs\", {\n \"download.default_directory\": diretorio_pdf,\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True,\n \"plugins.always_open_pdf_externally\": True,\n \"safebrowsing.enabled\": True\n})\ndriver = webdriver.Chrome(diretorio_chromedriver, chrome_options=options)\n\ndownload_links = []\n\nfor url in lista_de_urls:\n driver.get(url)\n try:\n element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.PARTIAL_LINK_TEXT, 'PDF')))\n finally:\n lista_de_pdfs = driver.find_elements_by_partial_link_text(\"PDF\")\n for pdf in lista_de_pdfs:\n download_links.append(pdf.get_attribute('href'))\n pdf.click()\n\ndelayFechar(3)\nrenomeador(download_links, 
palavras_chave)\ndriver.quit()\n","sub_path":"SemanticScholarDownloader.py","file_name":"SemanticScholarDownloader.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"643961776","text":"'''\nAuthor: Puffrora\nDate: 2021-10-31 21:00:09\nLastModifiedBy: Puffrora\nLastEditTime: 2021-10-31 21:06:04\n'''\n\n\n# count all the empty nodes, distribute each empty node a slot\n# in traverse, each empty node \"#\" will use up a slot, each non-empty node will use up a slot and adds two slots for its left and right child node\n# judge whether all slots are used up by \"#\"\n# TC: O(N)\n# ST: O(1)\nclass Solution:\n def isValidSerialization(self, preorder: str) -> bool:\n preorder = preorder.split(\",\")\n # give a slot for root ndoe\n slots = 1\n for node in preorder:\n # slots used up before distribution\n if slots == 0:\n return False\n if node == \"#\":\n slots -= 1\n else:\n slots = slots - 1 + 2\n return slots == 0","sub_path":"Leetcode/leetcode331 验证二叉树的前序序列化.py","file_name":"leetcode331 验证二叉树的前序序列化.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"10262295","text":"#QA 2nd Week Assessment\n#===========================================================================================\n\ndef section1_1():\n print(\"\")\n print(\"Task: Create a function which will list all the Records that are specified in the\\n\"\n \"Text file. You may want to reformat the data to be in a single print statement,\\n\"\n \"for better readability \")\n print(\"\")\n\n with open('users.txt', 'r') as u: #Opens user.txt file in read mode\n\n class Resource( object ): #new class is created\n for line in 'users.txt': #iterates through the lines present in user.txt file\n class_counter= 1\n def __init__(self, ID, First_Name, Second_Name, Address_1, Address_2, Post_Code,\n Telephone_Number):\n self.ID = ID #ID number\n #self.ID = Resource.class_counter\n self.First_Name = First_Name #first name\n self.Second_Name = Second_Name #second name\n self.Address_1 = Address_1 #first line of address\n self.Address_2 = Address_2 #second line of address\n self.Post_Code = Post_Code #post code\n self.Telephone_Number = Telephone_Number #Telephone number\n Resource.class_counter += 1\n\n\n user = Resource(u.readline(), u.readline(), u.readline(), u.readline(), u.readline(), u.readline(), u.readline())\n user2 = Resource(u.readline(), u.readline(), u.readline(), u.readline(), u.readline(), u.readline(), u.readline())\n user3 = Resource(u.readline(), u.readline(), u.readline(), u.readline(), u.readline(), u.readline(), u.readline())\n\n person1 = vars(user) #stores the data read from the file into user\n person2 = vars(user2)\n person3 = vars(user3) #iterates 3 times to ensure all user details have be obtained\n\n u.close()\n\n print('================================================')\n print (\"\\n\".join(\"%s: %s\" % item for item in person1.items())) #prints out the data\n print('================================================')\n print (\"\\n\".join(\"%s: %s\" % item for item in person2.items()))\n print('================================================')\n print (\"\\n\".join(\"%s: %s\" % item for item in person3.items()))\n print('================================================')\n\n#===========================================================================================\n\ndef section1_2():\n print(\"\")\n print(\"Task: Create a 
function that can add a person to this text file\\n\"\n \"following the existing format. The data for this person should be taken as user input.\\n\"\n \"Remember the ID should be unique, so the program should auto-generate the next ID, not\\n\"\n \"accepted as an input. \")\n print(\"\")\n\n with open('users.txt', 'a+') as u: #opens file in append mode\n user_fname = input(\"Please Enter Your First Name: \") #user input to input their credentials\n user_sname = input(\"Please Enter Your Second Name: \")\n user_add1 = input(\"Please Enter Your First Line Of Address: \")\n user_add2 = input(\"Please Enter Your City: \")\n user_postcode = input(\"Please Enter Your Post Code: \")\n user_number = input(\"Please Enter Your Telephone Number: \")\n\n # u.readline()\n ID_Counter = 4 #automatic id generator starting from 4 instead of 0\n u.write('\\n')\n u.write(str(ID_Counter))\n u.write('\\n')\n u.write(user_fname.capitalize())\n u.write('\\n')\n u.write(user_sname.capitalize())\n u.write('\\n')\n u.write(user_add1.capitalize())\n u.write('\\n')\n u.write(user_add2.capitalize())\n u.write('\\n')\n u.write(user_postcode.upper())\n u.write('\\n')\n u.write(user_number)\n ID_Counter = + 1 #adds an increment to ID number\n\n u.close()\n\n#===========================================================================================\n\ndef section1_3():\n print(\"\")\n print(\"Task: Create a function to remove a particular person from this text file,\\n\"\n \"the user should be able to pick which person they remove by specifying an ID. \")\n print(\"\")\n user_choice = input(\"Which ID Would You Like To Remove?\\n[1]\\n[2]\\n[3]\")\n\n i = open('users.txt', 'r') #opens users.txt file in read mode\n data = i.readlines() #stores all in the lines in variable called data\n i.close() # file is closed\n\n f = open('sol1.txt', 'w') #creats a new txt file in which data can be copied to\n f.write(str(data)) #writes the data into the new file\n f.close()\n\n if user_choice == \"1\":\n del data[0:6 + 1] #deletes the range of the data that the person has\n f = open(\"UserNew.txt\", \"w\") #opens up a new file in write mode\n f.writelines(data) #writes the new data\n elif user_choice == \"2\":\n del data[7:13 + 1]\n f = open(\"UserNew.txt\", \"w\")\n f.writelines(data)\n elif user_choice == \"3\":\n del data[14:20 + 1]\n f = open(\"UserNew.txt\", \"w\")\n f.writelines(data)\n\n#===========================================================================================\n\ndef section2_1():\n print(\"\")\n print(\"Task: Create a function that can list information about a person\\n\"\n \"that exists in thetext file. 
The user should be able to choose which\\n\"\n \"field to search by.\")\n print(\"\")\n import linecache #imports line cache to get any line from any file\n\n def user1():\n if user_input2 == \"ID\":\n ID = linecache.getline('Users.txt', 1) #grabs the corresponding line from txt file\n print(ID) #prints the ID\n elif user_input2 == \"FIRST NAME\":\n fname = linecache.getline('Users.txt', 2)\n print(fname)\n elif user_input2 == \"SECOND NAME\":\n sname = linecache.getline('Users.txt', 3)\n print(sname)\n elif user_input2 == \"ADDRESS 1\":\n add1 = linecache.getline('Users.txt', 4)\n print(add1)\n elif user_input2 == \"ADDRESS 2\":\n add2 = linecache.getline('Users.txt', 5)\n print(add2)\n elif user_input2 == \"POST CODE\":\n pcode = linecache.getline('Users.txt', 6)\n print(pcode)\n elif user_input2 == \"TEL NUMBER\":\n tel = linecache.getline('Users.txt', 7)\n print(tel)\n\n def user2():\n if user_input2 == \"ID\":\n ID = linecache.getline('Users.txt', 8)\n print(ID)\n elif user_input2 == \"FIRST NAME\":\n fname = linecache.getline('Users.txt', 9)\n print(fname)\n elif user_input2 == \"SECOND NAME\":\n sname = linecache.getline('Users.txt', 10)\n print(sname)\n elif user_input2 == \"ADDRESS 1\":\n add1 = linecache.getline('Users.txt', 11)\n print(add1)\n elif user_input2 == \"ADDRESS 2\":\n add2 = linecache.getline('Users.txt', 12)\n print(add2)\n elif user_input2 == \"POST CODE\":\n pcode = linecache.getline('Users.txt', 13)\n print(pcode)\n elif user_input2 == \"TEL NUMBER\":\n tel = linecache.getline('Users.txt', 14)\n print(tel)\n\n def user3():\n if user_input2 == \"ID\":\n ID = linecache.getline('Users.txt', 15)\n print(ID)\n elif user_input2 == \"FIRST NAME\":\n fname = linecache.getline('Users.txt', 16)\n print(fname)\n elif user_input2 == \"SECOND NAME\":\n sname = linecache.getline('Users.txt', 17)\n print(sname)\n elif user_input2 == \"ADDRESS 1\":\n add1 = linecache.getline('Users.txt', 18)\n print(add1)\n elif user_input2 == \"ADDRESS 2\":\n add2 = linecache.getline('Users.txt', 19)\n print(add2)\n elif user_input2 == \"POST CODE\":\n pcode = linecache.getline('Users.txt', 20)\n print(pcode)\n elif user_input2 == \"TEL NUMBER\":\n tel = linecache.getline('Users.txt', 21)\n print(tel)\n\n user_input = input(\"Which ID Would You Like To View?\\n[1] [2] [3]: \")\n user_input2 = input(\"Which Field Would You Like To View?\\n[ID] [FIRST NAME]\"\n \" [SECOND NAME] [ADDRESS 1] [ADDRESS 2] [POST CODE]\"\n \"[TEL NUMBER]: \")\n\n user_input = user_input.upper() #converts user input into capitals\n user_input2 = user_input2.upper()\n\n if user_input == \"1\":\n user1()\n elif user_input == \"2\":\n user2()\n elif user_input == \"3\":\n user3()\n\n#===========================================================================================\n\ndef section2_2():\n print(\"\")\n print(\"Task: Create a function that can make a copy of the text file to be used\\n\"\n \"as a back-up. 
(hint: research shutil.copy())\")\n print(\"\")\n import shutil\n\n shutil.copy('users.txt', 'UsersBackup.txt')\n\n#===========================================================================================\n\ndef section2_3():\n print(\"\")\n print(\"Task: Create a function to update a person that already exists in the text\\n\"\n \"file, theuser should be able to pick which record they update and which\\n\"\n \"field in the record they change.(hint: research .strip() to remove new line characters)\")\n print(\"\")\n\n\n with open('users.txt', 'r') as file:\n data = file.readlines()\n\n def user1():\n if user_input2 == \"FIRST NAME\":\n print(\"Current: \" + data[1])\n f_name_change = input(\"Please Enter New First Name: \")\n print(\"Updated: \" + f_name_change)\n data[1] = (f_name_change + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"SECOND NAME\":\n print(\"Current: \" + data[2])\n s_name_change = input(\"Please Enter New Second Name: \")\n print(\"Updated: \" + s_name_change)\n data[2] = (s_name_change + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"ADDRESS 1\":\n print(\"Current: \" + data[3])\n add_ = input(\"Please Enter New First Address: \")\n print(\"Updated: \" + add_)\n data[3] = (add_ + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"ADDRESS 2\":\n print(\"Current: \" + data[4])\n add2_ = input(\"Please Enter New Second Address: \")\n print(\"Updated: \" + add2_)\n data[4] = (add2_ + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"POST CODE\":\n print(\"Current: \" + data[5])\n pcode = input(\"Please Enter New Post Code: \")\n print(\"Updated: \" + pcode)\n data[5] = (pcode + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"TEL NUMBER\":\n print(\"Current: \" + data[6])\n tel = input(\"Please Enter New Tel Number: \")\n print(\"Updated: \" + tel)\n data[6] = (tel + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n\n\n def user2():\n if user_input2 == \"FIRST NAME\":\n print(\"Current: \" + data[8])\n f_name_change = input(\"Please Enter New First Name: \")\n print(\"Updated: \" + f_name_change)\n data[8] = (f_name_change + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"SECOND NAME\":\n print(\"Current: \" + data[9])\n s_name_change = input(\"Please Enter New Second Name: \")\n print(\"Updated: \" + s_name_change)\n data[9] = (s_name_change + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"ADDRESS 1\":\n print(\"Current: \" + data[10])\n add_ = input(\"Please Enter New First Address: \")\n print(\"Updated: \" + add_)\n data[10] = (add_ + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"ADDRESS 2\":\n print(\"Current: \" + data[11])\n add2_ = input(\"Please Enter New Second Address: \")\n print(\"Updated: \" + add2_)\n data[11] = (add2_ + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"POST CODE\":\n print(\"Current: \" + data[12])\n pcode = input(\"Please Enter New Post Code: \")\n print(\"Updated: \" + pcode)\n data[12] = (pcode + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"TEL NUMBER\":\n print(\"Current: \" + data[13])\n tel = input(\"Please Enter New Tel Number: \")\n 
print(\"Updated: \" + tel)\n data[13] = (tel + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n\n def user3():\n if user_input2 == \"FIRST NAME\":\n print(\"Current: \" + data[15])\n f_name_change = input(\"Please Enter New First Name: \")\n print(\"Updated: \" + f_name_change)\n data[15] = (f_name_change + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"SECOND NAME\":\n print(\"Current: \" + data[16])\n s_name_change = input(\"Please Enter New Second Name: \")\n print(\"Updated: \" + s_name_change)\n data[16] = (s_name_change + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"ADDRESS 1\":\n print(\"Current: \" + data[17])\n add_ = input(\"Please Enter New First Address: \")\n print(\"Updated: \" + add_)\n data[17] = (add_ + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"ADDRESS 2\":\n print(\"Current: \" + data[18])\n add2_ = input(\"Please Enter New Second Address: \")\n print(\"Updated: \" + add2_)\n data[18] = (add2_ + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"POST CODE\":\n print(\"Current: \" + data[19])\n pcode = input(\"Please Enter New Post Code: \")\n print(\"Updated: \" + pcode)\n data[19] = (pcode + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n elif user_input2 == \"TEL NUMBER\":\n print(\"Current: \" + data[20])\n tel = input(\"Please Enter New Tel Number: \")\n print(\"Updated: \" + tel)\n data[20] = (tel + \"\\n\")\n with open('users.txt', 'w') as file:\n file.writelines(data)\n\n user_input = input(\"Which ID Would You Like To Change?\\n[1] [2] [3]: \")\n user_input2 = input(\"Which Field Would You Like To Change?\\n[FIRST NAME]\"\n \" [SECOND NAME] [ADDRESS 1] [ADDRESS 2] [POST CODE]\"\n \" [TEL NUMBER]: \")\n\n user_input = user_input.upper()\n user_input2 = user_input2.upper()\n\n if user_input == \"1\":\n user1()\n elif user_input == \"2\":\n user2()\n elif user_input == \"3\":\n user3()\n\n#=======================================================================================================================\n\ndef section3():\n print(\"View Duplicate Fields?:\\n\"\n \"Yes = [Y]\\n\"\n \"No = [N]\")\n\n\n with open('users.txt', 'r') as file:\n\n data = file.readlines()\n\n user_input2 = input(\":\")\n user_input2 = user_input2.upper()\n def user1():\n if user_input2 == \"Y\":\n print(\"Male 1: \" + data[1])\n print(\"Male 2: \" + data[15])\n\n user1()\n\n\n\n#=======================================================================================================================\n\nmarker = input(\"Which Section Would You Like To View?\\nSection 1.1 = [1.1]\\n\"\n \"Section 1.2 = [1.2]\\nSection 1.3 = [1.3]\\nSection 2.1 = [2.1]\\n\"\n \"Section 1.1 = [2.2]\\nSection 2.3 = [2.3]\\nSection 3 = [3]\")\n\nmarker = marker.upper()\n\nif marker == \"1.1\":\n section1_1()\nelif marker == \"1.2\":\n section1_2()\nelif marker == \"1.3\":\n section1_3()\nelif marker == \"2.1\":\n section2_1()\nelif marker == \"2.2\":\n section2_2()\nelif marker == \"2.3\":\n section2_3()\nelif marker == \"3\":\n section3()\n\n#=======================================================================================================================\n\n","sub_path":"Solomon.py","file_name":"Solomon.py","file_ext":"py","file_size_in_byte":16930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} 
+{"seq_id":"298615906","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nall_bonds = np.loadtxt('MD_Data/all_bonds.txt')\nall_angles = np.loadtxt('MD_Data/all_angles.txt')\nbond_angle_means = np.loadtxt('bond_angle_means.txt')\nbond_length_means = np.loadtxt('bond_length_means.txt')\nbond_dihedrals = np.loadtxt('MD_Data/all_dihedral.txt')\ndata = np.loadtxt('MD_Data/MD_energy_1.txt')\n\nind0 = bond_dihedrals[7, :] < 0\nbond_dihedrals[7, ind0] = bond_dihedrals[7, ind0] + 360\nind0 = bond_dihedrals[8, :] < 0\nbond_dihedrals[8, ind0] = bond_dihedrals[8, ind0] + 360\nplt.hist(bond_dihedrals[7, :])\nplt.show()\nplt.hist(bond_dihedrals[8, :])\nplt.show()\nomega1_mean = np.mean(bond_dihedrals[7, :])\nomega1_std = np.std(bond_dihedrals[7, :])\nomega2_mean = np.mean(bond_dihedrals[8, :])\nomega2_std = np.std(bond_dihedrals[8, :])\nprint(omega1_mean, omega1_std, omega2_mean, omega2_std)\n\nbond_min_use = bond_length_means[:, 0] - 3.0 * bond_length_means[:, 1]\nbond_max_use = bond_length_means[:, 0] + 3.0 * bond_length_means[:, 1]\n\nangle_min_use = bond_angle_means[:, 0] - 3.0 * bond_angle_means[:, 1]\nangle_max_use = bond_angle_means[:, 0] + 3.0 * bond_angle_means[:, 1]\n\nprint('number of samples', data.shape[0])\n\ncounter = 0\nto_use_array = np.zeros(data.shape[0])\nfor i in range(0, data.shape[0]):\n # ind0 = bond_dihedrals[7, i] < omega1_mean - omega1_std * 3.0\n # ind1 = bond_dihedrals[7, i] > omega1_mean + omega1_std * 3.0\n # ind2 = bond_dihedrals[8, i] < omega2_mean - omega2_std * 3.0\n # ind3 = bond_dihedrals[8, i] > omega2_mean + omega2_std * 3.0\n # if (ind0 or ind1 or ind2 or ind3):\n # to_use_array[i] = 0\n # counter = counter + 1\n # else:\n this_bonds = all_bonds[:, i]\n this_angles = all_angles[:, i]\n ind0 = this_bonds <= bond_min_use\n ind1 = this_bonds >= bond_max_use\n ind2 = this_angles <= angle_min_use\n ind3 = this_angles >= angle_max_use\n if (ind0.any() or ind1.any() or ind2.any() or ind3.any()):\n to_use_array[i] = 0\n counter = counter + 1\n else:\n to_use_array[i] = 1\n\nprint(counter)\n\nind0 = to_use_array == 1\nused_energy = data[ind0]\ndiscarded_energy = data[~ind0]\nprint(used_energy.shape)\nprint(discarded_energy.shape)\nfig, axs = plt.subplots(1, 2, tight_layout=True)\n# We can set the number of bins with the `bins` kwarg\naxs[0].hist(used_energy)\naxs[1].hist(discarded_energy)\nplt.show()\n\nfig, axs = plt.subplots(2, 1, tight_layout=True)\nind0 = data > 20\nsub_angles = all_angles[:, 0:data.shape[0]]\naxs[0].boxplot(sub_angles[:, ind0].T)\n\nind0 = data > 20\n\naxs[1].boxplot(sub_angles[:, ~ind0].T)\nplt.show()","sub_path":"Code/parse_MD_energy.py","file_name":"parse_MD_energy.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"109056175","text":"\n# damn we have a lot of modules\n\nimport discord\nimport discord.ext.commands\nimport traceback\nimport motor.motor_asyncio\n\n\n\nclient = motor.motor_asyncio.AsyncIOMotorClient()\ndb = client['Discord']\n\n\nclass InfoMessages:\n banhammer = \"<:banhammer:520808727989846035> \"\n caution = \"<:caution:520688590204960768> \"\n denied = \"<:Denied:520684512414531594> \"\n approved = \"<:Approved:520684511567151104> \"\n kicked = \"<:kick:521361686645702696> \"\n warned = \"<:warn:521370081951744044> \"\n\n\n upvote = \"<:staffupvote:521362102536110080> \"\n downvote = \"<:staffdownvote:521362102481321984> \"\n\n upvote_reaction = \":staffupvote:521362102536110080\"\n downvote_reaction = 
\":staffdownvote:521362102481321984\"\n\n\n\n\nclass ModeratorLogs:\n \n def __init__(self, bot):\n self.bot = bot\n\n\n\n @discord.ext.commands.command(name='case', aliases=['casenumber'])\n @discord.ext.commands.has_permissions(kick_members=True)\n async def case_lookup(self, ctx, case_number):\n query = await db.moderator_logs.find_one({\"ServerID\": str(ctx.guild.id), \"CaseNumber\": str(case_number)})\n if query:\n \n if query['Action'] == \"kick\":\n \n embed=discord.Embed(title=InfoMessages.kicked + 'User Kicked', colour=0x2CA298)\n embed.add_field(name=\"User\", value=query['Victim'], inline=False)\n embed.add_field(name=\"UserID\", value=query['VictimID'])\n embed.add_field(name=\"Reason\", value=query['Reason'])\n embed.add_field(name=\"Moderator\", value=query['Moderator'], inline=True)\n embed.set_footer(text=\"Case Number #{} | Requested by: {}\".format(query['CaseNumber'], ctx.author.name))\n await ctx.send(embed=embed)\n\n elif query['Action'] == \"ban\":\n\n embed=discord.Embed(title=InfoMessages.banhammer + 'User Banned', colour=0x2e88d1)\n embed.add_field(name=\"User\", value=query['Victim'], inline=False)\n embed.add_field(name=\"UserID\", value=query['VictimID'])\n embed.add_field(name=\"Duration\", value=\"Permanently\" , inline=True)\n embed.add_field(name=\"Reason\", value=query['Reason'])\n embed.add_field(name=\"Moderator\", value=query['Moderator'], inline=True)\n \n embed.set_footer(text=\"Case Number #{} | Requested by: {}\".format(query['CaseNumber'], ctx.author.name))\n await ctx.send(embed=embed)\n\n elif query['Action'] == \"warn\":\n\n embed=discord.Embed(title='User Warned', colour=0xFFA500)\n embed.add_field(name=\"User\", value=query['Victim'], inline=False)\n embed.add_field(name=\"UserID\", value=query['VictimID'])\n embed.add_field(name=\"Amount\", value=query['Amount'])\n embed.add_field(name=\"Reason\", value=query['Reason'])\n embed.add_field(name=\"Moderator\", value=query['Moderator'], inline=True)\n embed.set_footer(text=\"Case Number #{} | Requested by: {}\".format(query['CaseNumber'], ctx.author.name))\n await ctx.send(embed=embed)\n\n elif query['Action'] == \"liftwarn\":\n\n embed=discord.Embed(title='User De-warned', colour=0x945907)\n embed.add_field(name=\"User\", value=query['Victim'], inline=False)\n embed.add_field(name=\"UserID\", value=query['VictimID'])\n embed.add_field(name=\"Amount\", value=query['Amount'])\n embed.add_field(name=\"Reason\", value=query['Reason'])\n embed.add_field(name=\"Moderator\", value=query['Moderator'], inline=True) \n embed.set_footer(text=\"Case Number #{} | Requested by: {}\".format(query['CaseNumber'], ctx.author.name))\n await ctx.send(embed=embed)\n\n else:\n await ctx.send(\"? ? ? 
?\")\n else:\n await ctx.send(\"Could not find case number.\")\n\n @discord.ext.commands.command(name=\"case_list\")\n @discord.ext.commands.is_owner()\n async def list(self, ctx):\n logs = []\n query = db.moderator_logs.find({\"ServerID\": str(ctx.guild.id)})\n if query:\n async for log in query:\n logs.append(log)\n await ctx.send(logs)\n else:\n await ctx.send(\"No Logs found!\")\n \n @discord.ext.commands.command(name=\"usercase\")\n @discord.ext.commands.has_permissions(kick_members=True)\n async def user_case(self, ctx, user):\n logs = []\n try:\n user_id = int(user)\n except Exception:\n await ctx.send(\"Please use a normal discord ID\")\n\n\n # TODO: Paginator this\n query = db.moderator_logs.find({\"ServerID\": str(ctx.guild.id), \"VictimID\": user_id})\n try:\n if query:\n async for case in query:\n logs.append(case['CaseNumber'])\n await ctx.send(\"```{}```\".format(\"\\n\".join(logs)))\n else:\n await ctx.send(\"no logs found.\")\n except Exception as e:\n print(e)\n await ctx.send(e)\n\ndef setup(bot):\n bot.add_cog(ModeratorLogs(bot))\n","sub_path":"cogs/mod_logs.py","file_name":"mod_logs.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"402419239","text":"class Solution:\n \"\"\"\n @param nums: an integer array and all positive numbers, no duplicates\n @param target: An integer\n @return: An integer\n \"\"\"\n\n def backPackVI(self, nums, target):\n # write your code here\n nums.sort()\n # dp[i]: number of possibles when target is i\n dp = [1] + [0] * target\n\n for i in range(1, target + 1):\n for n in nums:\n if i < n: break\n dp[i] += dp[i - n]\n\n return dp[-1]\n","sub_path":"lintcode/564-combination-sum-iv.py","file_name":"564-combination-sum-iv.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"412209017","text":"import argparse\nimport os\nimport time\nfrom os.path import expanduser\n\nimport cv2\nimport torch\nfrom torch import optim\nfrom tqdm import tqdm\n\nfrom yolov3 import models, utils\nfrom yolov3.datasets import YoloDataset\nfrom yolov3.get_image_size import get_image_size\nfrom yolov3.losses import YoloLoss\n\n\ndef test(opts):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n dataset = YoloDataset(opts.src, opts.size)\n dataloader = torch.utils.data.DataLoader(dataset,\n batch_size=1,\n shuffle=True,\n num_workers=4)\n\n model = models.Darknet(opts.cfg, opts.weights,\n opts.nms, opts.obj,\n opts.size, device, False).to(device).eval()\n\n run_detect(model, dataloader, opts, device)\n\n\ndef run_detect(model, dataloader, opts, device):\n results = {}\n class_colors = utils.generate_class_colors(model.num_classes)\n class_to_names = utils.get_class_names(opts.names_path)\n with torch.no_grad():\n for i, (img, img_name) in enumerate(tqdm(dataloader)):\n start_time = time.time()\n img = img.to(device)\n detections = model(img)\n if detections.shape[0] > 0:\n width, height = get_image_size(img_name[0])\n detections = utils.transform_detections(\n detections, width, height, opts.size)\n detections = detections.view(-1, 7)\n confidences = detections[..., -2] * detections[..., -3]\n detections = torch.cat((detections[..., 6].unsqueeze(1),\n confidences.unsqueeze(1),\n detections[..., :4]), 1)\n utils.write_detections(\n detections,\n img_name[0],\n opts.size,\n opts.dst,\n class_colors,\n class_to_names)\n elapsed = round(time.time() - 
start_time, 2)\n info = \"Processed image {} in {} seconds\".format(i, elapsed)\n\n\ndef get_model(opts):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = models.Darknet(opts.cfg, opts.weights,\n opts.nms, opts.obj,\n opts.size, device,\n False).to(device)\n model.eval()\n return model\n\n\nclass Config:\n def __init__(self, cfg, weights, nms, obj, size):\n self.cfg = cfg\n self.weights = weights\n self.nms = nms\n self.obj = obj\n self.size = size\n\n\ndef get_args():\n home = expanduser(\"~\")\n weights_file = os.path.join(home, \".torch/yolov3/yolov3.weights\")\n opts = argparse.ArgumentParser(description='Yolov3 Detection')\n opts.add_argument('-c', '--cfg',\n help='Configuration file',\n default=\"../config/yolov3.cfg\")\n opts.add_argument('-w', '--weights',\n help='Weights file',\n default=weights_file)\n opts.add_argument('-o', '--obj',\n help='Objectness threshold',\n default=.5)\n opts.add_argument('-n', '--nms',\n help='Non-maximum Suppression threshold',\n default=.45)\n opts.add_argument('-s', '--size',\n help='Input size',\n default=416)\n opts.add_argument('-src', '--src',\n help='Source directory',\n default=\"../images\")\n opts.add_argument('-d', '--dst',\n help='Destination directory',\n default=\"../results\")\n opts.add_argument('-np', '--names_path',\n help='Path to names of classes',\n default=\"../data/coco.names\")\n opts.add_argument('-a', '--ann_path',\n help='Path to annotations of the images',\n default=\"../annotations/\")\n opts = opts.parse_args()\n return opts\n\n\ndef main():\n opts = get_args()\n test(opts)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"demos/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"489875024","text":"from typing import List\n\nIntList = List[int]\n\n\ndef reverse_list(l: IntList) -> IntList:\n \"\"\"Reverse a list via simple list concatenation.\"\"\"\n accumulator: IntList = []\n\n i: int\n for i in l:\n accumulator = [i] + accumulator # the order matters\n\n return accumulator\n\n\ndef reverse_list_recursive(l: List[int]) -> List[int]:\n if l == []:\n return []\n\n rest_rev = reverse_list_recursive(l[1:])\n first = l[0:1]\n\n result = rest_rev + first\n\n return result\n\n\ndef reverse_string_recursive(s: str) -> str:\n if s == \"\":\n return \"\"\n\n rest_rev = reverse_string_recursive(s[1:])\n first = s[0:1]\n\n result = rest_rev + first\n\n return result\n\n\ndef main() -> None:\n assert reverse_list([9, 8, 7]) == [7, 8, 9]\n assert reverse_list_recursive([9, 8, 7]) == [7, 8, 9]\n\n assert reverse_string_recursive(\"Let it go\") == \"og ti teL\"\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"chap03_recursion/reverse_list_and_string.py","file_name":"reverse_list_and_string.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"257183261","text":"class fViewPendampingLKMS:\n\n def __init__(self,parentForm,FormObj):\n self.form = parentForm\n self.app = parentForm.ClientApplication\n self.FormView = None\n\n def Show(self,prod):\n params = self.app.CreateValues(['key',prod])\n self.FormObject.SetDataWithParameters(params)\n\n spl=prod.split('=')\n idcomp=spl[2]\n idc=spl[1]\n idprod=idc.split('#')[0]\n\n self.ShowHisTrans(idcomp)\n self.ShowLKMS(idcomp)\n self.ShowProgram(idcomp)\n self.FormContainer.Show()\n\n def ShowHisTrans(self,idcomp):\n 
qHisKon = self.qHisKon\n AddParam = \"[ CompanionId=%s ]\" % (idcomp)\n\n qHisKon.OQLText = \" Select from CompanionContract as qHisKon\\\n %s \\\n ( ContractNo, \\\n InitialContractPeriod, \\\n EndContractPeriod, \\\n CompanionExperience, \\\n LPropinsi.Propinsi, \\\n KabKota, \\\n LBranch.BranchName, \\\n InputDate, \\\n self \\\n ) then order by DESC InputDate;\" % (AddParam)\n qHisKon.DisplayData()\n\n def ShowLKMS(self,idcomp):\n qLKMS = self.qLKMS\n\n AddParam = \"[ CompanionId=%s ]\" % (idcomp)\n\n qLKMS.OQLText = \" Select from MustahiqProduct as qLKMS\\\n %s \\\n ( MustahiqExtNumber, \\\n LMustahiq.LCustomer.CustomerName, \\\n LProduct.ProductName, \\\n LMustahiq.LCustomer.AddressStreet, \\\n LMustahiq.LCustomer.PhoneNumber, \\\n self \\\n );\" % (AddParam)\n qLKMS.DisplayData()\n\n def ShowProgram(self,idcomp):\n qProgram = self.qProgram\n\n AddParam = \"[ CompanionId=%s]\" % (idcomp)\n\n qProgram.OQLText = \" Select from CompanionProduct as qProgram\\\n %s \\\n ( LProduct.ProductName, \\\n LCompanion.CompanionExtNumber, \\\n LCompanion.LLastContract.LBranch.BranchName, \\\n LCompanion.LLastContract.EndContractPeriod, \\\n self \\\n );\" % (AddParam)\n qProgram.DisplayData()\n\n\n","sub_path":"dialogs/LKMS/fViewPendampingLKMS_intr.py","file_name":"fViewPendampingLKMS_intr.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"79714628","text":"from pyrosetta import *\ninit('-in:auto_setup_metals -constraints:cst_fa_file b-roll.cst')\nfrom pyrosetta.rosetta.core.select import residue_selector as selection\nfrom pyrosetta.rosetta.core.pack.task import operation\nfrom pyrosetta.rosetta.protocols import minimization_packing as pack_min\nfrom pyrosetta.rosetta.core.select.movemap import *\nfrom pyrosetta.rosetta.protocols.relax import FastRelax\n\ndef des_relax_pose(pose):\n## set up score function and reverse fold tree\n scorefunction = create_score_function(\"ref2015_cst\")\n pyrosetta.rosetta.core.scoring.constraints.add_fa_constraints_from_cmdline_to_pose(pose)\n ft = pose.fold_tree() \n ft.reorder(pose.total_residue())\n pose.fold_tree(ft)\n scorefunction(pose)\n \n ## added is residues different from the wt b roll\n added = pyrosetta.rosetta.core.select.residue_selector.ResidueIndexSelector()\n added.set_index(\"10,11,21,22,32,33,43,44,54,65,66,77,78,79,80,81,83,84,85\")\n\n # select interaction partners within a 5A radius\n nbr_selector = selection.NeighborhoodResidueSelector()\n nbr_selector.set_focus_selector(added)\n nbr_selector.set_include_focus_in_subset(False)\n nbr_selector2 = selection.NeighborhoodResidueSelector()\n nbr_selector2.set_focus_selector(added)\n nbr_selector2.set_include_focus_in_subset(True)\n \n # tell Rosetta which residues to design and repack\n tf = pyrosetta.rosetta.core.pack.task.TaskFactory()\n tf.clear()\n tf.push_back(operation.InitializeFromCommandline())\n prevent_repacking_rlt = operation.PreventRepackingRLT()\n prevent_subset_repacking = operation.OperateOnResidueSubset(prevent_repacking_rlt, nbr_selector2, True )\n tf.push_back(prevent_subset_repacking)\n restrict_to_repack = operation.RestrictToRepackingRLT()\n prevent_nbr_design = operation.OperateOnResidueSubset(restrict_to_repack, nbr_selector, False )\n tf.push_back(prevent_nbr_design)\n aa_to_design = pyrosetta.rosetta.core.pack.task.operation.RestrictAbsentCanonicalAASRLT()\n aa_to_design.aas_to_keep(\"ACDEFGHIKLMNPQRSTVWY\")\n aa_design = operation.OperateOnResidueSubset(aa_to_design, 
added, True )\n \n packer_task = tf.create_task_and_apply_taskoperations(pose)\n print(packer_task)\n \n # Create relax move map and set up fast relax\n mmf = MoveMapFactory()\n mmf.add_bb_action(mm_enable, added)\n mmf.add_chi_action(mm_enable, nbr_selector2)\n\n fr = FastRelax()\n fr.set_scorefxn(scorefunction)\n fr.set_movemap_factory(mmf)\n fr.set_task_factory(tf)\n fr.constrain_relax_to_start_coords(True)\n \n rel_pose = fr.apply(pose)\n return rel_pose\n \ndef main(pdb_file):\n pose = pose_from_pdb(pdb_file)\n jd = PyJobDistributor(\"outputs/mut_des_beta_roll\", 1, scorefunction)\n jd.native_pose = pose\n\n rel_pose = Pose()\n while not jd.job_complete:\n rel_pose.assign(pose)\n if not os.getenv(\"DEBUG\"):\n des_relax_pose(rel_pose)\n jd.output_decoy(rel_pose)\n","sub_path":"des_relax.py","file_name":"des_relax.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"329893166","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/unbound_ec2/repeater.py\n# Compiled at: 2016-11-14 04:03:05\nimport threading\n\nclass RecursiveRepeater(threading.Thread):\n \"\"\"Periodically runs code in a thread.\n \"\"\"\n\n def __init__(self, delay, callme):\n \"\"\"Calls `callme` every `delay` seconds.\n \"\"\"\n threading.Thread.__init__(self)\n self.callme = callme\n self.delay = delay\n self.event = threading.Event()\n self.daemon = True\n\n def run(self):\n while not self.event.wait(1.0):\n self.callme()\n self.event.wait(self.delay)\n\n def stop(self):\n self.event.set()\n self.join()","sub_path":"pycfiles/unbound_ec2-1.3.0-py2.7/repeater.py","file_name":"repeater.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"417965362","text":"import requests\nfrom json import dump\nfrom json import loads\nfrom pathlib import Path\n\ndef run_query(json, headers):\n request = requests.post('https://api.github.com/graphql', json=json, headers=headers)\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception(\"Query failed to run by returning code of {}. {}. {}\"\n .format(request.status_code, json['query'],\n json['variables']))\n\nquery = \"\"\"\nquery example{\n search(query:\"stars:100..{STARS}\", type:REPOSITORY, first:50{AFTER}){\n pageInfo{\n hasNextPage\n endCursor\n }\n nodes{\n ... 
on Repository{\n nameWithOwner\n url\n stargazers {\n totalCount\n }\n createdAt\n forks{\n totalCount\n }\n releases{\n totalCount\n }\n primaryLanguage{\n name\n }\n }\n }\n }\n rateLimit{\n remaining\n resetAt\n }\n}\n\"\"\"\n\nfirst_query = query.replace(\"{STARS}\", \"*\")\nfinal_query = first_query.replace(\"{AFTER}\", \"\")\n\njson = {\n \"query\":final_query, \"variables\":{}\n}\n\ntoken = 'eebd3fc5d3ddded0864ea8ce8a9313ced18de9fa' #insert your token\nheaders = {\"Authorization\": \"Bearer \" + token} \ntotal_pages = 1 #GiHutb restricts queries to 100 pages (1k repositories)\n\nprint(\"[REPORT]: STARTING QUERIES \")\nprint(\"[REPORT]: QUERYING PAGE:\" + str(total_pages))\n\nresult = run_query(json, headers)\nnodes = result['data']['search']['nodes']\nnext_page = result[\"data\"][\"search\"][\"pageInfo\"][\"hasNextPage\"]\n\nwhile (next_page and total_pages < 100):\n if(result['data']['rateLimit']['remaining'] == 0):\n print(\"[REPORT]: CHANGING TOKEN\")\n token = '95e3bd95f13ca07fe89bf4609794765c157adca5' #due to query limits\n \n total_pages += 1\n\n print(\"[REPORT]: QUERYING PAGE:\" + str(total_pages))\n \n cursor = result[\"data\"][\"search\"][\"pageInfo\"][\"endCursor\"]\n\n next_query = first_query.replace(\"{AFTER}\", \", after: \\\"%s\\\"\" % cursor)\n \n json[\"query\"] = next_query\n result = run_query(json, headers)\n nodes += result['data']['search']['nodes']\n next_page = result[\"data\"][\"search\"][\"pageInfo\"][\"hasNextPage\"]\n\n #for each block of 100 pages, qe have to make a new query (based on the number of stars)\n if(total_pages % 10 == 0): \n if(total_pages == 100): #some workaround\n continue\n\n total_pages += 1\n print(\"[REPORT]: QUERYING PAGE:\" + str(total_pages))\n\n first_query = query.replace(\"{STARS}\", str(nodes[-1]['stargazers']['totalCount']))\n final_query = first_query.replace(\"{AFTER}\", \"\")\n json[\"query\"] = final_query\n\n result = run_query(json, headers)\n nodes += result['data']['search']['nodes']\n next_page = result[\"data\"][\"search\"][\"pageInfo\"][\"hasNextPage\"]\n\nprint(\"[REPORT]: END OF REQUESTS\")\n\nfor node in nodes:\n output = Path(__file__).parent / \"./github_5k_repositories.csv\"\n with open(output, 'a') as the_file:\n the_file.write(node['nameWithOwner'] + \";\" + node['url'] + \";\" + str(node['stargazers']['totalCount']) + \";\" +\n node['createdAt'] + \";\" + str(node['forks']['totalCount']) + \";\" + str(node['releases']['totalCount']) + \"\\n\") \n","sub_path":"labs/replication/get_5k_repositories_data.py","file_name":"get_5k_repositories_data.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"387794987","text":"# File: chaos2.py\n# A simple program illustrating chaotic behavior.\n\ndef main():\n print(\"This program illustrates a chaotic function\")\n x = float(input(\"Enter a number between 0 and 1: \"))\n y = float(input(\"Enter another number between 0 and 1: \"))\n n = int(input(\"How many numbers should I print? 
\"))\n \n print(\"input\".center(8) + str(x).center(20) + str(y).center(20))\n print('-' * 70)\n\n result = {'x':[], 'y':[]}\n\n for i in range(n):\n x = 3.9 * x * (1 - x)\n result['x'].append(x)\n y = 3.9 * y * (1 - y)\n result['y'].append(y)\n\n for i in range(n):\n print(' ' * 8 + str(result['x'][i]).ljust(20) + str(result['y'][i]).ljust(20))\n\nmain()\n","sub_path":"PythonProgramming(ThirdEdition)/Ch01/chaos2.py","file_name":"chaos2.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"571779232","text":"import random\r\nfrom datetime import datetime, timedelta\r\nfrom django.core.management.base import BaseCommand\r\n\r\nfrom django.contrib.admin.utils import flatten # 2차원 배열 안의 값을 가져올 때 사용 가능\r\nfrom django_seed import Seed\r\n\r\nfrom reservations import models as reservation_models\r\nfrom rooms import models as room_models\r\nfrom users import models as user_models\r\n\r\nNAME = 'reservations'\r\n\r\nclass Command(BaseCommand):\r\n\r\n help = f'this command create {NAME}!'\r\n\r\n def add_arguments(self, parser):\r\n parser.add_argument(\"--number\", default=1, type=int, help=f\"How many {NAME} do you want to create?\")\r\n\r\n def handle(self, *args, **options):\r\n number = options.get('number')\r\n seeder = Seed.seeder()\r\n\r\n users = user_models.User.objects.all()\r\n rooms = room_models.Room.objects.all()\r\n seeder.add_entity(\r\n reservation_models.Reservation, \r\n number,\r\n {\r\n \"check_in\" : lambda x: datetime.now(),\r\n \"check_out\" : lambda x: datetime.now() + timedelta(days=random.randint(3, 25)),\r\n \"guest\" : lambda x: random.choice(users),\r\n \"room\" : lambda x: random.choice(rooms),\r\n\r\n })\r\n\r\n created_rooms = seeder.execute()\r\n self.stdout.write(self.style.SUCCESS(f\"성공적으로 {number}개의 {NAME}이 생성되었습니다.\"))","sub_path":"users/management/commands/seed_reservations.py","file_name":"seed_reservations.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"449782094","text":"#!/usr/bin/python3.4\n# -*- coding: utf-8 -*-\n\nimport sqlite3\nimport os\n#import json\nfrom datetime import datetime\nimport sys\nimport argparse\n\ndef executeQuery(cursor, query):\n ''' Takes the cursor object and the query, executes it '''\n try:\n cursor.execute(query)\n except:\n print(\"There is something wrong, probably with the query\\n\\n\"+query)\n\ndef history(cursor, today=False, pattern=None):\n ''' Function which extracts history from the sqlite file '''\n\n sql=\"\"\"select url, title, last_visit_date,rev_host from moz_historyvisits natural join moz_places where last_visit_date is not null and \"\"\"\n if pattern is not None:\n sql+= \"url like '%\"+pattern+\"%' and url not like '%google%.co%' and url not like '%duckduckgo.co%' and url not like '%live.com%'\\\n and url not like '%facebook%.com%' and url not like '%gmail.com%'\"\n else:\n sql+= \" url like 'http%'\"\n sql+=' order by last_visit_date desc;'\n\n executeQuery(cursor,sql)\n\n if today:\n for row in cursor:\n last_visit = datetime.fromtimestamp(row[2]/1000000).strftime('%Y-%m-%d %H:%M:%S')\n current_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n if current_time[:10]==last_visit[:10]:\n print(\"%s %s\"%(row[0],last_visit))\n else:\n for row in cursor:\n last_visit = datetime.fromtimestamp(row[2]/1000000).strftime('%Y-%m-%d %H:%M:%S')\n print(\"%s %s\"%(row[0],last_visit))\n\ndef bookmarks(cursor, json=False, 
pattern=None):\n ''' Function to extract bookmark related information '''\n\n theQuery = \"\"\"select url, moz_places.title, rev_host, frecency, last_visit_date from moz_places join moz_bookmarks on moz_bookmarks.fk=moz_places.id\nwhere visit_count>0 \"\"\"\n\n if pattern==None:\n theQuery+=\" and moz_places.url like 'http%'\"\n else:\n theQuery+=\"and moz_places.title like '%\"+pattern+\"%' and moz_places.url not like '%google.co%' and moz_places not like '%duckduckgo.co%'\"\n\n theQuery+=\" order by dateAdded desc;\"\n executeQuery(cursor,theQuery)\n\n string=\"\"\n title_bookmarks=['url', 'title', 'rev_host', 'frecency', 'last_visit_date']\n bookmarks_json=\"\"\n bookmarks=[]\n\n for row in cursor:\n #print(\"%s; %s\"%(row[0], datetime.fromtimestamp(row[4]/1000000).strftime('%Y-%m-%d %H:%M:%S')))\n print(\"%s\"%(row[0]))\n '''if json==True:\n title_bookmarks=['url', 'title', 'rev_host', 'frecency', 'last_visit_date']\n string=\"\"\n\n for row in cursor:\n blist = dict(zip(title_bookmarks,row))\n bookmarks.append(blist)\n\n for b in bookmarks:\n string+=str(b)+','\n\n bookmarks_json=string\n\n if bookmarks_json:\n file = open('bookmarks.json','w')\n file.write(bookmarks_json)\n file.close()'''\n\ndef getPath():\n '''Gets the path where the sqlite3 database file is present'''\n home_dir = os.environ['HOME']\n if sys.platform.startswith('win') == True:\n firefox_path = home_dir + '\\\\AppData\\\\Roaming\\\\Mozilla\\\\Firefox\\\\Profiles\\\\'\n elif sys.platform.startswith('linux')==True:\n firefox_path = home_dir + \"/.mozilla/firefox/\"\n elif sys.platform.startswith('darwin')==True:\n firefox_path = home_dir+'Library/Application Support/Firefox/Profiles/'\n\n return firefox_path\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description=\"Extract information from firefox's internal database\")\n parser.add_argument('--bm', '-b',default=\"\")\n parser.add_argument('--hist','-y', default=\"\")\n #parser.add_argument('--json',default=False) TODO: Later some time\n args = parser.parse_args()\n\n try:\n firefox_path = getPath()\n profiles = [i for i in os.listdir(firefox_path) if i.endswith('.default')]\n sqlite_path = firefox_path+ profiles[0]+'/places.sqlite'\n connection = sqlite3.connect(sqlite_path)\n except:\n print('Something went wrong with places.sqlite')\n exit(1)\n\n cursor = connection.cursor()\n if args.bm is not '':\n bookmarks(cursor,pattern=args.bm)\n if args.hist is not '' :\n history(cursor, pattern=args.hist)\n\n cursor.close()\n","sub_path":"ffs_main.py","file_name":"ffs_main.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"407475188","text":"#!/usr/bin/python\n# Jake Kosberg\n# http://github.com/node-/tetra-master\n# Main Module\n\nimport os\nimport pygame\nfrom pygame.locals import *\n\n# Modules\nimport worldgen\nimport utils\nimport pieces\nimport worldgen\nimport events\n\n\n\ndef main():\n running = True\n pygame.init()\n screen = pygame.display.set_mode((640, 480))\n clock = pygame.time.Clock()\n pygame.display.set_caption('Tetra Master')\n board = pygame.image.load(utils.dirlock('../data/board.bmp'))\n world = worldgen.gen_world()\n cards = pieces.get_cards() \n selection = False\n\n while running:\n clock.tick(60)\n screen.blit(board,(0,0))\n myfont = pygame.font.Font(utils.dirlock(\"../data/Trajan-Bold.ttf\"), 30)\n mx,my = pygame.mouse.get_pos()\n mouse_pos = (mx, my)\n pygame.draw.rect(screen, (100,40,80), (150,40, 336, 408))\n for a in 
world:\n for b in a:\n pygame.draw.rect(screen, (0,0,0), (b.xpos, b.ypos, 84, 102))\n if b.card:\n b.draw(b.card.image)\n screen.blit(b.image, (b.xpos, b.ypos))\n for monster in cards:\n screen.blit(monster.image,(monster.xpos, monster.ypos))\n if monster.selected == True:\n current_selected = myfont.render(monster.name + \" is selected!\", 1, (255,255,255))\n screen.blit(current_selected, (100, 100))\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n elif event.type == MOUSEBUTTONDOWN:\n cards, selection = events.watch(cards,selection,mouse_pos,world) \n\n pygame.display.update()\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"337969109","text":"######################################################################################################\n# Recursive Feature Selection #\n######################################################################################################\n\nrfe_running = 1\n\ntry:\n selector = RFECV(model_select_best, step = step, cv = cv, scoring = scoring_function, verbose = 1)\n \n try: \n selector.fit(x_param, y_param) \n except: \n selector.fit(x_train, y_train)\n \n best_features = list(selector.support_)\n rfe_scores = list(selector.grid_scores_)\n \n # Filter best x values and re-convert data -------------------------------------------------------\n best_x_values = list()\n \n for i in range(0, len(best_features)):\n if best_features[i] == True:\n best_x_values.append(x_values[i])\n \n x_values = best_x_values\n json.dump(x_values, open(\"feature/\" + model[\"algorithm\"] + \"_x_values.csv\",'w'))\n exec(open(\"workflow/data_converter.py\").read())\nexcept:\n print(\"Selected algorithm is not supported by sklearn.feature_selection RFECV\")\n \nrfe_running = 0\n\nprint(\"Recursive Feature Selection done\")","sub_path":"framework/workflow/model_rfecv.py","file_name":"model_rfecv.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"41701921","text":"import amici\nimport petab\nimport pypesto\nimport pypesto.visualize\nimport numpy as np\nimport time\n\n# import function to save/import your results\nfrom save_functions import save_optimization_to_file, save_guesses, save_modelSelection, save_startpoints\nfrom import_functions import import_guesses, import_startpoints\n\n# import regularization routines\nfrom l1_regularization_computations import compute_regularization_path, \\\n compute_converged_points_single, \\\n compute_converged_points, \\\n compute_model_selection_criteria\n\n\n# IMPORT MODEL__________________________________________________________________________________________________________\n\nfolder_base = \"models/\"\nmodel_name = \"Crauste_CellSystems2017_logE_eps\"\n\npetab_problem = petab.Problem.from_folder(folder_base + model_name)\n\npetab_problem.model_name = 'Crauste_CellSystems2017_logE_eps'\n\nimporter = pypesto.PetabImporter(petab_problem)\nimporter.compile_model()\n\nmodel = importer.create_model()\n\nprint(\"Model parameters:\", list(model.getParameterIds()), '\\n')\nprint('Optimization parameters:', petab_problem.get_optimization_to_simulation_parameter_mapping(), '\\n')\nprint(\"Model outputs: \", list(model.getObservableIds()), '\\n')\nprint(\"Model states: \", list(model.getStateIds()), '\\n')\n\nprint('sigmas', 
petab_problem.get_sigmas())\n\n# SOLVER OPTIONS _______________________________________________________________________________________________________\n\nsolver = importer.create_solver(model)\n\n# play with the tolerance\nsolver.setRelativeTolerance(rtol=1e-10)\nsolver.setAbsoluteTolerance(atol=1e-10)\n\n# enable sensitivities\nsolver.setSensitivityOrder(amici.SensitivityOrder_first) # First-order ...\nsolver.setSensitivityMethod(amici.SensitivityMethod_forward) # ... forward sensitivities\nmodel.requireSensitivitiesForAllParameters() # ... w.r.t. all parameters\n\n# play with FSA tolerances\nsolver.setRelativeToleranceFSA(rtol=1e-10)\nsolver.setAbsoluteToleranceFSA(atol=1e-10)\n\n\n# CREATE LOG(1+x) OBJECTIVE FUNCTION ___________________________________________________________________________________\n\nobj_lin = importer.create_objective(solver=solver)\n\n\n# define offset\neps = 1e-5\n\nf = lambda x: obj_lin.get_fval(10**np.array(x) - eps)\ng = lambda x: obj_lin.get_grad(10**np.array(x) - eps) * 10**np.array(x) * np.log(10)\n\nobj = pypesto.Objective(fun=f, grad=g)\n\nprint('optimal x = ', petab_problem.x_nominal)\nprint('optimal lh value', obj(petab_problem.x_nominal))\n\n\n# check gradient at optimum and at random point\ncheck_grad_1 = obj.check_grad(petab_problem.x_nominal)\nprint(check_grad_1[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])\n\nx_random = np.random.normal(0.5, 0.005, 12)\ncheck_grad_2 = obj.check_grad(x_random)\nprint(check_grad_2[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])\n\n\n\n# OPTIMIZATION WITHOUT PRIOR ___________________________________________________________________________________________\n\noptimizer = pypesto.ScipyOptimizer(method='L-BFGS-B')\n\n# play with optimization options\noptimizer.options = {'maxiter': 1e5, 'ftol': 1e-10, 'gtol': 1e-10, 'maxls': 80}\n# default:\n# optimizer.options = {'maxcor': 10, 'ftol': 1e-10, 'gtol': 1e-05, 'eps': 1e-08, 'maxfun': 1e5,\n# 'maxiter': 1e5, 'maxls': 20}\n\nproblem = importer.create_problem(obj)\nengine = pypesto.SingleCoreEngine()\nn_starts = 10\nstart = time.time()\nresult = pypesto.minimize(problem=problem,\n optimizer=optimizer,\n n_starts=n_starts,\n engine=engine)\nend = time.time()\n\nprint('\\nbest parameter: ', result.optimize_result.as_list('x')[0]['x'])\nprint('best likelihood value: ', obj(result.optimize_result.as_list('x')[0]['x']))\n\n# calculate computation time\ncomp_time = end - start\n\n# calculate converged points\nconv_points = compute_converged_points_single(result=result)\nprint('converted points: ', conv_points)\n\n\n# SAVE STARTPOINTS _____________________________________________________________________________________________________\n\n# calculate the startpoints\nstartpoints = result.optimize_result.get_for_key('x0')\n\n# save the startpoints\nsave_startpoints(result=result, path='startpoints/', file_name='logE_eps')\n\n\n# SAVE OPTIMIZATION RESULTS ____________________________________________________________________________________________\n\noptions = 'MODEL: Crauste model base, ' \\\n '\\nSCALE: log(x + ' + str(eps) + ')' + \\\n '\\nSTARTS: ' + str(n_starts) + \\\n '\\nCONV POINTS: ' + str(conv_points) + \\\n '\\nTIME: ' + str(comp_time)\n\n\n# specify path\npath = 'results_and_plots/optimization/logE_eps/'\n\n# file name is equal to starting points\nfile_name = str(n_starts)\n\n# save\nsave_optimization_to_file(result=result,\n n_start=n_starts,\n nominal_par=petab_problem.x_nominal,\n par_names=model.getParameterIds()[:12],\n opt_lh=obj(petab_problem.x_nominal),\n 
file_name=file_name,\n conv_points=conv_points,\n comp_time=comp_time,\n opt_interval=[problem.lb, problem.ub],\n startpoints=startpoints,\n options=options,\n path=path)\n","sub_path":"application_example/base_model/logE_eps_base.py","file_name":"logE_eps_base.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"487213571","text":"\"\"\"\n# Copyright 2020 ABHINAV RAWAT\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\"\"\"\nThis module is used to get some useful data about raspberry pi\n\"\"\"\n\nimport os\nimport psutil\nimport re\nimport subprocess\n\n\nclass PI:\n\n def get_ram_info(self):\n \"\"\"\n :return: Return RAM information (unit=kb) in a list\n Index 0: total RAM\n Index 1: used RAM\n Index 2: free RAM\n \"\"\"\n p = os.popen('free')\n i = 0\n while True:\n i = i + 1\n line = p.readline()\n if i == 2:\n return line.split()[1:4]\n\n def get_disk_space(self):\n \"\"\"\n :return: # Return information about disk space as a list (unit included)\n # Index 0: total disk space\n # Index 1: used disk space\n # Index 2: remaining disk space\n # Index 3: percentage of disk used\n \"\"\"\n p = os.popen(\"df -h /\")\n i = 0\n while 1:\n i = i + 1\n line = p.readline()\n if i == 2:\n return line.split()[1:5]\n\n def get_cpu_usage(self):\n \"\"\"\n :return: Return % of CPU used by user as a character string\n \"\"\"\n return str(psutil.cpu_percent())\n\n def get_connected_ip_addr(self, network):\n \"\"\"\n :param network: which network interface i.e. 
'wlan0', 'eth0'\n :return: string of ip\n \"\"\"\n cmd = \"/sbin/ifconfig \" + str(network) + \" | grep 'inet '\"\n resp = (subprocess.check_output(cmd, shell=True)).decode(\"utf-8\")\n ip = re.search('inet (.+) netmask', resp).group(1)\n return ip\n\n def get_cpu_temp(self):\n \"\"\"\n :return: float of cpu temp\n \"\"\"\n tFile = open('/sys/class/thermal/thermal_zone0/temp')\n temp = float(tFile.read())\n cpu_temp = temp / 1000\n return cpu_temp\n\n def get_wifi_status(self):\n \"\"\"\n :return: return list of [ssid, signal quality, signal level, signal percentage]\n \"\"\"\n ssid = os.popen(\"iwgetid -r\").read()\n ssid = ssid.rstrip(\"\\n\")\n\n cmd1 = \"iwconfig wlan0 | grep -i quality\"\n res1 = (subprocess.check_output(cmd1, shell=True))\n resp = res1.decode(\"utf-8\")\n start = resp.index(\"k\") + len(\"k\")\n end = resp.index('S', start)\n strength = resp[start:end]\n signal_level = strength.replace(\"Quality=\", \"\")\n\n dat = signal_level\n actual = int(dat[:3])\n maxx = int(dat[4:6])\n wifi_percentage = int((actual / maxx) * 100)\n\n start1 = resp.index(\"S\") + len(\"S\")\n end1 = resp.index('m', start1)\n temp = resp[start1:end1]\n level = 'S' + temp\n signal_quality = level.replace(\"Signal level=\", \"\")\n return [ssid, signal_quality.strip(), signal_level.strip(), wifi_percentage]\n\n\nif __name__ == '__main__':\n PI()\n","sub_path":"pyembedded/raspberry_pi_tools/raspberrypi.py","file_name":"raspberrypi.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"154199068","text":"# url = 'https://www.swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AWMedCxalW8DFAXd&categoryId=AWMedCxalW8DFAXd&categoryType=CODE'\n\nfor tc in range(int(input())):\n N = int(input())\n lst = list(int(input()) for _ in range(N))\n for i in range(1, N):\n lst[i] = lst[i] - 1\n\n cnt = 0\n diff = lst[1:]\n while diff:\n cnt += 1\n min_ = diff.pop(diff.index(min(diff)))\n diff = [i for i in diff if i % min_]\n print(f'#{tc+1} {cnt}')","sub_path":"SW Expert Academy/Difficulty_3/4371. 항구에 들어오는 배.py","file_name":"4371. 
항구에 들어오는 배.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"374732064","text":"import json\n\nfrom django.db.models import Q\nfrom django.db.models.functions import Lower, Concat\nfrom django.forms import model_to_dict, modelformset_factory\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom django.views.generic import CreateView, TemplateView, DeleteView, UpdateView\n\nfrom system.forms import MenuUpdateForm, LoginLogUpdateForm\nfrom system.models import LoginLog\nfrom utils.Paginator import paginate\n\n\nclass LoginLogMainView(TemplateView):\n template_name = 'system/log/login.html'\n\n\nclass LoginLogListView(View):\n def get(self, request):\n code = 1\n msg = '获取失败'\n total = 0\n result = []\n try:\n code = 0\n msg = '获取成功'\n fields = ['id', 'user', 'user__username', 'type', 'ip', 'status', 'login_time']\n data = LoginLog.objects.values(*fields)\n total = len(data)\n result = paginate(request, data)\n except Exception as e:\n msg = str(e)\n ret = dict(code=code, msg=msg, count=total, data=result)\n return JsonResponse(ret)\n\n\nclass LoginLogSearchView(View):\n def post(self, request):\n code = 1\n msg = '搜索失败'\n total = 0\n result = []\n try:\n code = 0\n msg = '获取成功'\n data = request.POST.get('data', None)\n fields = ['id', 'user', 'user__username', 'type', 'ip', 'status', 'login_time']\n data = LoginLog.objects.filter(Q(user__username__contains=data) | Q(ip__contains=data)).values(*fields)\n total = len(data)\n result = paginate(request, data)\n except Exception as e:\n msg = str(e)\n ret = dict(code=code, msg=msg, count=total, data=result)\n return JsonResponse(ret)","sub_path":"apps/system/views_log.py","file_name":"views_log.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"204725790","text":"from flask import Flask,jsonify\nfrom flask_cors import CORS\nfrom yeelight import Bulb\nylight = Bulb('192.168.8.23')\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/')\ndef get_json_data():\n data = Bulb.get_properties(ylight)\n power = data['power']\n return jsonify({\"power\":power,\"color\":data['rgb']}) \n\nif __name__ == '__main__':\n app.run\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"143778838","text":"\n\n\ndef count(N):\n if N == 0:\n return \"INSOMNIA\"\n s = set()\n i = 0\n while len(s) < 10:\n i += 1\n [s.add(c) for c in str(N*i)]\n return N*i\n\n\nn = int(input())\nfor i in range(n):\n N = int(input())\n print(\"Case #{}: {}\".format(i+1, count(N)))\n\n","sub_path":"codes/CodeJamCrawler/16_0_1/erb/counting_sheep.py","file_name":"counting_sheep.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"537239126","text":"s = str(input())\nor_len = len(s)\ns2 = s + s\n\nd = dict()\n\nfor i in range(or_len, len(s2)+1):\n if s2[i-or_len:i] in d.keys():\n continue\n else:\n d[s2[i-or_len:i]] = True\n\nprint(len(d.keys()))\n \n","sub_path":"CodeForce/190223/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"212603712","text":"'''\ncount = 0\nfor c in s:\n\tif c is 'a' or c is 'e' or c is 'i' or c is 'o' or c is 
'u':\n\t\tcount += 1\nprint(\"Number of vowels: \" + str(count))\n'''\n'''\nmask = 'bob'\ncount = 0\nfor i in range(0, len(s)):\n\tif mask == s[i:i+3]:\n\t\tcount += 1\nprint(\"Number of times 'bob' occurs is: \" + str(count))\n'''\n\ns = 'abcdefghijklmnopqrstuvwxyz'\n# a, b are 2 pointers for s\nstart, end, Max, a, b= 0, 0, 0, 0, 0\n\nfor i in range(0, len(s) - 1):\n\t# increment b if ascending\n\tif s[i] <= s[i + 1]:\n\t\tb += 1\n\telse:\n\t\t# when cut, update Max, start, end\n\t\tif b - a + 1 > Max:\n\t\t\tMax = b - a + 1\n\t\t\tstart = a\n\t\t\tend = b\n\t\t# move a, b to next position\n\t\ta = b + 1\n\t\tb = a\n\n# longest one may include the tail\nif b - a + 1 > Max:\n\tstart = a\n\tend = b\nprint(\"Longest substring in alphabetical order is: \" + s[start:end + 1])\n\n'''\nr, c = '', ''\nfor char in s:\n if (c == ''):\n c = char\n elif (c[-1] <= char):\n c += char\n elif (c[-1] > char):\n if (len(r) < len(c)):\n r = c\n c = char\n else:\n c = char\nif len(c) > len(r):\n r = c\nprint(r)\n'''","sub_path":"Other/MIT_ICSP/week1.py","file_name":"week1.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"154409371","text":"#!/usr/bin/env python3\n#################################################################################################\n## Create a plot on a map with irregularly spaced data stored in netcdf files, examples:\n## 1. python plot_snapshot_nc.py -i file_name.nc -v vname \n## 1D variable is assumed to be stationary vector data. Also assume lon/lat and the variable\n## can be found in the same file. For 2D(vector,time) and 3D(lon,lat,time) variable, the \n## code creates a geographical plot with data averaged over the time domain.\n## 2. python plot_snapshot_nc.py -i data_name.nc -v vname1-vname2 -t 2,-3 -factor 86400\n## Make a graphical plot of difference, multiplied by 86400, between vname1 and vname2 and\n## averaged over the time segment from the second record to the third last record.\n## 3. python plot_snapshot_nc.py -i data.nc -g geo.nc -v vname -lon lon1,lon2 -lat lat1,lat2\n## The code will make a geographical plot over a specific spatial domain. The variable vname\n## is assumed to be stored in data.nc while the geographic information is stored in geo.nc.\n## If the files are located in the same directory, the directory path just needs to be\n## specified in one file.\n## 4. python plot_snapshot_nc.py -i data.nc -g geo.nc -v vname -s no -t 128\n## The code makes a geographical plot for the time step 128 and output it to a png file.\n## Author: Zhichang Guo, email: Zhichang.Guo@noaa.gov\n###############################################################################################\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport netCDF4 as nc\nimport numpy as np\nimport argparse\nimport glob\nimport os\nimport sys\nimport ntpath\n\ndef lonF2S(flon):\n flon = int(flon*100)/100.\n if flon > 180:\n flon = 360. 
- flon\n flon = int(flon*100)/100.\n strLon = str(flon) + 'W'\n elif flon < 0.0:\n strLon = str(-flon) + 'W'\n else:\n strLon = str(flon) + 'E'\n return strLon\n\ndef lonS2F(strLon):\n strLon = strLon.upper()\n strLon.replace('E','')\n if 'W' in strLon:\n flon = -1.0*float(strLon.replace('W',''))\n else:\n flon = float(strLon)\n return flon\n\ndef latF2S(flat):\n flat = int(flat*100)/100.\n if flat < 0.0:\n strLat = str(-flat) + 'S'\n else:\n strLat = str(flat) + 'N'\n return strLat\n\ndef latS2F(strLat):\n strLat = strLat.upper()\n strLat.replace('N','')\n if 'S' in strLat:\n flat = -1.0*float(strLat.replace('S',''))\n else:\n flat = float(strLat)\n return flat\n\ndef tstepS2I(strTStep, tds):\n if ',' in strTStep:\n tsteps = strTStep.split(',')\n tstepBeg = int(tsteps[0])\n tstepEnd = int(tsteps[1])\n if tstepBeg < 0:\n tstepBeg += tds + 1\n if tstepEnd < 0:\n tstepEnd += tds + 1\n tstepBeg = min(tstepBeg,tds)\n tstepEnd = min(tstepEnd,tds)\n else:\n tstepBeg = 0\n tstepEnd = tds\n if tstepBeg > tstepEnd:\n return tstepEnd, tstepBeg\n else:\n return tstepBeg, tstepEnd\n\ndef plot_world_map(lons, lats, data, metadata, plotpath, screen, lonr, latr, comment):\n # plot generic world map\n if screen.upper() == \"NO\":\n matplotlib.use('agg')\n fig = plt.figure(figsize=(12,8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree(central_longitude=0))\n ax.add_feature(cfeature.GSHHSFeature(scale='auto'))\n listWE = lonr.split(',')\n listSN = latr.split(',')\n lonBeg = lonS2F(listWE[0])\n lonEnd = lonS2F(listWE[1])\n latBeg = latS2F(listSN[0])\n latEnd = latS2F(listSN[1])\n scaleWE = 360/(lonEnd-lonBeg)*10\n scaleSN = 180/(latEnd-latBeg)*10\n scaleMax = max(scaleWE,scaleSN)\n ax.set_extent([lonBeg, lonEnd, latBeg, latEnd])\n vmax = np.nanmean(data)+np.nanstd(data)*2\n vmin = np.nanmean(data)-np.nanstd(data)*2\n cmap = 'viridis'\n cbarlabel = '%s' % (metadata['var'])\n if comment == '':\n plttitle = 'Variable: %s' % (metadata['var'])\n else:\n plttitle = 'Variable: %s; %s' % (metadata['var'], comment)\n cs = plt.scatter(lons, lats, c=data, s=scaleMax,\n cmap=cmap, transform=ccrs.PlateCarree(),vmin=vmin,vmax=vmax)\n cb = plt.colorbar(cs, orientation='horizontal', shrink=0.5, pad=.04)\n cb.set_label(cbarlabel, fontsize=12)\n plt.title(plttitle)\n if screen.upper() == \"NO\":\n plt.savefig(plotpath)\n plt.close('all')\n else:\n plt.show()\n\ndef read_var(datapath, geopath, varname, tstep, fov, fact):\n obsfiles = glob.glob(datapath)\n geofile = glob.glob(geopath)\n opath, obsfname = ntpath.split(datapath)\n gpath, geofname = ntpath.split(geopath)\n geofile_new = geofile\n obsfiles_new = obsfiles\n if opath == '' and gpath == '':\n cpath = os.getcwd()\n obsfiles_new = glob.glob(os.path.join(cpath,datapath))\n geofile_new = glob.glob(os.path.join(cpath,geofname))\n elif opath != '' and gpath == '':\n geofile_new = np.append(geofile_new, [os.path.join(opath,geofname)])\n elif opath == '' and gpath != '':\n obsfiles_new = glob.glob(os.path.join(gpath,datapath))\n lats = np.array([])\n lons = np.array([])\n data = np.array([])\n if not geopath == \"\":\n for g in geofile_new:\n geonc = nc.Dataset(g)\n lattmp = geonc.variables['latitude'][:]\n lontmp = geonc.variables['longitude'][:]\n lats = np.concatenate((lats,lattmp))\n lons = np.concatenate((lons,lontmp))\n geonc.close()\n for f in obsfiles_new:\n datanc = nc.Dataset(f)\n if geopath == \"\":\n lattmp = datanc.variables['latitude'][:]\n lontmp = datanc.variables['longitude'][:]\n lats = np.concatenate((lats,lattmp))\n lons = 
np.concatenate((lons,lontmp))\n if '-' in varname:\n varnames = varname.split('-')\n datatmp = datanc.variables[varnames[0]][:]\n datatmp2 = datanc.variables[varnames[1]][:]\n if len(datatmp.shape) == 1:\n for lid in range(len(datatmp)):\n datatmp[lid] -= datatmp2[lid]\n elif len(datatmp.shape) == 2:\n for tid in range(len(datatmp)):\n for lid in range(len(datatmp[0])):\n datatmp[tid][lid] -= datatmp2[tid][lid]\n elif len(datatmp.shape) == 3:\n for tid in range(len(datatmp)):\n for yid in range(len(datatmp[0])):\n for xid in range(len(datatmp[0][0])):\n datatmp[tid][yid][xid] -= datatmp2[tid][yid][xid]\n else:\n sys.exit(\"cannot handle variables with dimensions more than 3 (lon,lat,time or vector,time or time)\")\n else:\n datatmp = datanc.variables[varname][:]\n datanc.close()\n dims = len(datatmp.shape)\n if dims == 1:\n data = np.concatenate((data,datatmp))\n comment = ''\n elif dims == 2:\n if 'VECTOR' in fov.upper():\n tds = len(datatmp) - 1\n lds = len(datatmp[0])\n if ',' in tstep:\n tstepBeg, tstepEnd = tstepS2I(tstep, tds)\n tsteps = tstepEnd - tstepBeg + 1\n data = np.zeros(lds)\n for lid in range(lds):\n cnt = 0.0\n for i in range(tsteps):\n tid = i + tstepBeg\n data[lid] += datatmp[tid][lid]\n cnt += 1.0\n if cnt > 0.5:\n data[lid] /= cnt\n else:\n data[lid] = -999999.99\n comment = 'Time average: %s - %s' % (str(tstepBeg), str(tstepEnd))\n else:\n timestep = int(tstep)\n if timestep < 0:\n timestep += len(datatmp)\n timestep = max(timestep,0)\n timestep = min(timestep,tds)\n data = np.concatenate((data,datatmp[timestep]))\n comment = 'Time step: %s of 0 - %s' % (str(timestep), tds)\n elif 'FIELD' in fov.upper():\n yds = len(datatmp)\n xds = len(datatmp[0])\n lons_new = np.zeros((yds*xds))\n lats_new = np.zeros((yds*xds))\n data_new = np.zeros((yds*xds))\n for yid in range(yds):\n for xid in range(xds):\n lid = yid*xds + xid\n lons_new[lid] = lons[xid]\n lats_new[lid] = lats[yid]\n data_new[lid] = datatmp[yid][xid]\n lons = lons_new\n lats = lats_new\n data = data_new\n comment = 'Stationary Field'\n else:\n sys.exit(\"Error: invalid fov option\")\n elif dims == 3:\n tds = len(datatmp) - 1\n yds = len(datatmp[0])\n xds = len(datatmp[0][0])\n if ',' in tstep:\n tstepBeg, tstepEnd = tstepS2I(tstep, tds)\n tsteps = tstepEnd - tstepBeg + 1\n data = np.zeros((yds,xds))\n for yid in range(yds):\n for xid in range(xds):\n cnt = 0.0\n for i in range(tsteps):\n tid = i + tstepBeg\n data[yid][xid] += datatmp[tid][yid][xid]\n cnt += 1.0\n if cnt > 0.5:\n data[yid][xid] /= cnt\n else:\n data[yid][xid] = -999999.99\n comment = 'Time average: %s - %s' % (str(tstepBeg), str(tstepEnd))\n else:\n timestep = int(tstep)\n if timestep < 0:\n timestep += len(datatmp)\n timestep = max(timestep,0)\n timestep = min(timestep,tds)\n data = np.concatenate((data,datatmp[timestep]))\n comment = 'Time step: %s of 0 - %s' % (str(timestep), tds)\n lons_new = np.zeros((yds*xds))\n lats_new = np.zeros((yds*xds))\n data_new = np.zeros((yds*xds))\n for yid in range(yds):\n for xid in range(xds):\n lid = yid*xds + xid\n lons_new[lid] = lons[xid]\n lats_new[lid] = lats[yid]\n data_new[lid] = data[yid][xid]\n lons = lons_new\n lats = lats_new\n data = data_new\n else:\n sys.exit(\"cannot handle variables with dimensions more than 2 (vector, time)\")\n if not fact == '1':\n factor = float(fact)\n for lid in range(len(data)):\n data[lid] *= factor\n comment += '; Factor: %s'%(fact)\n return data, lons, lats, comment\n\ndef gen_figure(inpath, geopath, outpath, varname, screen, tstep, fov, lonr, latr, fact):\n # read 
the files to get the 2D array to plot\n data, lons, lats, comment = read_var(inpath, geopath, varname, tstep, fov, fact)\n plotpath = outpath+'/%s.png' % (varname)\n metadata = {\n 'var': varname\n }\n plot_world_map(lons, lats, data, metadata, plotpath, screen, lonr, latr, comment)\n\nif __name__ == \"__main__\":\n ap = argparse.ArgumentParser()\n ap.add_argument('-o', '--output', help=\"path to output directory\", default=\"./\")\n ap.add_argument('-i', '--input', help=\"path to the input file\", required=True)\n ap.add_argument('-g', '--geo', help=\"path to the geographic info file\", default=\"\")\n ap.add_argument('-v', '--variable', help=\"variable name to plot\", required=True)\n ap.add_argument('-s', '--screen', help=\"no if plot to file\", default=\"yes\")\n ap.add_argument('-t', '--tstep', help=\"time step for plotting\", default=\"0,-1\")\n ap.add_argument('-f', '--fov', help=\"field or vector data\", default=\"vector\")\n ap.add_argument('-lon', '--longitude', help=\"longitude range for plotting\", default=\"-180,180\")\n ap.add_argument('-lat', '--latitude', help=\"latitude range for plotting\", default=\"-90,90\")\n ap.add_argument('-factor', '--fact', help=\"factor for units conversion\", default=\"1\")\n MyArgs = ap.parse_args()\n gen_figure(MyArgs.input, MyArgs.geo, MyArgs.output, MyArgs.variable, MyArgs.screen, MyArgs.tstep, MyArgs.fov, MyArgs.longitude, MyArgs.latitude, MyArgs.fact)\n","sub_path":"plot_snapshot_nc.py","file_name":"plot_snapshot_nc.py","file_ext":"py","file_size_in_byte":12665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"26648417","text":"import importlib\nfrom collections import namedtuple\nfrom math import pi, sqrt\nfrom typing import Tuple, Optional, Sequence, Dict, Any\n\nimport torch\n\nfrom torch import Tensor\n\nimport numpy as np\n\nstd_normal = torch.distributions.Normal(0, 1)\n\n_base = namedtuple('Cens', field_names=['obs', 'lower', 'upper'])\n_base.__new__.__defaults__ = (None, None)\n\n\nclass Cens(_base):\n\n def _for_fill(self, x):\n return x is None or isinstance(x, (int, float))\n\n def to_array(self) -> np.ndarray:\n obs = self._standardize_array(self.obs, self.lower, self.upper)\n\n stack = getattr(importlib.import_module(type(obs).__module__), 'stack')\n full_like = getattr(importlib.import_module(type(obs).__module__), 'full_like')\n\n if len(obs.shape) != 1:\n raise RuntimeError(\"Cannot convert to array unless len(self.obs.shape) is 1.\")\n\n if self._for_fill(self.lower):\n lower = full_like(obs, -float('inf') if self.lower is None else self.lower)\n else:\n lower = self._standardize_array(self.lower)\n if obs.shape != lower.shape:\n raise RuntimeError(\"obs.shape != lower.shape\")\n\n if self._for_fill(self.upper):\n upper = full_like(obs, float('inf') if self.upper is None else self.upper)\n else:\n upper = self._standardize_array(self.upper)\n if obs.shape != upper.shape:\n raise RuntimeError(\"obs.shape != upper.shape\")\n\n if (lower == upper).any():\n raise RuntimeError(\"lower cannot == upper\")\n\n arr = stack([obs, lower, upper], 1)\n return arr\n\n def _standardize_array(self, x: Any, lower: Any = None, upper: Any = None):\n if isinstance(x, (float, int)):\n if isinstance(lower, (torch.Tensor, np.ndarray)) or hasattr(lower, 'values'):\n template = self._standardize_array(lower)\n elif isinstance(upper, (torch.Tensor, np.ndarray)) or hasattr(upper, 'values'):\n template = self._standardize_array(upper)\n else:\n raise ValueError(f\"Could not interpret as 
array:{x}\")\n full_like = getattr(importlib.import_module(type(template).__module__), 'full_like')\n return full_like(template, x)\n\n if not isinstance(x, (torch.Tensor, np.ndarray)) and isinstance(getattr(x, 'values', None), np.ndarray):\n return x.values\n return x\n\n\ndef tobit_adjustment(mean: Tensor,\n cov: Tensor,\n lower: Optional[Tensor] = None,\n upper: Optional[Tensor] = None,\n probs: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:\n assert cov.shape[-1] == cov.shape[-2] # symmetrical\n\n if upper is None:\n upper = torch.full_like(mean, float('inf'))\n if lower is None:\n lower = torch.full_like(mean, -float('inf'))\n\n assert lower.shape == upper.shape == mean.shape\n\n is_cens_up = torch.isfinite(upper)\n is_cens_lo = torch.isfinite(lower)\n\n if not is_cens_up.any() and not is_cens_lo.any():\n return mean, cov\n\n F1, F2 = _F1F2(mean, cov, lower, upper)\n\n std = torch.diagonal(cov, dim1=-2, dim2=-1).sqrt()\n sqrt_pi = pi ** .5\n\n # prob censoring:\n if probs is None:\n prob_lo, prob_up = tobit_probs(mean=mean,\n cov=cov,\n lower=lower,\n upper=upper)\n else:\n prob_lo, prob_up = probs\n\n # adjust mean:\n lower_adj = torch.zeros_like(mean)\n lower_adj[is_cens_lo] = prob_lo[is_cens_lo] * lower[is_cens_lo]\n upper_adj = torch.zeros_like(mean)\n upper_adj[is_cens_up] = prob_up[is_cens_up] * upper[is_cens_up]\n mean_if_uncens = mean + (sqrt(2. / pi) * F1) * std\n mean_uncens_adj = (1. - prob_up - prob_lo) * mean_if_uncens\n mean_adj = mean_uncens_adj + upper_adj + lower_adj\n\n # adjust cov:\n diag_adj = torch.zeros_like(mean)\n for m in range(mean.shape[-1]):\n diag_adj[..., m] = (1. + 2. / sqrt_pi * F2[..., m] - 2. / pi * (F1[..., m] ** 2)) * cov[..., m, m]\n\n cov_adj = torch.diag_embed(diag_adj)\n\n return mean_adj, cov_adj\n\n\ndef tobit_probs(mean: Tensor,\n cov: Tensor,\n lower: Optional[Tensor] = None,\n upper: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:\n # CDF not well behaved at tails, truncate\n clamp = lambda z: torch.clamp(z, -5., 5.)\n\n if upper is None:\n upper = torch.empty_like(mean)\n upper[:] = float('inf')\n if lower is None:\n lower = torch.empty_like(mean)\n lower[:] = float('-inf')\n\n std = torch.diagonal(cov, dim1=-2, dim2=-1)\n probs_up = torch.zeros_like(mean)\n is_cens_up = torch.isfinite(upper)\n upper_z = (upper[is_cens_up] - mean[is_cens_up]) / std[is_cens_up]\n probs_up[is_cens_up] = 1. - std_normal.cdf(clamp(upper_z))\n\n probs_lo = torch.zeros_like(mean)\n is_cens_lo = torch.isfinite(lower)\n lower_z = (lower[is_cens_lo] - mean[is_cens_lo]) / std[is_cens_lo]\n probs_lo[is_cens_lo] = std_normal.cdf(clamp(lower_z))\n\n return probs_lo, probs_up\n\n\ndef erfcx(x: Tensor) -> Tensor:\n \"\"\"M. M. Shepherd and J. G. 
Laframboise,\n MATHEMATICS OF COMPUTATION 36, 249 (1981)\n \"\"\"\n\n K = 3.75\n y = (torch.abs(x) - K) / (torch.abs(x) + K)\n y2 = 2.0 * y\n (d, dd) = (-0.4e-20, 0.0)\n (d, dd) = (y2 * d - dd + 0.3e-20, d)\n (d, dd) = (y2 * d - dd + 0.97e-19, d)\n (d, dd) = (y2 * d - dd + 0.27e-19, d)\n (d, dd) = (y2 * d - dd + -0.2187e-17, d)\n (d, dd) = (y2 * d - dd + -0.2237e-17, d)\n (d, dd) = (y2 * d - dd + 0.50681e-16, d)\n (d, dd) = (y2 * d - dd + 0.74182e-16, d)\n (d, dd) = (y2 * d - dd + -0.1250795e-14, d)\n (d, dd) = (y2 * d - dd + -0.1864563e-14, d)\n (d, dd) = (y2 * d - dd + 0.33478119e-13, d)\n (d, dd) = (y2 * d - dd + 0.32525481e-13, d)\n (d, dd) = (y2 * d - dd + -0.965469675e-12, d)\n (d, dd) = (y2 * d - dd + 0.194558685e-12, d)\n (d, dd) = (y2 * d - dd + 0.28687950109e-10, d)\n (d, dd) = (y2 * d - dd + -0.63180883409e-10, d)\n (d, dd) = (y2 * d - dd + -0.775440020883e-09, d)\n (d, dd) = (y2 * d - dd + 0.4521959811218e-08, d)\n (d, dd) = (y2 * d - dd + 0.10764999465671e-07, d)\n (d, dd) = (y2 * d - dd + -0.218864010492344e-06, d)\n (d, dd) = (y2 * d - dd + 0.774038306619849e-06, d)\n (d, dd) = (y2 * d - dd + 0.4139027986073010e-05, d)\n (d, dd) = (y2 * d - dd + -0.69169733025012064e-04, d)\n (d, dd) = (y2 * d - dd + 0.490775836525808632e-03, d)\n (d, dd) = (y2 * d - dd + -0.2413163540417608191e-02, d)\n (d, dd) = (y2 * d - dd + 0.9074997670705265094e-02, d)\n (d, dd) = (y2 * d - dd + -0.26658668435305752277e-01, d)\n (d, dd) = (y2 * d - dd + 0.59209939998191890498e-01, d)\n (d, dd) = (y2 * d - dd + -0.84249133366517915584e-01, d)\n (d, dd) = (y2 * d - dd + -0.4590054580646477331e-02, d)\n d = y * d - dd + 0.1177578934567401754080e+01\n\n result = d / (1.0 + 2.0 * torch.abs(x))\n x_neg = (x < 0)\n result[x_neg] = 2.0 * torch.exp(x[x_neg] ** 2) - result[x_neg]\n\n return result\n\n\ndef _F1F2_no_inf(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor]:\n if (x.abs() > 3).any() or (y.abs() > 3).any():\n raise RuntimeError(\"_F1F2_no_inf not stable for inputs with abs(value) > 3\")\n\n numer_1 = torch.exp(-x ** 2) - torch.exp(-y ** 2)\n numer_2 = x * torch.exp(-x ** 2) - y * torch.exp(-y ** 2)\n denom = torch.erf(y) - torch.erf(x)\n\n F1 = numer_1 / denom\n F2 = numer_2 / denom\n return F1, F2\n\n\ndef _F1F2(mean: Tensor,\n cov: Tensor,\n lower: Tensor,\n upper: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"\n See https://github.com/cossio/TruncatedNormal.jl/blob/5e72b6abc8f2ce7aed8147e629b4c8dd5040a8bd/notes/normal.pdf\n \"\"\"\n is_cens_up = torch.isfinite(upper)\n is_cens_lo = torch.isfinite(lower)\n\n std = torch.diagonal(cov, dim1=-2, dim2=-1).sqrt()\n\n # mask out the infs before any gradients are being tracked:\n alpha = torch.zeros_like(mean)\n alpha[is_cens_lo] = (lower[is_cens_lo] - mean[is_cens_lo]) / std[is_cens_lo]\n beta = torch.zeros_like(mean)\n beta[is_cens_up] = (upper[is_cens_up] - mean[is_cens_up]) / std[is_cens_up]\n\n # _F1F2_no_inf unstable for large z-scores, so use the lim(+/-inf) version for those as well\n is_cens_up = is_cens_up & (beta.data < 4.)\n is_cens_lo = is_cens_lo & (alpha.data > -4.)\n is_cens_both = is_cens_up & is_cens_lo\n\n #\n sqrt_2 = 2. ** .5\n x = alpha / sqrt_2\n y = beta / sqrt_2\n\n # uncensored\n F1, F2 = torch.zeros_like(mean), torch.zeros_like(mean)\n\n # censored both:\n F1[is_cens_both], F2[is_cens_both] = _F1F2_no_inf(x[is_cens_both], y[is_cens_both])\n\n # censored lower, uncensored upper:\n F1[is_cens_lo & ~is_cens_up] = 1. 
/ erfcx(x[is_cens_lo & ~is_cens_up])\n F2[is_cens_lo & ~is_cens_up] = x[is_cens_lo & ~is_cens_up] / erfcx(x[is_cens_lo & ~is_cens_up])\n\n # uncensored lower, censored upper:\n F1[~is_cens_lo & is_cens_up] = -1. / erfcx(-y[~is_cens_lo & is_cens_up])\n F2[~is_cens_lo & is_cens_up] = -y[~is_cens_lo & is_cens_up] / erfcx(-y[~is_cens_lo & is_cens_up])\n\n return F1, F2\n","sub_path":"torch_kalman/state_belief/families/censored_gaussian/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"43614589","text":"#-------------------------------------------------------------------------------\r\n# Name: DallasCityScraper\r\n# Purpose: To automate retrieval of data from website\r\n# http://dallascityhall.com/departments/sustainabledevelopment/buildinginspection/Pages/permit_reports2.aspx'\r\n# This site accepts a search string like 'Comcast' and takes you to page\r\n# http://licensing.copyright.gov/search/DisplayLegalName.jsp\r\n# From this page, user can select the actual company name and view\r\n# associated information (ID number, first community and state) on page\r\n# http://licensing.copyright.gov/search/SelectCommunity.jsp\r\n# From this page user select the community,\r\n# then click appropriate button to view either filing period or\r\n# associated communities information.\r\n# Get Filing Periods take you to page\r\n# http://licensing.copyright.gov/search/SelectFilingPeriod2.jsp\r\n# Get Associated communities takes you to page:\r\n# http://licensing.copyright.gov/search/DisplayAssociatedCommunities.jsp\r\n# You can go to the associated communities page given beow from\r\n# \"Filing Periods\" page as well\r\n# http://licensing.copyright.gov/search/DisplayAssociatedCommunities2.jsp\r\n# Author: AnilM\r\n#\r\n# Created: 06/02/2015\r\n# Copyright: (c) AnilM 2015\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\nfrom __future__ import ( division, absolute_import, print_function, unicode_literals )\r\n\r\nimport sys, os, tempfile, logging\r\n\r\nif sys.version_info >= (3,):\r\n import urllib.request as urllib2\r\n import urllib.parse as urlparse\r\nelse:\r\n import urllib2\r\n import urlparse\r\nimport mechanize\r\nimport cookielib\r\nfrom bs4 import BeautifulSoup\r\nimport html2text\r\nimport twill.commands\r\nimport re,sys\r\nimport argparse\r\nimport csv\r\nimport datetime\r\nimport time\r\n\r\ndef reopenSite(siteUrl):\r\n status = 0\r\n try:\r\n page = br.open(siteUrl)\r\n except (mechanize.HTTPError,mechanize.URLError) as e:\r\n time.sleep(25)\r\n status = 1\r\n\r\n if (status != 0):\r\n exit\r\n\r\n html = page.read()\r\n soup = BeautifulSoup(html)\r\n# print (soup)\r\n # Select the first (index zero) form\r\n br.select_form(nr=0)\r\n\r\n #disclaimerForm = soup.find(\"form\", {\"name\":\"main\"})\r\n\r\n #control = br.form.find_control(\"disclaimer\")\r\n #control.selected=True\r\n for i in range(0, len(br.find_control(type=\"checkbox\").items)):\r\n if \"modify\" not in str(br.find_control(type=\"checkbox\").items[i]):\r\n br.find_control(type=\"checkbox\").items[i].selected =True\r\n control1 = br.form.find_control(\"text\")\r\n control1.disabled=True\r\n br.submit()\r\n\r\n htmlnext = br.response().read()\r\n\r\n soup = BeautifulSoup(htmlnext)\r\n print (soup)\r\n pass\r\n\r\ndef openSite(siteUrl, destPath):\r\n status = 0\r\n try:\r\n page = br.open(siteUrl)\r\n except (mechanize.HTTPError,mechanize.URLError) as 
e:\r\n time.sleep(25)\r\n status = 1\r\n\r\n if (status != 0):\r\n exit\r\n\r\n html = page.read()\r\n soup = BeautifulSoup(html)\r\n print (soup)\r\n # Select the first (index zero) form\r\n br.select_form(nr=0)\r\n\r\n #disclaimerForm = soup.find(\"form\", {\"name\":\"main\"})\r\n\r\n #control = br.form.find_control(\"disclaimer\")\r\n #control.selected=True\r\n for i in range(0, len(br.find_control(type=\"checkbox\").items)):\r\n if \"modify\" not in str(br.find_control(type=\"checkbox\").items[i]):\r\n br.find_control(type=\"checkbox\").items[i].selected =True\r\n control1 = br.form.find_control(\"text\")\r\n control1.disabled=True\r\n br.submit()\r\n\r\n htmlnext = br.response().read()\r\n\r\n soup = BeautifulSoup(htmlnext)\r\n print (soup)\r\n date1 = datetime.date(2015, 3, 15)\r\n\r\n for num in range(1,375):\r\n try:\r\n br.select_form(nr=2)\r\n br.form['lastName'] = srchString\r\n br.form['site'] = ['CIVIL']\r\n br.form['courtSystem'] = ['C']\r\n br.form['partyType'] = ['DEF']\r\n d1 = date1.strftime(\"%m/%d/%Y\")\r\n print (d1) ;\r\n date1 = date1 + datetime.timedelta(days=1)\r\n br.form['filingDate'] = d1\r\n br.submit()\r\n htmlnext = br.response().read()\r\n soup = BeautifulSoup(htmlnext)\r\n except :\r\n #outFile.close()\r\n #outFile = open(legalNameFile,'a');\r\n reopenSite(siteUrl)\r\n continue\r\n #print (soup)\r\n\r\n# Find span for export CSV |\r\n#
Export options:\r\n exportSpan = soup.find(\"div\", {\"class\":\"exportlinks\"})\r\n #print (exportSpan)\r\n\r\n try:\r\n downLinks = exportSpan.findAll('a')\r\n link = downLinks[0]\r\n fileUrl = 'http://casesearch.courts.state.md.us'\r\n fileUrl += link['href']\r\n br.follow_link(text='CSV')\r\n csvdata = br.response().read()\r\n outFile.writelines( csvdata )\r\n response= br.back(2)\r\n htmlnext = response.read()\r\n #soup = BeautifulSoup(htmlnext)\r\n #print (soup)\r\n except :\r\n continue\r\n #print (soup)\r\n #download_or_extract(fileUrl, destPath )\r\n #outFile.close()\r\n pass\r\n\r\n\r\n# --------------------------------------------------------------------------\r\n# Main Start here.\r\n# --------------------------------------------------------------------------\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--url', help='url help')\r\nargs = parser.parse_args()\r\n\r\n# Browser\r\nbr = mechanize.Browser()\r\n\r\n# Cookie Jar\r\ncj = cookielib.LWPCookieJar()\r\nbr.set_cookiejar(cj)\r\n\r\n# Browser options\r\nbr.set_handle_equiv(True)\r\nbr.set_handle_gzip(True)\r\nbr.set_handle_redirect(True)\r\nbr.set_handle_referer(True)\r\nbr.set_handle_robots(False)\r\n\r\n# Follows refresh 0 but not hangs on refresh > 0\r\nbr.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\r\n\r\n# User-Agent (this is cheating, ok?)\r\nbr.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0')]\r\nbr.addheaders = [('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')]\r\n#print parser.parse_args(['--name'])\r\nmainSite = 'http://texasbar.com/'\r\n#mainSite = 'http://casesearch.courts.state.md.us/casesearch/processDisclaimer.jis'\r\ndestPath = 'C:\\\\Users\\\\amanchanda\\\\Downloads\\\\Research\\\\Legal\\\\'\r\n\r\ntry:\r\n\r\n openSite(mainSite, destPath)\r\n outFile.close()\r\nexcept (mechanize.HTTPError,mechanize.URLError) as e:\r\n outFile.close()\r\n exit ;\r\npass\r\n\r\n\r\n","sub_path":"CourtData/LegalEZ_01.py","file_name":"LegalEZ_01.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"461043406","text":"import socket\n\nclass connect:\n\n def __init__(self, addr, port):\n self.addr = addr\n self.port = port\n self.sock = socket.socket(socket.AF_INET, \n socket.SOCK_STREAM)\n self.sock.connect((self.addr, self.port))\n \n def query(self, msg):\n '''Request a specific message'''\n totalsent = 0\n while totalsent < len(msg):\n sent = self.sock.send(msg[totalsent:].encode())\n if sent == 0:\n raise RuntimeError(\"socket connection \"\n \"broken\")\n totalsent = totalsent + sent\n\n \n def response(self):\n '''returns the reponse from\n the query as a string'''\n chunks = []\n bytes_recd = 0\n while True:\n chunk = self.sock.recv(2048)\n chunks.append(chunk)\n bytes_recd = bytes_recd + len(chunk)\n if chunk == b'':\n break\n return b''.join(chunks)\n\n def close(self):\n self.sock.close()\n\ndef main():\n c = connect(socket.gethostbyname('hello-world.ca'), 80)\n c.query(\"GET / HTTP/1.1\\nHost: hello-world.ca\\n\\n\")\n print(c.response())\n c.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"384555638","text":"import time\n#------------------------------ Funciones 
--------------------------------------------\ndef introducirOpción():\n time.sleep(1.5)\n opción = input ('Escoja una opci��n:\\n 1. Ingresar 2 números\\n 2. Calcular sumatoria\\n 3. Convertir de kg a libras (se utiliza el primer número ingresado)\\n 4. Calcular de libras a kilogramos (se utiliza el primer número ingresado)\\n 5. Dividir los números\\n 6. Salir\\n \\n Su opción: ')\n return opción\n\ndef calcularSumatoria(num1, num2):\n suma = 0\n for valor in range(num1, num2):\n suma = suma + valor\n return suma\n\ndef convertirLb_Kg(num1):\n resultado = num1 / 2.205\n return resultado\n\ndef convertirKg_Lb(num1):\n resultado = num1 * 2.205\n return resultado\n\ndef dividirNumeros(num1, num2):\n solucion = num1 / num2\n return solucion\n\n#----------------------------------------- Programa Principal -------------------------------------------------------\n\nopción = input ('Escoja una opción:\\n 1. Ingresar 2 números\\n 2. Calcular sumatoria\\n 3. Convertir de kg a libras (se utiliza el primer número ingresado)\\n 4. Calcular de libras a kilogramos (se utiliza el primer número ingresado)\\n 5. Dividir los números\\n 6. Salir\\n \\n Su opción: ')\n\nwhile (opción != '6'):\n#------------------------------------------ Introducir números ------------------------------------------ \n if opción == '1':\n num1 = int(input('Introduzca el primer número: '))\n num2 = int(input('Introduzca el segundo número: '))\n opción = introducirOpción()\n \n#------------------------------------------ Sumatoria -------------------------------------------------------------- \n elif opción == '2':\n print('\\nEl resultado de la sumatoria es: ' + str(calcularSumatoria(num1,num2)) + '\\n')\n opción = introducirOpción()\n#------------------------------------------ Conversión lb - kg --------------------------------------------------- \n elif opción == '3':\n print('\\n'+ str(num1) + ' libras equivalen a ' + str(convertirLb_Kg(num1)) + ' kilogramos.\\n')\n opción = introducirOpción()\n#----------------------------------------- Conversión kg - lb ------------------------------------- \n elif opción == '4':\n print('\\n'+ str(num1) + ' kilogramos equivalen a ' + str(convertirKg_Lb(num1)) + ' libras.\\n')\n opción = introducirOpción()\n#----------------------------------------- División ---------------------------------------------------- \n elif opción == '5':\n if num2 == 0:\n print('\\nError, Segundo número no puede ser cero\\n')\n else:\n print('\\nLa resultado de la división es: ' + str(dividirNumeros(num1, num2)))\n opción = introducirOpción()\n\nprint('\\nMuchas gracias por usar este programa.\\n¡Tenga un bonito día! :D')\ntime.sleep(5)","sub_path":"Ejercicio_2.py","file_name":"Ejercicio_2.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"99918744","text":"# -*- coding: utf-8 -*-\nimport sys\nimport socket\nimport ssl\nimport json\nimport requests\nimport urllib # https://docs.python.org/3/library/urllib.parse.html\nimport uuid\nimport re\nimport docker\nfrom bs4 import BeautifulSoup\nimport config\nfrom tests.utils import *\nimport gettext\n_ = gettext.gettext\n\n\ndef run_test(langCode, url):\n \"\"\"\n Checking an URL against Sitespeed.io (Docker version). 
\n For installation, check out:\n - https://hub.docker.com/r/sitespeedio/sitespeed.io/\n - https://www.sitespeed.io\n \"\"\"\n arg = '--rm --shm-size=1g -b chrome --plugins.remove screenshot --browsertime.videoParams.createFilmstrip false --browsertime.chrome.args ignore-certificate-errors -n {0} {1}'.format(\n config.sitespeed_iterations, url)\n\n image = \"sitespeedio/sitespeed.io:latest\"\n\n language = gettext.translation(\n 'performance_sitespeed_io', localedir='locales', languages=[langCode])\n language.install()\n _ = language.gettext\n\n print(_('TEXT_RUNNING_TEST'))\n\n docker_client = docker.from_env()\n result = str(docker_client.containers.run(image, arg))\n result = result.replace('\\\\n', ' ')\n\n old_val = None\n old_val_unsliced = None\n result_dict = {}\n\n for line in result.split(' '):\n if old_val == 'speedindex' or old_val == 'load' or old_val == 'backendtime' or old_val == 'firstpaint' or old_val == 'firstvisualchange' or old_val == 'domcontentloaded' or old_val == 'visualcomplete85' or old_val == 'lastvisualchange' or old_val == 'rumspeedindex' or old_val == 'dominteractivetime' or old_val == 'domcontentloadedtime' or old_val == 'pageloadtime' or old_val == 'perceptualspeedindex':\n result_dict[old_val] = line.replace('ms', '')\n\n if line[:-1].lower() == 'requests':\n result_dict['requests'] = old_val_unsliced\n\n old_val = line[:-1].lower()\n old_val_unsliced = line\n\n if 's' in result_dict['speedindex']:\n \"\"\"\n Changes speedindex to a number if for instance 1.1s it becomes 1100\n \"\"\"\n result_dict['speedindex'] = int(\n float(result_dict['speedindex'].replace('s', '')) * 1000)\n\n speedindex = int(result_dict['speedindex'])\n\n review = ''\n\n # give 0.5 seconds in credit\n speedindex_adjusted = speedindex - 500\n if speedindex_adjusted <= 0:\n # speed index is 500 or below, give highest score\n points = 5.0\n else:\n points = 5.0 - (speedindex_adjusted / 1000)\n\n if points > 5.0:\n points = 5.0\n if points < 1.0:\n points = 1.0\n\n if points == 5.0:\n review = _('TEXT_REVIEW_VERY_GOOD')\n elif points >= 4.0:\n review = _('TEXT_REVIEW_IS_GOOD')\n elif points >= 3.0:\n review = _('TEXT_REVIEW_IS_OK')\n elif points > 1.0:\n review = _('TEXT_REVIEW_IS_BAD')\n elif points <= 1.0:\n review = _('TEXT_REVIEW_IS_VERY_BAD')\n\n review += '* Speedindex: {}\\n'.format(speedindex)\n if 's' in result_dict['load']:\n review += _(\"TEXT_REVIEW_LOAD_TIME\").format(result_dict['load'])\n else:\n review += _(\"TEXT_REVIEW_LOAD_TIME_SECONDS\").format(\n result_dict['load'])\n\n review += _(\"TEXT_REVIEW_NUMBER_OF_REQUESTS\").format(\n result_dict['requests'])\n\n return (points, review, result_dict)\n","sub_path":"tests/performance_sitespeed_io.py","file_name":"performance_sitespeed_io.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"499210457","text":"# -*- coding: utf-8 -*-\nimport pickle\n\nlog_path = \"/Users/Thomas/Desktop/GameHistory/\"\n\n\nclass GameLog:\n\n def __init__(self, start_log_index):\n self.current_log_index = start_log_index\n self.current_file_index = (self.current_log_index - 1) // 1000 + 1\n self.current_1k_games = GameLog.load_1k_games(self.current_file_index)\n while not self.current_1k_games:\n self.current_file_index += 1\n self.current_1k_games = GameLog.load_1k_games(self.current_file_index)\n self.current_log_index = (self.current_file_index - 1) * 1000 + 1\n\n def get_next_game(self):\n if self.current_log_index >= 1200000:\n return None\n 
while (self.current_log_index not in self.current_1k_games) or \\\n (not self.current_1k_games[self.current_log_index]) or \\\n (self.current_1k_games[self.current_log_index]['ratingc'] == 'PF3'):\n self._incre_current_index()\n res = self.current_1k_games[self.current_log_index]\n self._incre_current_index()\n return res\n\n def _incre_current_index(self):\n if self.current_log_index % 1000 == 0:\n self.current_1k_games = None\n while not self.current_1k_games:\n self.current_file_index += 1\n self.current_1k_games = GameLog.load_1k_games(self.current_file_index)\n self.current_log_index = (self.current_file_index - 1) * 1000 + 1\n else:\n self.current_log_index += 1\n\n def current_location(self):\n current_f = self.current_file_index\n current_l = self.current_log_index - 1\n if current_l % 1000 == 0:\n current_f -= 1\n return \"Current location -- File index = {}, Log index = {}\".format(current_f, current_l)\n\n @staticmethod\n def load_1k_games(file_index):\n filename = log_path + \"{}k.txt\".format(file_index)\n try:\n return pickle.load(open(filename, \"rb\"))\n except:\n return None\n\n @staticmethod\n def load_game_by_id(log_id):\n games = GameLog.load_1k_games((int((log_id - 1) / 1000) + 1))\n if games:\n return games[log_id]\n else:\n return None\n\n @staticmethod\n def load_single_round(log_id, round_num):\n rounds = GameLog.load_game_by_id(log_id)\n if rounds:\n return rounds['log'][round_num]\n else:\n return None\n\n @staticmethod\n def print_game_by_id(log_id):\n history = GameLog.load_game_by_id(log_id)\n GameLog.print_formatted_rounds(history)\n\n @staticmethod\n def print_single_round(his_id, round_num):\n log = GameLog.load_single_round(his_id, round_num)\n GameLog.print_formatted_single_round(log)\n\n @staticmethod\n def print_formatted_rounds(history):\n if not history:\n print(\"Under this history-id there is no history\")\n return\n for key, value in history.items():\n if key == \"log\":\n print(\"log:\")\n for i in range(0, len(value)):\n array = value[i]\n print(\" log[\" + str(i) + \"]:\")\n for j in range(0, len(array)):\n arr = array[j]\n print(\" log[\" + str(i) + \"][\" + str(j) + \"]: \" + str(arr))\n else:\n print(str(key) + \": \" + str(value))\n\n @staticmethod\n def print_formatted_single_round(log):\n if log:\n item_names = {0: \"Round info\", 1: \"Scores\", 2: \"Bonus indicator\", 3: \"Inner bonus\",\n 4: \" Player 0 init\", 5: \" Player 0 draw\", 6: \" Player 0 drop\",\n 7: \" Player 1 init\", 8: \" Player 1 draw\", 9: \" Player 1 drop\",\n 10: \" Player 2 init\", 11: \" Player 2 draw\", 12: \" Player 2 drop\",\n 13: \" Player 3 init\", 14: \" Player 3 draw\", 15: \" Player 3 drop\",\n 16: \"Result\"}\n for i in range(0, len(log)):\n print(\"{}: {}\".format(item_names[i], log[i]))\n else:\n print(\"Under this history-id-round_num there is no history\")\n\n @staticmethod\n def get_game_ids_by_dan(dan):\n # dan = '十段' '八段' '九段' '七段' '天鳳'\n file_levels = log_path + \"history_id_by_levels.txt\"\n res = pickle.load(open(file_levels, 'rb'))\n return res[dan]\n\n @staticmethod\n def get_round_ids_by_win_form(win_forms):\n # win_forms: see end of this file\n file_forms = log_path + \"forms_statistics.txt\"\n forms = pickle.load(open(file_forms, 'rb'))\n if isinstance(win_forms, list):\n round_ids = []\n for w in win_forms:\n round_ids += forms[w]\n return round_ids\n else:\n return forms[win_forms]\n\n @staticmethod\n def get_draw_index(player_index):\n return (player_index + 1) * 3 + 2\n\n @staticmethod\n def get_drop_index(player_index):\n return 
(player_index + 1) * 3 + 3\n\n\n\"\"\"+++++++++++++++++++++++++++++++++++++++++++++++ All win forms ++++++++++++++++++++++++++++++++++++++++++++++++++\nNo winners: \n流局 \n\nBonus honors: 役牌 中(1飜) 役牌 白(1飜) 役牌 發(1飜) \n場風 東(1飜) 場風 南(1飜) 場風 西(1飜) \n自風 東(1飜) 自風 南(1飜) 自風 西(1飜) 自風 北(1飜)\n\nBonus tiles:\nドラ(1飜) ドラ(2飜) ドラ(3飜) ドラ(4飜) ドラ(5飜) ドラ(6飜) ドラ(7飜) ドラ(8飜) ドラ(9飜) ドラ(10飜) ドラ(11飜) ドラ(12飜)\n赤ドラ(1飜) 赤ドラ(2飜) 赤ドラ(3飜)\n裏ドラ(1飜) 裏ドラ(2飜) 裏ドラ(3飜) 裏ドラ(4飜) 裏ドラ(5飜) 裏ドラ(6飜) 裏ドラ(7飜) 裏ドラ(8飜) 裏ドラ(9飜)\n\nControllable forms:\n1f: 立直(1飜) 門前清自摸和(1飜) 断幺九(1飜) 三色同順(1飜) 平和(1飜) 一盃口(1飜) 混全帯幺九(1飜) 一気通貫(1飜)\n\n2f: 対々和(2飜) 混一色(2飜) 七対子(2飜) 三暗刻(2飜) 三色同順(2飜) 一気通貫(2飜) 混全帯幺九(2飜) 両立直(2飜) 小三元(2飜) \n 純全帯幺九(2飜) 混老頭(2飜) 三色同刻(2飜) 三槓子(2飜)\n\n3f: 混一色(3飜) 純全帯幺九(3飜) 二盃口(3飜)\n\n5f: 清一色(5飜) 清一色(6飜)\n\nmf: 大三元(役満) 四暗刻(役満) 国士無双(役満) 緑一色(役満) 四暗刻単騎(役満) 小四喜(役満) 字一色(役満) 九蓮宝燈(役満)\n 清老頭(役満) 国士無双13面(役満) 大四喜(役満) 純正九蓮宝燈(役満)\n \nStategy formable:\n断幺九 三色同順 平和 一盃口 混全帯幺九 一気通貫 対々和 混一色 七対子\n\nExtra luck:\n一発(1飜) 河底撈魚(1飜) 海底摸月(1飜) 槍槓(1飜) 嶺上開花(1飜) 天和(役満) 地和(役満)\n\"\"\"\n\n\"\"\"++++++++++++++++++++++++++++++++++++++++++++++ Game log format ++++++++++++++++++++++++++++++++++++++++++++++++++\n Retrieve history of one whole round game, indexed by its reference id.\n :param reference_id: Reference id, the list of all ids saved in \"/Desktop/GameHistoryReferenceIDs.txt\".\n :return: history.\n Description of return value (only important items)\n [keys] [value]\n rule disp: rule in japanese; 'aka5X': w/o red bonus tile 5X\n name names\n dan player level\n sc final score board\n log game history of a total round, 4+ single games included, log[i] = i-th single game\n • Game State:\n log[i][0]: [0]-which player is the first to drop [a,b,c] a=turn, b=base, c=point unit\n log[i][1]: score board before this single game\n log[i][2]: open bonus tile\n log[i][3]: secret bonus tile\n log[i][16]: game result-reward,score calculation\n •• Player State:\n East: log[i][4:6] (South-[7:9], West-[10:12], North-[13:15])\n log[i][+0]: initial tiles state\n log[i][+1]: received tiles sequence,\n log[i][+2]: dropped tiles sequence\n ••• Tile description:\n 11-19: 1-9 Numbers 🀇🀈🀉🀊🀋🀌🀍🀎🀏, 21-29: 1-9 Coins 🀙🀚🀛🀜🀝🀞🀟🀠🀡, 31-39: 1-9 Bamboos 🀐🀑🀒🀓🀔🀕🀖🀗🀘\n 41-47: West,South,East,North,Blank,Centre,Fortune 🀀🀁🀂🀃🀆🀅🀄\n 51-53: red 5 Numbers, red 5 Coins, red 5 Bamboos\n 60: drop what received from the closed pile\n 'c313233': eats the first tile from player before this player\n '15p5115': pong, position of p shows from whom the pong was made\n 'a/k/m': different kan set\n\"\"\"","sub_path":"ki/rfmcts/gamelog.py","file_name":"gamelog.py","file_ext":"py","file_size_in_byte":8924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"564479703","text":"from django import forms\nfrom django.forms import ModelForm\n\nfrom products.models import Product, Collection\n\nclass NewAmazonProductForm(forms.Form):\n asin = forms.CharField(label='ASIN')\n\nclass ProductForm(ModelForm):\n class Meta:\n model = Product\n exclude = ('product_id', 'created', 'detail_views', 'sales_rank', 'user')\n\nclass CollectionForm(ModelForm):\n class Meta:\n model = Collection\n exclude = ('created', 'updated')\n","sub_path":"products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"585286995","text":"import cc_dat_utils,cc_json_utils,cc_data,sys\n\nif len(sys.argv) == 3:\n input = sys.argv[1]\n output = sys.argv[2]\n print (\"input:\"+input+\" --> output:\" 
+ output)\n cc_data = cc_json_utils.make_cc_data_from_json(input)\n cc_dat_utils.write_cc_data_to_dat(cc_data,output)\n print(\"complete\")\nelse:\n print(\"wrong number of arguments!\")","sub_path":"json_to_dat.py","file_name":"json_to_dat.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"510322656","text":"\"\"\" retorrent.find_tfile \"\"\"\n\nfrom os.path import basename, isdir, isfile\nfrom os.path import join as pjoin\n\nfrom os_utils.os_utils import listdir\nfrom PyTorrentInfo.torrentParser import TorrentParser as TP\n\ndebug = True\n\n\ndef get_filenames(tfile_path):\n \"\"\"\n return a list of filenames inside the passed info dict.\n\n the 'files' key only exists if it's a dir.\n \"\"\"\n tp_info = TP().readFile(tfile_path)['torrent']['info']\n\n if 'files' in tp_info:\n # f['path'] is a list - check this assumption on later bugs\n return [basename(f['path'][0]) for f in tp_info['files']]\n\n return [tp_info.get('name')]\n\n\ndef find_tfiles(paths, tfilesdir):\n files_tfiles = gen_map(tfilesdir)\n\n return [tfile_from_filename(path, tfilesdir, files_tfiles) for path in paths]\n\n\ndef tfile_details(tfile_path):\n files_tfile = {}\n try:\n\n for filename in get_filenames(tfile_path):\n files_tfile[filename] = tfile_path\n\n except Exception as _e:\n pass\n\n return files_tfile\n\n\ndef gen_map(tfilesdir):\n tfiles = [pjoin(tfilesdir, f) for f in listdir(tfilesdir) if f.endswith('torrent')]\n\n files_tfiles = {}\n for tfile in tfiles:\n files_tfiles.update(tfile_details(tfile))\n return files_tfiles\n\n\ndef tfile_from_filename(filename, tfilesdir, files_tfiles=None):\n \"\"\"\n @param: files_tfiles : pre-generated output from gen_map\n \"\"\"\n if files_tfiles is None:\n files_tfiles = gen_map(tfilesdir)\n\n if isdir(filename):\n # cheat, and get a file inside the dir\n filename = pick_file(filename)\n\n filename = basename(filename)\n return files_tfiles.get(filename, '')\n\n\ndef pick_file(dirpath):\n dircontent = listdir(dirpath)\n\n for f in dircontent:\n path = pjoin(dirpath, f)\n if isfile(path):\n return f\n raise Exception('Passed a dir with no content!')\n","sub_path":"lib/retorrent/find_tfile.py","file_name":"find_tfile.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"550407312","text":"# -*- coding: utf-8 -*-\nimport json\n# import re\nfrom django.http import HttpResponse\n# from urllib import unquote\nimport time\n\n# ----------------------------- GLOBALS VARIABLES -----------------------------\nsettingFile = \"C:/projects/wsMediacenter/webservice/assets/settings.json\"\n# ----------------------------- -----------------------------\n\n\ndef open_file_settings():\n return json.load(open(settingFile))\n\n\ndef get_settings(request):\n try:\n return send_response(open_file_settings())\n except:\n return send_response(error_messages(2))\n\n\ndef get_setting(request, setting):\n file = open_file_settings()\n return send_response(file[setting])\n\n\ndef add_setting(request, category, value):\n file = open_file_settings()\n category = check_category(category)\n if value not in file[category]:\n file[category].append(value)\n update_file(file)\n return send_response(open_file_settings())\n else:\n return send_response(error_messages(1, value), 405)\n\n\ndef delete_setting(request, category, value):\n file = open_file_settings()\n category = check_category(category)\n data = 
file[category]\n\n for d in data:\n if d == value:\n data.remove(d)\n break\n\n update_file(file)\n\n return send_response(open_file_settings())\n\n\ndef check_category(category):\n return {\n 'folder': 'folders',\n 'video': 'videoFormats',\n 'audio': 'audioFormats'\n }.get(category, None)\n\n\ndef send_response(data, code=200):\n response = HttpResponse(json.dumps(data), content_type='application/json')\n response.status_code = code\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\n\ndef update_file(file):\n with open(settingFile, 'r+') as outfile:\n outfile.seek(0)\n outfile.write(json.dumps(file, indent=4))\n outfile.truncate()\n\n\ndef error_messages(id_message, value=''):\n messages = {\n 0: \"Une erreur est survenue du cote serveur.\",\n 1: \"Erreur: \" + value + \"existe deja.\",\n 2: \"Erreur lors de l\\'ouverture du fichier de configuration.\"\n }\n\n return messages.get(id_message)\n","sub_path":"webservice/views/setting_views.py","file_name":"setting_views.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"179248136","text":"from pathlib import Path\n\nfrom .consts import SHORT_NAME\n\n# Folder to look for icon, glade files\n# - If this app is installed in ~/.local/bin and run from there, look for ~/.local/share/cobang\n# - If this app is install in /usr/local/bin and run from there, look for /usr/local/share/cobang\n# - If this app is run from source, look in the source folder\n\n\ndef get_ui_folder() -> Path:\n top_app_dir = Path(__file__).parent.parent.resolve()\n str_top_app_dir = str(top_app_dir)\n if str_top_app_dir.startswith('/usr/local/'):\n data_folder = Path(f'/usr/local/share/{SHORT_NAME}')\n elif str_top_app_dir.startswith('/usr/'):\n data_folder = Path(f'/usr/share/{SHORT_NAME}')\n elif str_top_app_dir.startswith(str(Path('~/.local/').expanduser())):\n data_folder = Path(f'~/.local/share/{SHORT_NAME}').expanduser()\n else:\n # Run from source\n data_folder = top_app_dir / 'data'\n if data_folder.exists():\n return data_folder\n\n\ndef get_ui_filepath(filename: str) -> Path:\n ui_folder = get_ui_folder()\n return ui_folder / filename\n\n\ndef get_ui_source(filename: str) -> str:\n filepath = get_ui_filepath(filename)\n return filepath.read_text()\n","sub_path":"cobang/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"104519078","text":"init=[int(x) for x in input().split()]\nnumOfOperation=init[1]\nnums=[int(x) for x in input().split()]\nheaps=[[x] for x in nums]\nfor i in range(numOfOperation):\n operation=[int(x) for x in input().split()]\n if operation[0]==1:\n for j in heaps:\n if nums[operation[1]-1] in j and nums[operation[2]-1] not in j:\n for k in range(len(heaps)):\n if nums[operation[2]-1] in heaps[k]:\n j+=heaps[k]\n del heaps[k]\n break\n else:\n judge=True#判断第x个数是否被删除\n for j in heaps:\n if nums[operation[1]-1] in j:\n judge=False\n j.sort()\n print(j.pop(0))\n if judge:\n print(-1)","sub_path":"Code/CodeRecords/2605/60614/267277.py","file_name":"267277.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"308465354","text":"# PyQt5 imports\nfrom PyQt5 import QtGui, QtCore, QtOpenGL, QtWidgets\n\n# PyOpenGL imports\nfrom OpenGL.GL import *\nimport OpenGL.GL.shaders\nimport OpenGL.arrays.vbo as 
glvbo\nimport time\nfrom math import pi,cos,sin\n\nfrom PanoStimGL import *\n\nclass GLPlotWidget(QtWidgets.QOpenGLWidget):\n # default window size\n width, height = 1000, 500\n \n def initialize_stimulus_prep_win(self):\n self.stim = PanoStimGL(size = (50,50), tf = (0,0), stretch = False)\n self.vertices, indices, *_ = self.stim.get_stim()\n self.indices_length = len(indices)\n \n vertex_shader = \"\"\"\n #version 330\n #define PI 3.1415926538\n in vec3 position;\n in vec3 color;\n in vec2 tex_coords;\n out vec3 newColor;\n out vec2 vtex_coords;\n uniform mat4 trans;\n uniform mat4 rot;\n uniform mat4 scale;\n uniform float stretch[2];\n\n uniform bool stretch_enable;\n void main()\n {\n vec4 transformed = rot*scale*vec4(position, 1.0f);\n transformed = trans*transformed;\n \n float delta_x = trans[3][0];\n float y_elev = float(abs(transformed.y))*2.0 *90;\n float s = (1./cos(1.*y_elev/180. *PI));\n if (!stretch_enable){\n s = 1.0;\n }\n transformed = vec4(delta_x+s*(transformed.x-delta_x), transformed.yzw);\n \n gl_Position = transformed;\n newColor = color;\n vtex_coords = tex_coords;\n }\n \"\"\"\n\n fragment_shader = \"\"\"\n #version 330\n in vec3 newColor;\n \n uniform sampler2D mask;\n in vec2 vtex_coords;\n \n // Ouput data\n layout(location = 0)out vec4 color;\n void main()\n {\n vec4 mask_color = texture2D(mask, vtex_coords);\n color = mix(vec4(newColor,1.0),vec4(0.5,0.5,0.5,1.0), mask_color.r);\n }\n \"\"\"\n self.shader = OpenGL.GL.shaders.compileProgram(OpenGL.GL.shaders.compileShader(vertex_shader, GL_VERTEX_SHADER),\n OpenGL.GL.shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER))\n \n self.trans_location = glGetUniformLocation(self.shader, \"trans\")\n self.rot_location = glGetUniformLocation(self.shader, \"rot\")\n self.scale_location = glGetUniformLocation(self.shader, \"scale\")\n self.stretch_location = glGetUniformLocation(self.shader, \"stretch\")\n self.stretch_enable_location = glGetUniformLocation(self.shader, \"stretch_enable\")\n \n \n \n\n self.mask_location = glGetUniformLocation(self.shader, \"mask\")\n mask_size = 501 #always odd\n circle_mask = 1- create_circular_mask(mask_size,mask_size).flatten()\n \n self.maskID = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, self.maskID);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) \n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, mask_size, mask_size, 0, GL_RED, GL_FLOAT, circle_mask)\n\n glBindTexture(GL_TEXTURE_2D, 0)\n \n self.__vao = glGenVertexArrays(1) \n glBindVertexArray(self.__vao) \n self.bufs = glGenBuffers(2) \n glBindBuffer(GL_ARRAY_BUFFER, self.bufs[0])\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)\n \n position = glGetAttribLocation(self.shader, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 32, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n\n color = glGetAttribLocation(self.shader, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 32, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n \n tex_coords = glGetAttribLocation(self.shader, \"tex_coords\")\n glVertexAttribPointer(tex_coords, 2, GL_FLOAT, GL_FALSE, 32, ctypes.c_void_p(24))\n glEnableVertexAttribArray(tex_coords)\n \n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.bufs[1])\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ctypes.c_uint) * 
len(indices), (ctypes.c_uint * len(indices))(*indices), GL_STATIC_DRAW)\n\n #glClearColor(0.0, 0.0, 0.0, 0.0)\n #glBindBuffer(GL_ARRAY_BUFFER, 0)\n glBindVertexArray(0)\n \n def initialize_toPano_view(self):\n # A quad to show the rendered content\n # screen_quad_data = np.array([\n # # Positions TexCoords\n # -1.0, 1.0, 0.0, 1.0,\n # -1.0, -1.0, 0.0, 0.0,\n # 1.0, -1.0, 1.0, 0.0,\n # 1.0, 1.0, 1.0, 1.0\n # ], dtype=np.float32)\n \n # indices = np.array([0,1,2,0,2,3], dtype=np.uint16)\n \n screen_quad_data = np.array([\n # Positions TexCoords\n -1.0, 1.0, 0.0, 1.0,\n -1.0, -1.0, 0.0, 0.0,\n 1.0, -1.0, 1.0, 0.0,\n -1.0, 1.0, 0.0, 1.0,\n 1.0, -1.0, 1.0, 0.0,\n 1.0, 1.0, 1.0, 1.0\n ], dtype=np.float32)\n \n vertex_toPano = \"\"\"\n #version 330\n in vec2 position;\n in vec2 tex_coords;\n out vec2 TexCoords;\n void main()\n {\n gl_Position = vec4(position.x, position.y, 0.0f, 1.0f);\n TexCoords = tex_coords;\n }\n \"\"\"\n\n fragment_toPano = \"\"\"\n #version 330\n in vec2 TexCoords;\n uniform sampler2D src_tex;\n out vec4 color;\n void main()\n {\n vec4 sampled = texture2D(src_tex, TexCoords); // original rendered pixel color value\n color = sampled;\n }\n \"\"\"\n \n self.shaderToPano = OpenGL.GL.shaders.compileProgram(OpenGL.GL.shaders.compileShader(vertex_toPano, GL_VERTEX_SHADER),\n OpenGL.GL.shaders.compileShader(fragment_toPano, GL_FRAGMENT_SHADER))\n \n self.screen_quad_vao = glGenVertexArrays(1)\n glBindVertexArray(self.screen_quad_vao)\n \n quad_bufs = glGenBuffers(2)\n glBindBuffer(GL_ARRAY_BUFFER, quad_bufs[0])\n glBufferData(GL_ARRAY_BUFFER, screen_quad_data.nbytes, screen_quad_data, GL_STATIC_DRAW)\n \n position = glGetAttribLocation(self.shaderToPano, \"position\")\n glVertexAttribPointer(position, 2, GL_FLOAT, GL_FALSE, 16, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n tex_coords = glGetAttribLocation(self.shaderToPano, \"tex_coords\")\n glVertexAttribPointer(tex_coords, 2, GL_FLOAT, GL_FALSE, 16, ctypes.c_void_p(8))\n glEnableVertexAttribArray(tex_coords)\n \n self.src_tex_location = glGetUniformLocation(self.shaderToPano, \"src_tex\")\n \n #glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, quad_bufs[1])\n #glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ctypes.c_uint) * len(indices), (ctypes.c_uint * len(indices))(*indices), GL_STATIC_DRAW)\n \n #glBindBuffer(GL_ARRAY_BUFFER, 0)\n #glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)\n glBindVertexArray(0)\n\n def setup_framebuffer(self):\n #Framebuffer\n self.fbo = glGenFramebuffers(1)\n glBindFramebuffer(GL_FRAMEBUFFER, self.fbo)\n \n #Texture\n self.texture = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, self.texture)\n glTexImage2D(GL_TEXTURE_2D, 0,GL_RGBA, self.width, self.height, 0, GL_RGBA, GL_FLOAT, None)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glBindTexture(GL_TEXTURE_2D, 0) \n \n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,self.texture, 0)\n glDrawBuffers(1, [GL_COLOR_ATTACHMENT0])\n if not glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE:\n print('framebuffer binding failed')\n exit()\n glBindFramebuffer(GL_FRAMEBUFFER,0)\n \n \n def initializeGL(self):\n \"\"\"Initialize OpenGL, VBOs, upload data on the GPU, etc.\n \"\"\"\n self.setup_framebuffer()\n self.initialize_stimulus_prep_win()\n self.initialize_toPano_view()\n self.framebuffer = True\n def 
set_stim(self, stim):\n self.stim = stim\n \n previous_time = time.time()\n def paintGL(self):\n \"\"\"Paint the scene.\n \"\"\"\n \n self.draw_stimulus_to_texture()\n\n if self.framebuffer:\n self.remap_texture_to_screen()\n \n def draw_stimulus_to_texture(self):\n # Render to our framebuffer\n \n \n glUseProgram(self.shader)\n glBindVertexArray(self.__vao)\n \n t = time.time()\n delta_t = t - self.previous_time\n max_delta_t = 0.1 #normal framerate is 60Hz, 16.67ms/frame\n if delta_t > max_delta_t: #to avoid skipping positions at startup\n delta_t = 0\n self.previous_time = t\n self.stim.update(delta_t)\n self.vertices, _ , mask, size_ratio, stretch = self.stim.get_stim()\n # if mask:\n # glBindTexture(GL_TEXTURE_2D, self.maskID)\n # else:\n # glBindTexture(GL_TEXTURE_2D, 0) \n # glBindBuffer(GL_ARRAY_BUFFER, self.bufs[0])\n # glBufferSubData(GL_ARRAY_BUFFER, 0, self.vertices.nbytes, self.vertices)\n # glBindBuffer(GL_ARRAY_BUFFER, 0)\n scale = scale_matrix(size_ratio).transpose().flatten()\n rot = rotation_matrix(np.radians(self.stim.ori)).transpose().flatten()\n trans = translation_matrix(self.stim.get_pos_uv()).transpose().flatten()\n \n glUniform1i(self.stretch_enable_location, stretch)\n glUniform1fv(self.stretch_location, 2, self.stim.get_stretching_factors())\n glUniformMatrix4fv(self.rot_location, 1, GL_FALSE, rot)\n glUniformMatrix4fv(self.trans_location, 1, GL_FALSE, trans)\n glUniformMatrix4fv(self.scale_location, 1, GL_FALSE, scale)\n \n if self.framebuffer:\n glBindFramebuffer(GL_FRAMEBUFFER,self.fbo)\n glDrawBuffers(1, [GL_COLOR_ATTACHMENT0])\n glViewport(0,0, int(self.width), int(self.height)) #glViewport(0,-int(self.height/2), int(self.width), int(self.height*2))\n \n glClearColor(0.5, 0.5, 0.5, 1.0)\n glClear(GL_COLOR_BUFFER_BIT) \n \n glDrawElements(GL_TRIANGLES, self.indices_length, GL_UNSIGNED_INT, None) \n\n glBindTexture(GL_TEXTURE_2D, 0)\n glBindVertexArray(0)\n \n \n def remap_texture_to_screen(self):\n if self.framebuffer:\n glBindFramebuffer(GL_FRAMEBUFFER,self.defaultFramebufferObject())\n \n glViewport(0,0, int(self.width), int(self.height))\n glClearColor(0.2, 0.2, 0.2, 0.0)\n glClear(GL_COLOR_BUFFER_BIT) \n \n glUseProgram(self.shaderToPano)\n glBindVertexArray(self.screen_quad_vao)\n \n #Set our \"src_tex\" sampler to use Texture Unit 0\n glActiveTexture(GL_TEXTURE1)\n glBindTexture(GL_TEXTURE_2D, self.texture)\n glUniform1i(self.src_tex_location, 1)\n \n glActiveTexture(GL_TEXTURE0)\n glDrawArrays(GL_TRIANGLES, 0, 6)\n #glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None) \n \n glBindTexture(GL_TEXTURE_2D, 0)\n glBindVertexArray(0)\n \n def resizeGL(self, width, height):\n \"\"\"Called upon window resizing: reinitialize the viewport.\n \"\"\"\n #glViewport(int(self.width/4),0, int(self.width/2), int(self.height))\n # set orthographic projection (2D only)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n # the window corner OpenGL coordinates are (-+1, -+1)\n glOrtho(-1, 1, 1, -1, -1, 1)\n\n \n# import numpy for generating random data points\nimport sys\nimport numpy as np\nimport numpy.random as rdn\n\n# define a Qt window with an OpenGL widget inside it\nclass TestWindow(QtWidgets.QMainWindow):\n lastTime = time.time()\n nFrames = 0\n \n def __init__(self):\n super(TestWindow, self).__init__()\n\n # initialize the GL widget\n self.widget = GLPlotWidget()\n # put the window at the screen position (100, 100)\n self.setGeometry(100, 100, self.widget.width, self.widget.height)\n self.setCentralWidget(self.widget)\n \n self._timer = QtCore.QTimer(self)\n 
self._timer.timeout.connect(self.update)\n self._timer.start(10)\n \n # self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)\n # self.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)\n # self.setAttribute(QtCore.Qt.WA_NoSystemBackground, False)\n # self.setWindowOpacity(1);\n\n self.show()\n \n def update(self):\n currentTime = time.time()\n self.nFrames += 1\n if currentTime - self.lastTime >= 1:\n #print(f'{1000/self.nFrames:.2f} ms/frame')\n self.nFrames = 0\n self.lastTime = currentTime\n \n self.widget.repaint()\n\nif __name__ == '__main__':\n # create the Qt App and window\n app = QtWidgets.QApplication(sys.argv)\n window = TestWindow()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"code/python_code/draft_scripts/qtgl_successive_shader_passes.py","file_name":"qtgl_successive_shader_passes.py","file_ext":"py","file_size_in_byte":13597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"231431323","text":"from django.conf.urls.defaults import *\n\nfrom apply.models import Application\n\n\nurlpatterns = patterns('apply.views',\n url(r'^$', 'index', name='index'),\n url(r'^apply$', 'apply', name='apply'),\n url(r'^faq$', 'faq', name='faq'),\n url(r'^mentorship$', 'mentorship', name='mentorship'),\n url(r'^thanks$', 'thanks', name='thanks'),\n url(r'^projects$', 'projects', name='projects'),\n)\n","sub_path":"apps/apply/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"541214315","text":"import fileinput, re \nimport requests, sys\nimport json\n\n\n#--------------------------------------- Récupération des ID de gène sur ensembl-----------------------------------------------\n\ndef EnsEMBL_ID(specie_sub, gene) : \n\tserver = \"https://rest.ensembl.org\"\n\text = \"/xrefs/symbol/\"+specie_sub+\"/\"+gene+\"?\" #URL de requête\n\t \n\tr = requests.get(server+ext, headers={ \"Content-Type\" : \"application/json\"}) #Requête GET EnsEMBL\n \t\n\tif not r.ok:\n\t\tserver = \"https://rest.ensemblgenomes.org\"\n\t\text = \"/xrefs/symbol/\"+specie_sub+\"/\"+gene+\"?\"\n\t\tr = requests.get(server+ext, headers={ \"Content-Type\" : \"application/json\"}) #Pour les plantes, champignons, protiste...\n\n\t\tif not r.ok :\n\t\t\tr.raise_for_status()\n\t\t\tsys.exit()\t\t\t\t\t\t\t\t\t\t\t\t\t\n \n\tdecoded = r.json() \t\t\t\t\t\t\t\t\n\n\tk=0\n\tidEnsembl = [] \t\t\t\t\t\t\t\t\n\twhile k < len(decoded) : \t\t\t\t\t\t\t\t\n\t\tidEn = decoded[k]['id'] \t\t\t\t\t\t\t\t\n\t\tidEnsembl.append(idEn) \t\t\t\t\t\t\t\t\n\t\tk=k+1\n\treturn idEnsembl \t\t\t\t\t\t\t\t\n\t\n\n#--------------------------------------Récupération des ID des transcripts sur ensembl--------------------------------------------------\n\ndef EnsEMBL_TID(v_idEn) :\n\ti=0\n\tidtranscript = [] \n\twhile i < len(v_idEn) : \n\t\tfor idens in v_idEn : \n\t\t\tserver2 = \"https://rest.ensembl.org\"\n\t\t\text2 = \"/overlap/id/\"+idens+\"?feature=transcript\" \n\n\t\t\tr2 = requests.get(server2+ext2, headers={ \"Content-Type\" : \"application/json\"}) #Requête GET ensembl \n \n\t\t\tif not r2.ok:\n\t\t\t\tserver2 = \"https://rest.ensemblgenomes.org\"\n\t\t\t\text2 = \"/overlap/id/\"+idens+\"?feature=transcript\" \n \n\t\t\t\tr2 = requests.get(server2+ext2, headers={ \"Content-Type\" : \"application/json\"})\n\t\t\n\t\t\t\tif not r2.ok: \t\t\n\t\t\t\t\tr2.raise_for_status()\n\t\t\t\t\tsys.exit()\t\t\t\t\t\t\t\t\t\t\t\n 
\n\t\t\tdecoded2 = r2.json() \n\t\t\t\n\t\t\tj=0\n\t\t\twhile j < len(decoded2) : \n\t\t\t\ttransID = decoded2[j]['transcript_id'] \n\t\t\t\tif not transID in idtranscript : \n\t\t\t\t\tidtranscript.append(transID)\t\t\t\t\t\t\n\t\t\t\tj=j+1\n\t\t\ti = i+1\n\treturn idtranscript\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\n\t\n#----------------------------------------Récupération des ID des protéines sur ensembl-----------------------------------------------\n\t\t\ndef EnsEMBL_PID(v_idEn) : \n\tl = 0 \n\tprot=[]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\twhile l < len(v_idEn) :\n\t\tfor idens in v_idEn : \t\t\t\t\t\t\t\t\t\t\t\n\t\t\tserver3 = \"https://rest.ensembl.org\"\n\t\t\text3 = \"/overlap/id/\"+idens+\"?feature=cds\"\n\n\t\t\tr3 = requests.get(server3+ext3, headers={ \"Content-Type\" : \"application/json\"})\t\t #Requête GET ensembl \n\t\n\t\t\tif not r3.ok : \n\t\t\t\tserver3 = \"https://rest.ensemblgenomes.org\"\n\t\t\t\text3 = \"/overlap/id/\"+idens+\"?feature=cds\"\t\t\t\t\n\t\t\t\n\t\t\t\tr3 = requests.get(server3+ext3, headers={ \"Content-Type\" : \"application/json\"})\n\n\t\t\t\tif not r3.ok : \n\t\t\t\t\tr3.raise_for_status()\n\t\t\t\t\tsys.exit()\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\n\t\t\tdecoded3 = r3.json()\t\t\t\t\t\t\t\t\t\t\n\t\t\tfor h in decoded3 : \t\t\t\t\t\t\t\t\t\t\n\t\t\t\tprotID = h['id']\t\t\t\t\t\t \t\t\t\n\t\t\t\tif not protID in prot : \t\t\t\t\t\t\t\t\n\t\t\t\t\tprot.append(protID)\t\t\t\t\t\t\t\t\t\n\t\t\tl = l +1\n\t\treturn prot\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\n\t\t\n#----------------------------------------------------Génome Browser------------------------------------------------------------------------------------\t\n\ndef Genome_B(v_idEn, specie_sub):\n\tdb_list = [\"ensembl\", \"plants.ensembl\", \"bacteria.ensembl\", \"fungi.ensembl\", \"protists.ensembl\", \"metazoa.ensembl\"] #Création d'une liste de banque de données ensembl\n\tfor db in db_list:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\tfinal_url = \"http://{}.org/{}/Gene/Summary?db=core;g={}\".format(db, specie_sub,v_idEn)\n\t\ttest_url = requests.get(final_url)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #Requête GET ensembl\n\t\tif test_url.ok: \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t \n\t\t\tresult=test_url.url\n\t\t\turl = \"http://www.{}.org/{}/Location/View?db=core;g={}\".format(db, specie_sub,v_idEn)\t\t\t\t\t #URL vers Génome browser \n\t\t\tbreak \n\treturn result,url\t\t\t\t\t\t\t\t\n\n\n#------------------------------------------------------Orthologue----------------------------------------------------------------------------------------------\n\ndef Orthologs(v_idEn, specie_sub) : \n\tfor idens in v_idEn : \t\t\t\t\t\n\t\tlink = \" ortholog \"\t\t\t#lien ortholog\n\treturn link\t\t\t\t\t\t\t\t\n","sub_path":"Ensembl.py","file_name":"Ensembl.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"542212233","text":"from __future__ import division\nfrom jinja2.runtime import LoopContext, TemplateReference, Macro, Markup, TemplateRuntimeError, missing, concat, escape, markup_join, unicode_join, to_string, identity, TemplateNotFound\ndef run(environment):\n name = 'source/layouts/lite.html'\n\n def root(context, environment=environment):\n parent_template = None\n l_page = context.resolve('page')\n if 0: yield None\n if (not environment.getattr(l_page, 'standalone')):\n if 0: yield None\n if parent_template is None:\n yield u' \\n\\t'\n parent_template = 
environment.get_template('core/__base_web.html', 'source/layouts/lite.html')\n for name, parent_block in parent_template.blocks.iteritems():\n context.blocks.setdefault(name, []).append(parent_block)\n if parent_template is None:\n yield u'\\n'\n if parent_template is None:\n yield u'\\n\\n'\n if parent_template is None:\n for event in context.blocks['body_class'][0](context):\n yield event\n if parent_template is None:\n yield u'\\n\\n'\n if parent_template is None:\n for event in context.blocks['superbar'][0](context):\n yield event\n if parent_template is None:\n yield u'\\n\\n'\n if parent_template is None:\n for event in context.blocks['content'][0](context):\n yield event\n if parent_template is None:\n yield u'\\n\\n\\n'\n if parent_template is None:\n for event in context.blocks['superfooter'][0](context):\n yield event\n if parent_template is not None:\n for event in parent_template.root_render_func(context):\n yield event\n\n def block_content_body_class(context, environment=environment):\n if 0: yield None\n yield u'content_body'\n\n def block_infoNotice(context, environment=environment):\n l_page = context.resolve('page')\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t'\n if environment.getattr(environment.getattr(l_page, 'elements'), 'infoNotice'):\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t\\t%s\\n\\t\\t\\t\\t' % (\n environment.getattr(environment.getattr(l_page, 'elements'), 'infoNotice'), \n )\n yield u'\\n\\t\\t\\t'\n\n def block_superfooter(context, environment=environment):\n l_link = context.resolve('link')\n if 0: yield None\n yield u'\\n
\\n\\t
\\n\\t\\t©2011 political momentum (about - legal - help)\\n\\t
\\n\\n\\n\\t
\\n\\t\\t
\\n\\t
\\n\\n\\t
\\n\\t
\\n
\\n' % (\n context.call(l_link, 'about:landing'), \n context.call(l_link, 'legal:landing'), \n context.call(l_link, 'help:landing'), \n )\n\n def block_successNotice(context, environment=environment):\n l_page = context.resolve('page')\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t'\n if environment.getattr(environment.getattr(l_page, 'elements'), 'successNotice'):\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t\\t%s\\n\\t\\t\\t\\t' % (\n environment.getattr(environment.getattr(l_page, 'elements'), 'successNotice'), \n )\n yield u'\\n\\t\\t\\t'\n\n def block_content_footer_class(context, environment=environment):\n if 0: yield None\n yield u'content_footer'\n\n def block_body_class(context, environment=environment):\n if 0: yield None\n yield u'fcm-lite fcm-dark'\n\n def block_notice(context, environment=environment):\n l_page = context.resolve('page')\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t'\n if environment.getattr(environment.getattr(l_page, 'elements'), 'generalNotice'):\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t\\t%s\\n\\t\\t\\t\\t' % (\n environment.getattr(environment.getattr(l_page, 'elements'), 'generalNotice'), \n )\n yield u'\\n\\t\\t\\t'\n\n def block_sidebars(context, environment=environment):\n if 0: yield None\n yield u'\\n'\n\n def block_content(context, environment=environment):\n l_page = context.resolve('page')\n if 0: yield None\n yield u'\\n
\\n\\n'\n if (not environment.getattr(l_page, 'standalone')):\n if 0: yield None\n yield u\"\\n\\t
\\n\\n\\t
\\n\\t\\t
\\n\\t\\t\\t\"\n for event in context.blocks['errorNotice'][0](context):\n yield event\n yield u\"\\n\\t\\t
\\n\\n\\t\\t
\\n\\t\\t\\t\"\n for event in context.blocks['infoNotice'][0](context):\n yield event\n yield u\"\\n\\t\\t
\\n\\n\\t\\t
\\n\\t\\t\\t\"\n for event in context.blocks['notice'][0](context):\n yield event\n yield u\"\\n\\t\\t
\\n\\t\\t\\n\\t\\t
\\n\\t\\t\\t\"\n for event in context.blocks['successNotice'][0](context):\n yield event\n yield u'\\n\\t\\t
\\n\\t
\\n\\n'\n else:\n if 0: yield None\n yield u\"\\n\\t
\\n\"\n yield u'\\n\\n\\t'\n for event in context.blocks['content_body'][0](context):\n yield event\n yield u'\\n\\n\\t'\n if (not environment.getattr(l_page, 'standalone')):\n if 0: yield None\n yield u'\\n\\t\\t'\n if environment.getattr(l_page, 'watermark'):\n if 0: yield None\n yield u'\\n\\t\\t\\t'\n template = environment.get_template('snippets/dev_widget.html', 'source/layouts/lite.html')\n for event in template.root_render_func(template.new_context(context.parent, True, locals())):\n yield event\n yield u'\\n\\t\\t'\n yield u'\\n\\t'\n yield u'\\n\\n
\\n\\n'\n for event in context.blocks['sidebars'][0](context):\n yield event\n yield u'\\n\\n'\n if (not environment.getattr(l_page, 'standalone')):\n if 0: yield None\n yield u\"\\n\\t
\\n\\t\\t\"\n for event in context.blocks['content_footer'][0](context):\n yield event\n yield u'\\n\\t
\\n'\n yield u'\\n\\n
\\n'\n\n def block_superbar(context, environment=environment):\n if 0: yield None\n yield u'\\n'\n template = environment.get_template('snippets/litebar.html', 'source/layouts/lite.html')\n for event in template.root_render_func(template.new_context(context.parent, True, locals())):\n yield event\n yield u'\\n'\n\n def block_errorNotice(context, environment=environment):\n l_page = context.resolve('page')\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t'\n if environment.getattr(environment.getattr(l_page, 'elements'), 'errorNotice'):\n if 0: yield None\n yield u'\\n\\t\\t\\t\\t\\t%s\\n\\t\\t\\t\\t' % (\n environment.getattr(environment.getattr(l_page, 'elements'), 'errorNotice'), \n )\n yield u'\\n\\t\\t\\t'\n\n def block_content_body(context, environment=environment):\n if 0: yield None\n yield u'\\n\\t'\n\n def block_content_footer(context, environment=environment):\n if 0: yield None\n yield u'\\n\\t\\t'\n\n blocks = {'content_body_class': block_content_body_class, 'infoNotice': block_infoNotice, 'superfooter': block_superfooter, 'successNotice': block_successNotice, 'content_footer_class': block_content_footer_class, 'body_class': block_body_class, 'notice': block_notice, 'sidebars': block_sidebars, 'content': block_content, 'superbar': block_superbar, 'errorNotice': block_errorNotice, 'content_body': block_content_body, 'content_footer': block_content_footer}\n debug_info = '1=10&2=14&5=22&7=27&11=32&80=37&15=43&27=47&28=51&29=54&80=58&83=62&43=67&44=71&45=74&70=78&5=82&35=86&36=90&37=93&66=97&11=101&14=105&15=108&18=111&19=115&26=118&27=122&34=125&35=129&42=132&43=136&55=143&58=146&59=149&60=152&66=158&69=161&70=164&71=167&7=172&8=175&19=180&20=184&21=187&55=191&71=195'\n return locals()","sub_path":"app/templates/compiled/layouts/lite.py","file_name":"lite.py","file_ext":"py","file_size_in_byte":9801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"361835908","text":"#!/usr/bin/env python3\nimport argparse\nimport fnmatch\nimport os\nimport re\nimport sys\n\nLICENSE = \"\"\"//------------------------------------------------------------------------------\n//\n// Copyright 2018 Fetch.AI Limited\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n//------------------------------------------------------------------------------\n\"\"\"\n\nLICENSE_SEPARATOR = \"\"\"\n\"\"\"\n\nFULL_LICENSE = LICENSE + LICENSE_SEPARATOR\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\nAPP_FOLDERS = ('apps', 'lib')\nFILE_PATTERS = ('*.cpp', '*.hpp')\n\n\ndef parse_commandline():\n parser = argparse.ArgumentParser()\n parser.add_argument('--fix', action='store_true', help='Enable updating of source files')\n return parser.parse_args()\n\n\ndef project_source_files():\n for folder in APP_FOLDERS:\n folder_root = os.path.join(PROJECT_ROOT, folder)\n for root, _, files in os.walk(folder_root):\n for pattern in FILE_PATTERS:\n for filename in fnmatch.filter(files, pattern):\n file_path = 
os.path.abspath(os.path.join(root, filename))\n yield file_path\n\n\ndef read_file(path):\n with open(path, 'r', encoding='utf-8') as input_file:\n return input_file.read()\n\n\ndef check_source_file(path):\n contents = read_file(path)\n\n if FULL_LICENSE not in contents:\n print('License missing from:', os.path.relpath(path, PROJECT_ROOT))\n return False\n\n return True\n\n\ndef update_source_file(path):\n\n contents = read_file(path)\n\n # do not bother processing files which have the license\n if FULL_LICENSE in contents:\n return\n\n # update the contents of the file\n if path.endswith('.cpp'):\n if LICENSE in contents:\n contents = contents.replace(LICENSE, FULL_LICENSE)\n else:\n contents = FULL_LICENSE + contents\n\n elif path.endswith('.hpp'):\n if LICENSE in contents:\n contents = re.sub(r'#pragma once\\s+' + LICENSE, '#pragma once\\n' + FULL_LICENSE, contents)\n else:\n contents = re.sub(r'#pragma once\\s+', '#pragma once\\n' + FULL_LICENSE, contents)\n\n #contents = contents.replace('#pragma once\\n', '#pragma once\\n' + LICENSE)\n\n else:\n print('Unable to update file: ', os.path.relpath(path, PROJECT_ROOT))\n return False\n\n if FULL_LICENSE not in contents:\n print('Unable to apply update to file:', os.path.relpath(path, PROJECT_ROOT))\n\n # update the contents of the file\n with open(path, 'w', encoding='utf-8') as output_file:\n output_file.write(contents)\n\n return True\n\n\ndef main():\n args = parse_commandline()\n\n if args.fix:\n routine = update_source_file\n else:\n routine = check_source_file\n\n results = list(map(routine, project_source_files()))\n\n if not all(results):\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"oef-core/scripts/check-license-header.py","file_name":"check-license-header.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"32719554","text":"# -*- coding: utf-8 -*-\nfrom config import db\n\n\nclass OrderAidance(db.Model):\n __tablename__ = \"boss_order_aidance\"\n\n id = db.Column(db.Integer, primary_key=True, nullable=False)\n task_id = db.Column(db.Integer)\n service_no = db.Column(db.String(64))\n pre_order_no = db.Column(db.String(64))\n create_person = db.Column(db.String(20))\n create_time = db.Column(db.DateTime)\n execute_person = db.Column(db.String(20))\n execute_time = db.Column(db.DateTime)\n is_done = db.Column(db.SmallInteger)\n\n def __init__(self, task_id, service_no, pre_order_no, create_person, create_time, execute_person, execute_time,\n is_done):\n '''Constructor'''\n self.task_id = task_id\n self.service_no = service_no\n self.pre_order_no = pre_order_no\n self.create_person = create_person\n self.create_time = create_time\n self.execute_person = execute_person\n self.execute_time = execute_time\n self.is_done = is_done\n\n def __repr__(self):\n return 'id : %s' % self.id\n\n\n# Client and database attributes dictionary\nclinetHead = ['id', 'taskId', 'serviceNo', 'preOrderNo', 'createPerson', 'createTime', 'executePerson', 'executeTime',\n 'isDone']\nOrderAidanceChangeDic = {\n \"id\": \"id\",\n \"taskId\": \"task_id\",\n \"serviceNo\": \"service_no\",\n \"preOrderNo\": \"pre_order_no\",\n \"createPerson\": \"create_person\",\n \"createTime\": \"create_time\",\n \"executePerson\": \"execute_person\",\n \"executeTime\": \"execute_time\",\n \"isDone\": \"is_done\"\n}\n\nintList = ['id', 'taskId', 'isDone']\n\n# 
db.create_all()\n","sub_path":"boss_service/models/Boss/OrderAidance.py","file_name":"OrderAidance.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"538438547","text":"import matplotlib.pyplot as plt\nfrom tapy import Indicators\nimport numpy as np\nimport pandas as pd\nfrom calculate_portfolio import calculate_portfolio\n\n\ndef demarker_strategy(obj, df_og):\n \"\"\"\n strategy: if demarker goes above 70 sell short\n if demarker goes below 30 buy long \n obj.period\n \"\"\"\n prepare_params(obj)\n obj.indicator = \"tapy\"\n for param in obj.tested_params:\n df = df_og.copy()\n df[\"param\"] = param\n print(\"param\", param)\n print(\"df param\", df)\n df = Indicators(df)\n df.de_marker(period=param, column_name=\"dem\")\n df = df.df\n df.fillna(0.0, inplace=True)\n long = False\n short = False\n print(\"df\", df)\n df[\"signals\"] = 0.0\n for i in range(len(df)):\n if df[\"dem\"][i] != 0.0:\n if df[\"dem\"][i] > 0.70 and short == False:\n df[\"signals\"][i] = -1.0\n short = True\n long = False\n if df[\"dem\"][i] < 0.30 and long == False:\n df[\"signals\"][i] = 1.0\n long = True\n short = False\n calculate_portfolio(obj, df, \"tapy\")\n\n\ndef prepare_params(obj):\n if isinstance(obj.period, list) or isinstance(obj.period, range):\n obj.tested_params = [a for a in obj.period]\n else:\n obj.tested_params.append(obj.period)\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"demarker_strat.py","file_name":"demarker_strat.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"403898011","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n\nplt.rcdefaults()\nfig, ax = plt.subplots()\n\n# Example data\npeople = ('Proposed \\n(3-gram terms)', 'Subba B. et al. [30] \\n(Top 25% 3-gram terms)', 'Borisaniya et al. 
[29]')\ny_pos = np.arange(len(people))\nno_terms = [550, 1454, 7920]\nwidth = 0.5\n\nax.barh(y_pos, no_terms, width, align='center', edgecolor='black', color='#C0C0C0', zorder=3)\nax.set_yticks(y_pos)\nax.set_yticklabels(people)\nax.invert_yaxis() # labels read top-to-bottom\nax.set_xlabel('Feature count', fontsize=14, labelpad=5)\nax.set_ylabel('HIDS frameworks', fontsize=14)\nax.set_xlim([0,9000])\nax.grid(zorder=0,linestyle='--', linewidth=0.5, color='#D4D4D4')\n\n# for i, v in enumerate(no_terms):\n# ax.text(v + 10, i, str(v))\n\nplt.show()\n","sub_path":"scripts/feature_count_plot.py","file_name":"feature_count_plot.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"174281696","text":"from classes.stack import Stack\nfrom classes.graph import Graph\nfrom classes.weightedDigraph import WeightedDigraph\n\ndef build_graph_from_file(path):\n file = open(path, \"r\")\n lines = [line.rstrip('\\n') for line in file]\n file.close()\n\n graph_size = int(lines[0])\n edges_size = int(lines[1])\n edges = lines[2:]\n\n if len(edges) != edges_size:\n print(\"Podano niewłaściwą liczbę krawędzi - %d\" % len(edges))\n return\n\n graph = Graph(graph_size)\n\n for edge in edges:\n edge = edge.split()\n if len(edge) != 2:\n print(\"\\n\\nNie utworzono grafu\")\n print(\"Nieprawidłowa linijka: \", edge)\n return\n vertex_from = int(edge[0]) - 1\n vertex_to = int(edge[1]) - 1\n graph.addEdge(vertex_from, vertex_to)\n\n graph.toString()\n return graph\n\n\ndef build_bipartite_graph_from_file(path):\n file = open(path, \"r\")\n lines = [line.rstrip('\\n') for line in file]\n file.close()\n\n graph_size = int(lines[0])\n edges_size = int(lines[1])\n edges = lines[2:]\n vertexes_left = []\n vertexes_right = []\n\n if len(edges) != edges_size:\n print(\"Podano niewłaściwą liczbę krawędzi - %d\" % len(edges))\n return\n\n graph = WeightedDigraph(graph_size)\n\n # dodaj źródło\n graph.addVertex()\n source_index = graph_size\n\n #dodaj ujście\n graph.addVertex()\n sink_index = graph_size + 1\n\n for edge in edges:\n edge = edge.split()\n if len(edge) != 2:\n print(\"\\n\\nNie utworzono grafu\")\n print(\"Nieprawidłowa linijka: \", edge)\n return\n vertex_from = int(edge[0]) - 1\n if vertex_from not in vertexes_left:\n vertexes_left.append(vertex_from)\n graph.addEdge(source_index, vertex_from, 1)\n\n vertex_to = int(edge[1]) - 1\n if vertex_to not in vertexes_right:\n vertexes_right.append(vertex_to)\n graph.addEdge(vertex_from, vertex_to, 1)\n graph.addEdge(vertex_to, sink_index, 1)\n\n graph.add_vertexes_left(vertexes_left)\n graph.add_vertexes_right(vertexes_right)\n\n graph.toString()\n return graph\n\ndef is_connected(graph):\n size = graph.getSize()\n matrix = graph.getAdjacencyMatrix()\n\n stack = Stack()\n visited_bool = [False for i in range(size)]\n components = []\n components_amount = 0\n\n for vertex in range(size):\n if visited_bool[vertex]:\n continue\n else:\n visited = []\n visited = dfs_components(vertex + 1, matrix, stack, visited, visited_bool)\n components.append(visited)\n components_amount += 1\n\n print(\"\\nComponents: \", components, \"\\nComponents amount: \", components_amount)\n\n if components_amount > 1:\n print(\"Graf nie jest spójny\\n\")\n output = False\n else:\n print(\"Graf jest spójny\\n\")\n output = True\n\n return output\n\n\ndef dfs_components(current_vertex, matrix, stack, visited, visited_bool):\n connections = matrix[current_vertex - 1]\n\n if not 
visited_bool[current_vertex - 1]:\n # print(\"Odwiedzenie \", current_vertex, \", odwiedzone: \", visited)\n visited_bool[current_vertex - 1] = True\n visited.append(current_vertex)\n stack.push(current_vertex)\n\n for i in range(len(connections)):\n if i != current_vertex - 1 and connections[i] != 0 and not visited_bool[i]:\n return dfs_components(i + 1, matrix, stack, visited, visited_bool)\n stack.pop()\n\n if stack.is_empty():\n return visited\n\n return dfs_components(stack.last_element(), matrix, stack, visited, visited_bool)\n\n\ndef is_bipartite(matrix, size, colored_verticles):\n colors = [0 for i in range(size)]\n queue = []\n\n for vertex in range(size):\n if colors[vertex] != 0:\n continue\n colors[vertex] = 1\n queue.append(vertex)\n while queue:\n queue_vertex = queue.pop(0)\n connections = matrix[queue_vertex]\n for index, value in enumerate(connections):\n if value == 1:\n if colors[index] != 0:\n continue\n if colors[index] == colors[queue_vertex]:\n # print(\"Index; \", index, \" Vertex: \", queue_vertex)\n print(\"Nie jest dwudzielny\")\n colored_verticles.append(colors)\n return False\n colors[index] = colors[queue_vertex] * -1\n queue.append(index)\n print(\"Jest dwudzielny\")\n colored_verticles.append(colors)\n return True\n\ndef build_bipartite_graph(matrix, size, vertexes_left, vertexes_right):\n # modyfikacja grafu na podstawie kolorowania\n directed_graph = WeightedDigraph(size)\n # dodanie źródła\n directed_graph.addVertex()\n source_index = size\n # dodanie ujścia\n directed_graph.addVertex()\n sink_index = size + 1\n\n for vertex in vertexes_left:\n # dodawanie krawędzi od źródła do wierzchołków po lewej\n directed_graph.addEdge(source_index, vertex - 1, 1)\n print(\"Dodawanie krawędzi \", source_index + 1, vertex)\n vertex_connections = matrix[vertex - 1]\n for index, value in enumerate(vertex_connections):\n if value != 0:\n # łączenie wierzchołków po lewej stronie z tymi po prawej\n directed_graph.addEdge(vertex - 1, index, 1)\n print(\"Dodawanie krawędzi \", vertex, index + 1)\n for vertex in vertexes_right:\n # dodawanie krawędzi od wierzchołków po prawej do ujścia\n directed_graph.addEdge(vertex - 1, sink_index, 1)\n print(\"Dodawanie krawędzi \", vertex, sink_index + 1)\n\n return directed_graph\n\n\ndef find_max_match(graph):\n\n # wprowadzenie zwykłego grafu\n if graph.getType() == \"graph\":\n\n matrix = graph.getAdjacencyMatrix()\n size = graph.getSize()\n\n colors = []\n #kolorowanie\n if not is_bipartite(matrix, size, colors):\n colors = colors[0]\n print(colors)\n return\n\n colors = colors[0]\n print(colors)\n\n vertexes_left = []\n vertexes_right = []\n for index, value in enumerate(colors):\n if value == 1:\n vertexes_left.append(index + 1)\n else:\n vertexes_right.append(index + 1)\n print(\"\\nWierzchołki z lewej: \", vertexes_left)\n print(\"Wierzchołki z prawej: \", vertexes_right)\n print()\n\n directed_graph = build_bipartite_graph(matrix, size, vertexes_left, vertexes_right)\n dir_matrix = directed_graph.getAdjacencyMatrix()\n dir_size = directed_graph.getSize()\n # wprowadzenie grafu już podzielonego\n else:\n directed_graph = graph\n dir_matrix = directed_graph.getAdjacencyMatrix()\n dir_size = directed_graph.getSize()\n vertexes_right = directed_graph.get_vertexes_right()\n vertexes_left = directed_graph.get_vertexes_left()\n\n print(\"Utworzona macierz: \")\n for row in dir_matrix:\n print(row)\n\n source = dir_size - 1\n sink = dir_size\n parents = [-1 for i in range(dir_size)]\n max_flow = 0\n max_matching = [-1 for i in 
range(dir_size)]\n\n while BFS_maximum_flow(dir_matrix, source, sink, parents):\n min_path_flow = float(\"Inf\")\n\n # szukamy minimalną wartość w ścieżce przepływu\n vertex = sink - 1\n while vertex != source - 1:\n min_path_flow = min(min_path_flow, dir_matrix[parents[vertex]][vertex])\n vertex = parents[vertex]\n\n max_flow += min_path_flow\n\n vertex = sink - 1\n while vertex != source - 1:\n ver_parent = parents[vertex]\n # zapisywanie skojarzeń\n max_matching[vertex] = ver_parent\n # zmianiamy wartości w macierzy sąsiedztwa\n dir_matrix[ver_parent][vertex] -= min_path_flow\n dir_matrix[vertex][ver_parent] += min_path_flow\n vertex = parents[vertex]\n\n print(\"Parents: \", parents)\n print(\"Max matching list: \", max_matching)\n\n print(\"Maksymalny przepływ: \", max_flow)\n\n # budowanie maksymalnego skojarzenia na podstawie wartości z tablicy parents na indeksach wierzchołków po prawej\n max_matching_edges = []\n for vertex in vertexes_right:\n vertex_from = max_matching[vertex - 1] + 1\n if vertex_from not in vertexes_left:\n continue\n vertex_to = vertex\n max_matching_edges.append([vertex_from, vertex_to])\n\n sorted_max_matching_edges = sorted(max_matching_edges, key = lambda x: x[0])\n\n print(\"\\n\")\n print(\"Maksymalne skojarzenie: \")\n print(sorted_max_matching_edges)\n\n return sorted_max_matching_edges\n\n\ndef BFS_maximum_flow(matrix, s, t, parent):\n size = len(matrix)\n visited = [False for i in range(size)]\n\n source = s - 1\n sink = t - 1\n\n queue = [source]\n visited[source] = True\n\n print(\"\\nPrzejście BFS:\")\n\n while queue:\n vertex = queue.pop(0)\n\n for index, value in enumerate(matrix[vertex]):\n if not visited[index] and value > 0:\n queue.append(index)\n visited[index] = True\n parent[index] = vertex\n print(\"Rodzic: \", vertex, \" Wierzchołek: \", index)\n if visited[sink]:\n return True\n\n return False\n","sub_path":"zad5/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":9430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"629737549","text":"# coding=utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport constructores as cn\n\nnodos = [(0,0), (0,8), (8,0), (8,8), (16,0), (16,8)]\n\nbarras = [(nodos[0], nodos[1]), (nodos[2], nodos[3]), (nodos[4],nodos[5]), (nodos[1], nodos[3]), (nodos[3],nodos[5]), (nodos[0],nodos[2]), (nodos[2],nodos[4]), (nodos[0],nodos[3]), (nodos[2],nodos[5])]\n\n\nclass Barra:\n \"\"\"\n Representación lógica de las propiedades geometricas de una barra.\n Necesita de un punto inicial y un punto final.\n \"\"\"\n def __init__(self, xs, ys):\n self.xn1, self.xn2 = xs\n self.yn1, self.yn2 = ys\n self.largo = np.sqrt((self.xn2 - self.xn1)**2 + (self.yn2 - self.yn1)**2)\n self.angulo = np.arctan((self.yn2 - self.yn1) / (self.xn2 - self.xn1))\n self.nodo1 = (self.xn1, self.yn1)\n self.nodo2 = (self.xn2, self.yn2)\n\n def get_len(self, d, eje):\n \"\"\"\n Devuelve la longitud con respecto a un eje dada una longitud 'd' sobre la barra.\n \"\"\"\n if eje == 'x':\n return d / (np.cos(self.angulo))\n elif eje == 'y':\n return d / (np.sin(self.angulo))\n else:\n raise IOError\n \n def get_y(self, x):\n return x * np.tan(self.angulo)\n \n def get_x(self, y):\n return y / np.tan(self.angulo)\n\n def get_nodo1(self):\n return self.nodo1\n\n def get_nodo2(self):\n return self.nodo2\n\n \nclass Esfuerzo:\n \"\"\"\"\n Define de forma lógica los esfuerzos.\n \n Tipos de esfuerzos:\n - 'pn' = Esfuerzo puntual sobre nodo.\n - 'pb' = Esfuerzo puntual 
sobre barra.\n - 'd' = Esfuerzo distribuido.\n \n Aplicacion: Its form depends on the type of effort.\n - 'pn' : (bar,) Where the bar object must be the one that has the effort\n applied in its first node.\n - 'pb' : (bar, int) Any bar. Int is the distance from the node 1 to the\n point given directly, not the X distance or the Y distance.\n - 'd' : (bar, int) Where Int is the distance from the node 1. If the\n distribuited effort is applied in the whole bar, you don't put anything.\n \"\"\"\"\"\n def __init__(self, tipo, aplicacion, vector, fuerza):\n self.ubicacion = cn.ubicar_esfuerzo(tipo, aplicacion, vector, fuerza) \n self.resultante = cn.resultante(tipo, aplicacion, vector, fuerza)\n\n\n\n#apoyos = [(nodos[0], 1), (nodos[4], 2)]\n\n#esfuerzos = [(nodos[2], -150, 'y', 'pr'), (nodos[1], 50, 'x'), 'pr']\nesfuerzo_oblicuo = [()]\n\n\n\n\ndef reactions(esfuerzos, apoyos, nodos):\n #Primeros renglón de la matriz con los esfuerzos:\n sum_fuerzas_x = [x[1] if x[2] == 'x' else 0 for x in esfuerzos]\n sum_fuerzas_y = [y[1] if y[2] == 'y' else 0 for y in esfuerzos] \n \n \n renglon_mom_apoyo = []\n renglon_fuerzas_x = []\n renglon_fuerzas_y = []\n \n mom_apoyo = []\n for i, e in enumerate(esfuerzos):\n if e[2] == 'x':\n mom_apoyo.append(e[0][1]*e[1])\n else:\n mom_apoyo.append(e[0][0]*e[1])\n print(mom_apoyo)\n \n b = (-sum(sum_fuerzas_x), -sum(sum_fuerzas_y), -sum(mom_apoyo))\n print(b) \n \n #Segundo renglón de la matriz con las reacciones:\n for x in apoyos:\n if x[1] == 2:\n renglon_fuerzas_x.append(0)\n renglon_fuerzas_y.append(1)\n renglon_fuerzas_x.append(1)\n renglon_fuerzas_y.append(0) \n else:\n renglon_fuerzas_x.append(0)\n renglon_fuerzas_y.append(1)\n\n #Tercer renglón de la matriz con los momentos:\n ##Se toma el nodo 1 como centro.\n renglon_mom_apoyo = []\n\n for a in apoyos:\n if a[0] == (0,0):\n renglon_mom_apoyo.append(0)\n elif a[1] >= 1 and a[0][1] == 0:\n renglon_mom_apoyo.append(a[0][0])\n renglon_mom_apoyo.append(0)\n \n reacciones = np.linalg.solve((renglon_fuerzas_x, renglon_fuerzas_y, renglon_mom_apoyo), b)\n \n\nif __name__ == '__main__':\n print('\\n------ BARRA ------')\n barra = Barra((0, 5), (0, 0))\n barra2 = Barra((0,0),(10,0))\n print(barra.get_nodo1())\n print(barra.angulo)\n print(barra.get_len(2, 'x'))\n print(barra.get_len(3, 'y'))\n \n print('\\n------ ESFUERZOS ------')\n esfuerzo_pb = Esfuerzo('pb', (barra, 3), (0, -1), 10)\n esfuerzo_dr = Esfuerzo('d', (barra2,), (0, -1), (10, 10))\n print(esfuerzo_pb.ubicacion)\n print(esfuerzo_dr.ubicacion)\n\n\n\n\n#TODO: Continuar con desarrollo de la clase esfuerzo.\n##TODO: Testear esfuerzos luego de terminar constructores.py\n##TODO: Agregar resultante del esfuerzo.\n##TODO: Hallar expresiones analíticas para los Q distribuidos.\n\n#TODO: Agregar posibilidad de empotrados.\n","sub_path":"truss_solver.py","file_name":"truss_solver.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"266186290","text":"from common.Common import Common\nimport asyncio,threading,time,logging,sys\nfrom datetime import datetime\n\nETF_AMOUNT = 1050\n\n\nclass Etf:\n\n def __init__(self):\n\n self._exchange = HbClient()\n self._etf_coins = dict()\n self._swap = None\n self._exchange_data = []\n self._open = []\n self._etf_config = {}\n self._thread = None\n self._etf_rate_in = -10\n self._etf_rate_out = 10\n\n def start(self):\n\n res = self._exchange.etf_config()\n p = ['HB10_USDT']\n self._etf_config['HB10_USDT'] = {'count':1}\n unit = 
res['unit_price']\n for i in unit:\n pair = i['coin'] + '_USDT'\n p.append(pair)\n self._etf_config[pair] = {'count': i['amount']}\n\n query = {'exchange': 'HbClient', 'pair': {'$in': p}}\n res = DB.find(Common.db_coin_pair, query)\n for i in res:\n self._etf_config[i['pair']].update({'amount_precision':i['amount_precision'],'price_precision':i['price_precision'],'min_amount':i['min_amount']})\n\n self._exchange.d_ws_sub(self,p,{Common.ws_order:'',Common.ws_depth:''})\n\n def ws_depth(self,exchange,ws_data):\n\n pair = ws_data['pair']\n buy = ws_data['buy']\n sell = ws_data['sell']\n config = self._etf_config[pair]\n count = config['count'] * ETF_AMOUNT\n\n num = 0\n for i in sell:\n if num >= count:\n buy_price = i[0]\n break\n num += i[1]\n\n num = 0\n for i in buy:\n if num >= count:\n sell_price = i[0]\n break\n num += i[1]\n\n buy_money = count * buy_price\n sell_money = count * sell_price\n\n if len(self._etf_coins) < 11:\n if pair not in self._etf_coins:\n self._etf_coins[pair] = {'coin': pair.split('_')[0], 'amount': 0, 'buy_price': buy_price,'sell_price':sell_price,\n 'count': config['count'], 'buy_money': buy_money,'sell_money':sell_money,\n 'amount_precision': config['amount_precision'],\n 'price_precision': config['price_precision'],\n 'min_amount': config['min_amount']}\n\n return\n\n data = self._etf_coins[pair]\n data.update({'buy_money': buy_money,'sell_money':sell_money})\n\n if self._swap:\n return\n\n bm = 0\n hb10_bm = 0\n sm = 0\n hb10_sm = 0\n for k, v in self._etf_coins.items():\n\n if k == 'HB10_USDT':\n hb10_bm = v['buy_money']\n hb10_sm = v['sell_money']\n else:\n bm += v['buy_money']\n sm += v['sell_money']\n\n in_rate = hb10_sm / bm - 1\n out_rate = hb10_bm / sm - 1\n\n if in_rate > self._etf_rate_in:\n logging.info(f'in---{hb10_sm}----{bm}---=={in_rate}')\n\n self._etf_rate_in = in_rate\n elif out_rate < self._etf_rate_out:\n logging.info(f'out---{hb10_bm}----{sm}---=={out_rate}')\n\n self._etf_rate_out = out_rate\n\n if in_rate > 0.0082:\n\n self._swap = 'in'\n self._exchange.d_ws_close()\n\n tasks = []\n for k, v in self._etf_coins.items():\n if k != 'HB10_USDT':\n amount = v['count'] * ETF_AMOUNT\n amount = amount / 0.997\n amount = Utility.precison_value(v['amount_precision'], amount)\n amount = amount + Utility.precision(v['amount_precision'])\n amount = Utility.precison_value(v['amount_precision'], amount)\n\n price = v['buy_price']\n price = Utility.precison_value(v['price_precision'], price)\n\n tasks.append({'coin': v['coin'], 'price': price, 'amount': amount})\n\n self.async_task(self.buy, tasks)\n\n elif out_rate < -0.0092:\n\n self._swap = 'out'\n self._exchange.d_ws_close()\n\n v = self._etf_coins['HB10_USDT']\n amount = ETF_AMOUNT / 0.996\n amount = Utility.precison_value(v['amount_precision'], amount)\n amount = amount + Utility.precision(v['amount_precision'])\n amount = Utility.precison_value(v['amount_precision'], amount)\n\n price = v['buy_price']\n price = Utility.precison_value(v['price_precision'], price)\n\n self.buy({'coin': 'HB10', 'price': price, 'amount': amount})\n\n # Utility.email_analyze(text)\n\n def ws_trade(self,exchange,ws_data):\n\n try:\n pair = ws_data['pair']\n price = ws_data['price']\n\n if len(self._etf_coins) < 11:\n\n if pair not in self._etf_coins:\n config = self._etf_config[pair]\n money = config['count'] * ETF_AMOUNT * price\n self._etf_coins[pair] = {'coin': pair.split('_')[0],'amount': 0, 'price': price, 'count': config['count'], 'money': money,'amount_precision': config['amount_precision'],'price_precision': 
config['price_precision'],'min_amount':config['min_amount']}\n\n return\n\n data = self._etf_coins[pair]\n money = data['count'] * ETF_AMOUNT * price\n data.update({'money': money})\n\n if self._swap:\n return\n\n money = 0\n m10 = 0\n\n for k,v in self._etf_coins.items():\n\n if k == 'HB10_USDT':\n m10 = v['money']\n else:\n money += v['money']\n\n rate = m10 / money - 1\n if rate > self._etf_rate_in:\n logging.info(f'in---{m10}----{money}---=={rate}')\n self._etf_rate_in = rate\n elif rate < self._etf_rate_out:\n logging.info(f'out---{m10}----{money}---=={rate}')\n self._etf_rate_out = rate\n\n if rate > 0.0082:\n\n self._swap = 'in'\n self._exchange.d_ws_close()\n\n tasks = []\n for k, v in self._etf_coins.items():\n if k != 'HB10_USDT':\n amount = v['count'] * ETF_AMOUNT\n amount = amount / 0.997\n amount = Utility.precison_value(v['amount_precision'], amount)\n amount = amount + Utility.precision(v['amount_precision'])\n amount = Utility.precison_value(v['amount_precision'], amount)\n\n price = v['price']\n price = Utility.precison_value(v['price_precision'], price)\n\n tasks.append({'coin': v['coin'], 'price': price, 'amount': amount})\n\n self.async_task(self.buy, tasks)\n\n elif rate < -0.0092:\n\n self._swap = 'out'\n self._exchange.d_ws_close()\n\n v = self._etf_coins['HB10_USDT']\n amount = ETF_AMOUNT / 0.996\n amount = Utility.precison_value(v['amount_precision'], amount)\n amount = amount + Utility.precision(v['amount_precision'])\n amount = Utility.precison_value(v['amount_precision'], amount)\n\n price = v['price']\n price = Utility.precison_value(v['price_precision'], price)\n\n self.buy({'coin': 'HB10', 'price': price, 'amount': amount})\n\n #Utility.email_analyze(text)\n\n except Exception as e:\n logging.error(f'{sys._getframe().f_code.co_name}---{e}')\n\n def ws_order(self,exchange,ws_data):\n\n logging.info(f'{sys._getframe().f_code.co_name}---{ws_data}')\n\n pair = ws_data['pair']\n\n if pair not in self._etf_coins:\n return\n\n if ws_data['status'] == Common.status_open:\n\n data = {'pair':pair,'order_id': ws_data['order_id'], 'money': ws_data['money'], 'amount': ws_data['amount'], 'unfee_amount': ws_data['unfee_amount'],'side':ws_data['side'],'price':ws_data['price']}\n self._open.append(data)\n self.open_order()\n\n elif ws_data['status'] == Common.status_filled:\n\n for i in self._open:\n if i['pair'] == pair:\n self._open.remove(i)\n break\n\n if self._swap == 'in':\n\n self._exchange_data.append({'pair': pair, 'money': ws_data['money']})\n\n if len(self._exchange_data) == 10:\n\n logging.debug(333333333333333333333)\n #Utility.email_analyze('333333333333333333333')\n\n self._swap = 'swap_in'\n self.swap_in(ETF_AMOUNT)\n\n v = self._etf_coins['HB10_USDT']\n #price = v['price']\n price = v['sell_price']\n price = Utility.precison_value(v['price_precision'], price)\n\n self.sell({'coin': 'HB10', 'price': price, 'amount': ETF_AMOUNT})\n\n elif self._swap == 'swap_in':\n\n logging.debug(f'4444444444444444444444,{self._exchange_data}')\n\n #Utility.email_analyze('4444444444444444444444')\n\n money = 0\n for i in self._exchange_data:\n money += i['money']\n\n profit_money = ws_data['money'] - money\n rate = '{:.2%}'.format(profit_money / money)\n\n data = {'swap':'in','coins':self._exchange_data,'coins_money':money,'hb10_money':ws_data['money'],'profit_money':profit_money,'rate':rate,'datetime':datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n DB.insert_one(Common.db_etf, data)\n\n self._exchange_data.clear()\n self._etf_coins.clear()\n self._swap = None\n\n 
self._exchange.d_ws_resub\n\n # t = threading.Timer(300, self._exchange.d_ws_resub)\n # t.start()\n\n elif self._swap == 'out':\n\n logging.debug(555555555555555)\n #Utility.email_analyze('555555555555555')\n\n self._swap = 'swap_out'\n amount = int(ETF_AMOUNT / 0.996 * 0.998)\n self.swap_out(amount)\n self._hb10_money = ws_data['money']\n\n time.sleep(0.1)\n tasks = []\n for k, v in self._etf_coins.items():\n if k != 'HB10_USDT':\n amount = v['count'] * ETF_AMOUNT\n amount = Utility.precison_value(v['amount_precision'], amount)\n\n price = v['sell_price']\n price = Utility.precison_value(v['price_precision'], price)\n\n tasks.append({'coin': v['coin'], 'price': price, 'amount': amount})\n\n self.async_task(self.sell, tasks)\n\n elif self._swap == 'swap_out':\n\n logging.debug(66666666666666)\n\n #Utility.email_analyze('66666666666666')\n\n self._exchange_data.append({'pair': pair, 'money': ws_data['money']})\n\n if len(self._exchange_data) == 10:\n\n money = 0\n for i in self._exchange_data:\n money += i['money']\n\n profit_money = money - self._hb10_money\n rate = '{:.2%}'.format(profit_money / self._hb10_money)\n\n data = {'swap': 'out', 'coins': self._exchange_data, 'coins_money': money,'hb10_money': self._hb10_money,'profit_money':profit_money, 'rate': rate,'datetime':datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n DB.insert_one(Common.db_etf, data)\n\n self._exchange_data.clear()\n self._etf_coins.clear()\n self._swap = None\n\n self._exchange.d_ws_resub\n\n # t = threading.Timer(300, self._exchange.d_ws_resub)\n # t.start()\n\n elif ws_data['status'] == Common.status_part_filled:\n\n flag = True\n for i in self._open:\n if i['pair'] == pair:\n i.update({'unfee_amount': ws_data['unfee_amount']})\n flag = False\n break\n\n if flag:\n data = {'pair': pair, 'order_id': ws_data['order_id'], 'money': ws_data['money'],'amount': ws_data['amount'], 'unfee_amount': ws_data['unfee_amount'], 'side': ws_data['side'],'price': ws_data['price']}\n self._open.append(data)\n self.open_order()\n\n elif ws_data['status'] == Common.status_canceled or ws_data['status'] == Common.status_part_canceled:\n for i in self._open:\n if i['pair'] == pair:\n self._open.remove(i)\n break\n\n def ws_account(self,exchange,ws_data):\n\n logging.info(ws_data)\n\n def buy(self,data):\n logging.info(f'etf_buy---{data}')\n try:\n self._exchange.buy(data['coin'],'USDT', data['price'], data['amount'])\n except Exception as e:\n logging.error(f'{sys._getframe().f_code.co_name}---{e}')\n\n def sell(self, data):\n logging.info(f'etf_sell---{data}')\n try:\n self._exchange.sell(data['coin'], 'USDT', data['price'], data['amount'])\n except Exception as e:\n logging.error(f'{sys._getframe().f_code.co_name}---{e}')\n\n def swap_in(self, amount):\n try:\n self._exchange.swap_in(amount)\n except Exception as e:\n logging.error(f'{sys._getframe().f_code.co_name}---{e}')\n\n def swap_out(self, amount):\n try:\n self._exchange.swap_out(amount)\n except Exception as e:\n logging.error(f'{sys._getframe().f_code.co_name}---{e}')\n\n def async_task(self,func,execute_data):\n\n async def execute(data):\n await loop.run_in_executor(None, func, data)\n\n tasks = []\n\n for i in execute_data:\n tasks.append(execute(i))\n\n loop = asyncio.new_event_loop()\n #asyncio.set_event_loop(loop)\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n def open_order(self):\n\n def execute(event):\n\n event.wait(60)\n\n while len(self._open) > 0:\n\n for i in self._open:\n\n pair = i['pair']\n etf_coin = self._etf_coins[pair]\n min_amount = 
etf_coin['min_amount']\n amount = i['amount'] - i['unfee_amount']\n amount = Utility.precison_value(etf_coin['amount_precision'], amount)\n\n if amount >= min_amount:\n\n coin,fund = pair.split('_')\n res = self._exchange.depth(coin, fund)\n\n if i['side'] == Common.trade_side_buy:\n # price = res['buy'][9][0]\n # if price > i['price']:\n # self._exchange.cancelOrder(coin, fund, i['order_id'])\n # event.wait(0.3)\n # price = res['buy'][4][0]\n # self.buy({'coin': coin, 'price': price, 'amount': amount})\n\n self._exchange.cancelOrder(coin, fund, i['order_id'])\n event.wait(0.3)\n price = res['sell'][1][0]\n self.buy({'coin': coin, 'price': price, 'amount': amount})\n\n else:\n # price = res['sell'][9][0]\n # if price < i['price']:\n # self._exchange.cancelOrder(coin, fund, i['order_id'])\n # event.wait(0.3)\n # price = res['sell'][4][0]\n # self.sell({'coin': coin, 'price': price, 'amount': amount})\n\n self._exchange.cancelOrder(coin, fund, i['order_id'])\n event.wait(0.3)\n price = res['buy'][1][0]\n self.sell({'coin': coin, 'price': price, 'amount': amount})\n\n if len(self._open) > 0:\n event.wait(60)\n\n self._thread = None\n\n if not self._thread:\n\n event = threading.Event()\n self._thread = threading.Thread(target=execute,args=(event,))\n self._thread.setDaemon(True)\n self._thread.start()","sub_path":"strategy/etf.py","file_name":"etf.py","file_ext":"py","file_size_in_byte":16646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"36804907","text":"# Copyright (C) 2013 Alex Nitz\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 3 of the License, or (at your\n# option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\n#\n# =============================================================================\n#\n# Preamble\n#\n# =============================================================================\n#\n\"\"\"This module contains a legacy wrapper to use the FindChirpSPTemplate approximant\n\"\"\"\nimport lalsimulation\nimport numpy\nimport lal\nimport pycbc\nimport pycbc.pnutils\nfrom math import sqrt\nfrom pycbc.types import Array, zeros, complex64, float32, FrequencySeries, complex128\nfrom pycbc.waveform.spa_tmplt import spa_tmplt_precondition, spa_amplitude_factor, spa_tmplt\n\ndef findchirp_template(**p):\n import lalinspiral\n m1 = p['mass1']\n m2 = p['mass2']\n s1z = p['spin1z']\n s2z = p['spin2z']\n\n M = m1 + m2\n mc, et = pycbc.pnutils.mass1_mass2_to_mchirp_eta(m1, m2)\n\n m_sec = M * lal.MTSUN_SI;\n piM = lal.PI * m_sec;\n kmin = int(p['f_lower'] / float(p['delta_f']))\n vISCO = 1. 
/ sqrt(6.)\n fISCO = vISCO * vISCO * vISCO / piM;\n kmax = int(fISCO / p['delta_f'])\n\n if 'out' in p:\n n = len(p['out'])\n else:\n n = kmax\n\n N = (n-1)*2\n delta_t = 1.0 / (p['delta_f'] * N)\n\n amp_factor = spa_amplitude_factor(mass1=m1, mass2=m2) / p['distance']\n\n fctmplt = lalinspiral.FindChirpTemplate()\n fctmplt.data = zeros(n, dtype=complex64).lal()\n\n tmplt = lalinspiral.InspiralTemplate()\n tmplt.totalMass = M\n tmplt.eta = et\n tmplt.mu = m1*m2 / M\n tmplt.spin1[2] = s1z\n tmplt.spin2[2] = s2z\n tmplt.fFinal = fISCO\n\n params = lalinspiral.FindChirpTmpltParams()\n\n vec = numpy.arange(0, n, 1)\n vec2 = numpy.zeros(n, dtype=float32)\n vec2[1:] = vec[1:] ** (-1.0/3.0)\n params.xfacVec = Array(vec2, dtype=float32).lal()\n\n params.approximant=lalsimulation.FindChirpSP\n\n # Max implemented order is 7 for fctmplt\n if p['phase_order'] == -1:\n p['phase_order'] = 7\n\n params.order = int(p['phase_order'])\n params.deltaT = delta_t\n params.fLow = p['f_lower']\n params.dynRange = pycbc.DYN_RANGE_FAC\n\n lalinspiral.FindChirpSPTemplate(fctmplt, tmplt, params)\n kfac = spa_tmplt_precondition(n, p['delta_f'])\n\n htilde = FrequencySeries(fctmplt.data.data, delta_f=p['delta_f'])\n htilde *= (amp_factor * kfac)\n\n if 'out' in p:\n p['out'][:] = htilde[:]\n htilde = FrequencySeries(p['out'], copy=False, delta_f=p['delta_f'])\n\n return htilde\n","sub_path":"pycbc/waveform/fctmplt.py","file_name":"fctmplt.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"433367234","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n 'name': \"Selection +\",\n 'description': \"Add sets of keyboard shortcut to make navigation and editing easier\",\n 'warning': \"Incompatible with emulate numpad as it uses some of number keys as a shortcut\",\n 'author': \"KotaKerdus\",\n 'version': (0, 1),\n 'blender': (2, 79, 0),\n 'location': \"View3D\",\n 'category': \"3D View\"\n}\n\nmesh_selection_keymap = {\n 'ONE': 'VERT',\n 'TWO': 'EDGE',\n 'THREE': 'FACE'\n}\n\nuv_selection_keymap = {\n 'ONE': 'VERTEX',\n 'TWO': 'EDGE',\n 'THREE': 'FACE',\n 'FOUR': 'ISLAND'\n}\n\nimport bpy\nfrom bpy.types import (AddonPreferences)\nfrom bpy.app.handlers import persistent\n\n# ─── PREFERENCES SECTION ────────────────────────────────────────────────────────\n\nclass SPPrefs(AddonPreferences):\n bl_idname = __name__\n def draw(self, context):\n layout = self.layout\n\n box = layout.box()\n box.label(\"3D View\", icon=\"OBJECT_DATAMODE\")\n boxone = box.box()\n boxone.label(\"DRAG SELECTMOUSE > Border Gesture\", icon=\"BORDER_RECT\")\n boxone.label(\"To de-select hold CTRL\", icon=\"INFO\")\n boxtwo = box.box()\n boxtwo.label(\"HOLD C > Circle Gesture\", icon=\"BORDER_LASSO\")\n boxtwo.label(\"To de-select use ACTIONMOUSE instead\", icon=\"INFO\")\n\n box = layout.box()\n box.label(\"Edit Mode\", icon=\"EDITMODE_HLT\")\n boxone = box.box()\n boxone.label(\"SHIFT + DOUBLE SELECTMOUSE > Add Linked Mesh to selection\", icon=\"OBJECT_DATAMODE\")\n boxone.label(\"CTRL + DOUBLE SELECTMOUSE > Substract Linked Mesh from selection\", icon=\"MESH_CUBE\")\n boxone = box.box()\n boxone.label(\"1 > Vertex Select\", icon=\"VERTEXSEL\")\n boxone.label(\"2 > Edge Select\", icon=\"EDGESEL\")\n boxone.label(\"3 > Face Select\", icon=\"FACESEL\")\n boxone.label(\"4 > Occlude Geometry\", icon=\"ORTHO\")\n boxone.label(\"Shift + (1, 2, 3) button to add/remove type selection\", icon=\"INFO\")\n\n box = layout.box()\n box.label(\"Image/UV Editor\", icon=\"IMAGE_COL\")\n boxone = box.box()\n boxone.label(\"1 > Vertex Select\", icon=\"UV_VERTEXSEL\")\n boxone.label(\"2 > Edge Select\", icon=\"UV_EDGESEL\")\n boxone.label(\"3 > Face Select\", icon=\"UV_FACESEL\")\n boxone.label(\"4 > Island Select\", icon=\"UV_ISLANDSEL\")\n\n# ─── METHOD ─────────────────────────────────────────────────────────────────────\n\ndef kmi_props_set(kmi_props, attr, val):\n try: setattr(kmi_props, attr, val)\n except AttributeError: print(\"Warning: property '%s' not found in keymap item '%s'\" %\n (attr, kmi_props.__class__.__name__))\n except Exception as e: print(\"Warning: %r\" % e)\n\n# ─── OPERATOR ───────────────────────────────────────────────────────────────────\n\n@persistent\ndef initial():\n disable_default_keymaps()\n new_select()\n gesture_border()\n gesture_circle()\n mesh_quick_selection()\n uv_quick_selection()\n\n# Disable some of the default keymap keys\ndef disable_default_keymaps():\n wm = bpy.context.window_manager\n\n # Disable default 'Activate/Select' keymap key\n active_select_keymaps = wm.keyconfigs.default.keymaps[\"3D View\"].keymap_items\n for key in active_select_keymaps:\n if key.idname == \"view3d.select\" and key.shift == False and key.ctrl == False and key.alt == False:\n key.active = False\n elif key.idname == \"view3d.select\" and 
key.shift == True and key.ctrl == False and key.alt == False:\n key.active = False\n elif key.idname == \"view3d.select\" and key.shift == False and key.ctrl == True and key.alt == False:\n key.active = False\n\n # Disable default 'Pick Shortest Path' keymap key with CTRL enabled (located in 3D View > Mesh category)\n shortest_path_keymap = wm.keyconfigs.default.keymaps[\"Mesh\"].keymap_items\n for key in shortest_path_keymap:\n if key.idname == \"mesh.shortest_path_pick\" and key.properties.use_fill == False:\n key.active = False\n break\n\n # Disable default 'Border Select' and 'Circle Select' in UV Editor category\n uv_border_select_keymaps = wm.keyconfigs.default.keymaps[\"UV Editor\"].keymap_items\n for key in uv_border_select_keymaps:\n if key.idname == \"uv.select_border\":\n key.active = False\n\n # Disable default 'Gesture Border' modal keymap keys\n gesture_border_keymaps = wm.keyconfigs.default.keymaps[\"Gesture Border\"].keymap_items\n for key in gesture_border_keymaps:\n key.active = False\n\n # Disable default 'Gesture Circle' modal keymap keys\n gesture_circle_keymaps = wm.keyconfigs.default.keymaps[\"View3D Gesture Circle\"].keymap_items\n for key in gesture_circle_keymaps:\n key.active = False\n\n# Add modified 'Activate/Select keymap key\ndef new_select():\n wm = bpy.context.window_manager\n km = wm.keyconfigs.addon.keymaps.new(\"3D View\", space_type='VIEW_3D', region_type='WINDOW', modal=False)\n\n kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK')\n kmi_props_set(kmi.properties, 'extend', False)\n kmi_props_set(kmi.properties, 'deselect', False)\n kmi_props_set(kmi.properties, 'toggle', False)\n kmi_props_set(kmi.properties, 'center', False)\n kmi_props_set(kmi.properties, 'enumerate', False)\n kmi_props_set(kmi.properties, 'object', False)\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', shift=True)\n kmi_props_set(kmi.properties, 'extend', False)\n kmi_props_set(kmi.properties, 'deselect', False)\n kmi_props_set(kmi.properties, 'toggle', True)\n kmi_props_set(kmi.properties, 'center', False)\n kmi_props_set(kmi.properties, 'enumerate', False)\n kmi_props_set(kmi.properties, 'object', False)\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', ctrl=True)\n kmi_props_set(kmi.properties, 'extend', False)\n kmi_props_set(kmi.properties, 'deselect', False)\n kmi_props_set(kmi.properties, 'toggle', False)\n kmi_props_set(kmi.properties, 'center', True)\n kmi_props_set(kmi.properties, 'enumerate', False)\n kmi_props_set(kmi.properties, 'object', True)\n addon_keymaps.append((km, kmi))\n\n # Now pick shortest path will trigger if the CTRL is released (default: press)\n km = wm.keyconfigs.addon.keymaps.new(\"Mesh\", space_type='EMPTY', region_type='WINDOW', modal=False)\n kmi = km.keymap_items.new('mesh.shortest_path_pick', 'SELECTMOUSE', 'RELEASE', ctrl=True)\n kmi_props_set(kmi.properties, 'use_fill', False)\n addon_keymaps.append((km, kmi))\n\n # TODO: Review to whether to delete or not\n # km = wm.keyconfigs.addon.keymaps.new(\"Object Mode\", space_type='EMPTY', region_type='WINDOW', modal=False)\n # kmi = km.keymap_items.new('object.select_all', 'SELECTMOUSE', 'PRESS')\n # kmi_props_set(kmi.properties, 'action', 'DESELECT')\n # addon_keymaps.append((km, kmi))\n\n # km = wm.keyconfigs.addon.keymaps.new(\"Mesh\", space_type='EMPTY', region_type='WINDOW', modal=False)\n # kmi = km.keymap_items.new('mesh.select_all', 'SELECTMOUSE', 'PRESS')\n # 
kmi_props_set(kmi.properties, 'action', 'DESELECT')\n # addon_keymaps.append((km, kmi))\n\n# Gesture Border for quick rectangle selection by dragging SELECTMOUSE\ndef gesture_border():\n wm = bpy.context.window_manager\n km = wm.keyconfigs.addon.keymaps.new(\"Gesture Border\", space_type='EMPTY', region_type='WINDOW', modal=True)\n\n kmi = km.keymap_items.new_modal('BEGIN', 'SELECTMOUSE', 'PRESS')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('SELECT', 'SELECTMOUSE', 'ANY')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('SELECT', 'SELECTMOUSE', 'RELEASE', shift=True)\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('DESELECT', 'SELECTMOUSE', 'RELEASE', ctrl=True)\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('CANCEL', 'SELECTMOUSE', 'RELEASE', alt=True)\n addon_keymaps.append((km, kmi))\n\n # Call rectangle selection on SELECTMOUSE drag\n km = wm.keyconfigs.addon.keymaps.new(\"3D View\", space_type='VIEW_3D', region_type='WINDOW')\n kmi = km.keymap_items.new('view3d.select_border', 'EVT_TWEAK_S', 'ANY')\n kmi_props_set(kmi.properties, 'extend', False)\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new('view3d.select_border', 'EVT_TWEAK_S', 'ANY', shift=True)\n kmi_props_set(kmi.properties, 'extend', True)\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new('view3d.select_border', 'EVT_TWEAK_S', 'ANY', ctrl=True)\n kmi_props_set(kmi.properties, 'extend', False)\n addon_keymaps.append((km, kmi))\n\n km = wm.keyconfigs.addon.keymaps.new(\"UV Editor\", space_type='EMPTY', region_type='WINDOW')\n kmi = km.keymap_items.new('uv.select_border', 'EVT_TWEAK_S', 'ANY')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new('uv.select_border', 'EVT_TWEAK_S', 'ANY', shift=True)\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new('uv.select_border', 'EVT_TWEAK_S', 'ANY', ctrl=True)\n addon_keymaps.append((km, kmi))\n\n# Gesture circle for quick Circle selection by holding C button\ndef gesture_circle():\n wm = bpy.context.window_manager\n km = wm.keyconfigs.addon.keymaps.new('View3D Gesture Circle', space_type='EMPTY', region_type='WINDOW', modal=True)\n\n kmi = km.keymap_items.new_modal('CONFIRM', 'C', 'RELEASE')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('SELECT', 'SELECTMOUSE', 'PRESS')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('DESELECT', 'MIDDLEMOUSE', 'PRESS')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('NOP', 'SELECTMOUSE', 'RELEASE')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('NOP', 'MIDDLEMOUSE', 'RELEASE')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('SUBTRACT', 'WHEELUPMOUSE', 'PRESS')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('SUBTRACT', 'NUMPAD_MINUS', 'PRESS')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('ADD', 'WHEELDOWNMOUSE', 'PRESS')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('ADD', 'NUMPAD_PLUS', 'PRESS')\n addon_keymaps.append((km, kmi))\n kmi = km.keymap_items.new_modal('SIZE', 'TRACKPADPAN', 'ANY')\n addon_keymaps.append((km, kmi))\n\n# Bind 1, 2, 3, 4 key to change between element modes in Edit Mode\ndef mesh_quick_selection():\n wm = bpy.context.window_manager\n km = wm.keyconfigs.addon.keymaps.new(\"Mesh\", space_type='EMPTY', region_type='WINDOW', modal=False)\n\n # Default quick selection\n for key, val in mesh_selection_keymap.items():\n kmi = 
km.keymap_items.new('mesh.select_mode', key, 'PRESS')\n kmi_props_set(kmi.properties, 'type', val)\n addon_keymaps.append((km, kmi))\n\n # Extend quick selection (holding SHIFT will toggle it)\n for key, val in mesh_selection_keymap.items():\n kmi = km.keymap_items.new('mesh.select_mode', key, 'PRESS', shift=True)\n kmi_props_set(kmi.properties, 'type', val)\n kmi_props_set(kmi.properties, 'use_extend', True)\n addon_keymaps.append((km, kmi))\n\n # Occlude geometry\n kmi = km.keymap_items.new('wm.context_toggle', 'FOUR', 'PRESS')\n kmi_props_set(kmi.properties, 'data_path', 'space_data.use_occlude_geometry')\n addon_keymaps.append((km,kmi))\n\n # Select linked mesh elements\n #NOTE: This one bugged? instead of selecting it, it select then deselect it... useless...\n # kmi = km.keymap_items.new( 'mesh.select_linked_pick', 'SELECTMOUSE', 'DOUBLE_CLICK')\n # kmi_props_set(kmi.properties, 'deselect', False)\n # addon_keymaps.append((km,kmi))\n kmi = km.keymap_items.new( 'mesh.select_linked_pick', 'SELECTMOUSE', 'DOUBLE_CLICK', shift=True)\n kmi_props_set(kmi.properties, 'deselect', False)\n addon_keymaps.append((km,kmi))\n kmi = km.keymap_items.new( 'mesh.select_linked_pick', 'SELECTMOUSE', 'DOUBLE_CLICK', ctrl=True)\n kmi_props_set(kmi.properties, 'deselect', True)\n addon_keymaps.append((km,kmi))\n\n# Same as above but for Image/UV Editor\ndef uv_quick_selection():\n wm = bpy.context.window_manager\n km = wm.keyconfigs.addon.keymaps.new('UV Editor', space_type = 'EMPTY', region_type = 'WINDOW')\n\n for key, val in uv_selection_keymap.items():\n kmi = km.keymap_items.new('wm.context_set_enum', key, 'PRESS')\n kmi_props_set(kmi.properties, 'data_path', 'tool_settings.uv_select_mode')\n kmi_props_set(kmi.properties, 'value', val)\n addon_keymaps.append((km, kmi))\n\n# Clear and reset all the keymaps\ndef reset_keymaps():\n wm = bpy.context.window_manager\n\n # Clear all the keymaps this addon created\n for km, kmi in addon_keymaps:\n km.keymap_items.remove(kmi)\n addon_keymaps.clear()\n\n # Re-enable all the gesture keymaps to its default state\n active_select_keymaps = wm.keyconfigs.default.keymaps[\"3D View\"].keymap_items\n for key in active_select_keymaps:\n if key.idname == \"view3d.select\" and key.shift == False and key.ctrl == False and key.alt == False:\n key.active = True\n elif key.idname == \"view3d.select\" and key.shift == True and key.ctrl == False and key.alt == False:\n key.active = True\n elif key.idname == \"view3d.select\" and key.shift == False and key.ctrl == True and key.alt == False:\n key.active = True\n\n shortest_path_keymap = wm.keyconfigs.default.keymaps[\"Mesh\"].keymap_items\n for key in shortest_path_keymap:\n if key.idname == \"mesh.shortest_path_pick\" and key.properties.use_fill == False:\n key.active = True\n break\n\n uv_border_select_keymaps = wm.keyconfigs.default.keymaps[\"UV Editor\"].keymap_items\n for key in uv_border_select_keymaps:\n key.active = False\n\n gesture_border_keymaps = wm.keyconfigs.default.keymaps[\"Gesture Border\"].keymap_items\n for key in gesture_border_keymaps:\n key.active = True\n\n gesture_circle_keymaps = wm.keyconfigs.default.keymaps[\"View3D Gesture Circle\"].keymap_items\n for key in gesture_circle_keymaps:\n key.active = True\n\n# ─── SYSTEM ─────────────────────────────────────────────────────────────────────\n\naddon_keymaps = []\n\ndef register():\n bpy.utils.register_module(__name__)\n\n disable_default_keymaps()\n new_select()\n gesture_border()\n gesture_circle()\n mesh_quick_selection()\n uv_quick_selection()\n\ndef 
unregister():\n bpy.utils.unregister_module(__name__)\n\n reset_keymaps()\n\nif __name__ == '__main__':\n register()","sub_path":"selection-plus.py","file_name":"selection-plus.py","file_ext":"py","file_size_in_byte":15896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"159933043","text":"################################\n##### Instagram crawler for text\n################################\n\n\n## import module\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nfrom bs4 import BeautifulSoup as bs\n\nimport time\nimport sys\nfrom imp import reload\n\nimport re\n\nfrom urllib.request import urlopen\nimport requests\nimport datetime \nimport wget\nimport html2text\n\nimport json\nfrom pandas.io.json import json_normalize\n\nimport pandas as pd\nimport numpy as np\n\nfrom random import *\n\n\n## Instagram Sign in \n\nreload(sys)\n#sys.setdefaultencoding('utf-8') python3에는 디폴트라 굳이 설정 ㄴㄴ\n\n\ntargetInstagramUrl = 'https://www.instagram.com/accounts/login/?source=auth_switcher'\n\noptions = webdriver.ChromeOptions()\n#mobile_emulation = { \"deviceName\": \"Nexus 5\" }\n#options.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n\nlogin_id = input('put your login id : ')\nlogin_pw = input('put your login password : ')\n\ndriver = webdriver.Chrome('chromedriver.exe', chrome_options=options)\ndriver.get(targetInstagramUrl)\n\n#login process ----------------------------------------------------------------- start\n#login process ----------------------------------------------------------------- start\ntime.sleep(2)\n\ndriver.find_elements_by_name(\"username\")[0].send_keys(login_id)\ndriver.find_elements_by_name(\"password\")[0].send_keys(login_pw)\n\ndriver.find_element_by_xpath(\"//form/div[4]/button\").submit()\n\ntime.sleep(3)\n\ndriver.find_element_by_xpath(\"/html/body/div[3]/div/div/div[3]/button[2]\").click()\n#login process ----------------------------------------------------------------- end\n#login process ----------------------------------------------------------------- end\n\n\n## Hashtag Search\n\nhashtag = input('What do you wanna search for : ')\ndriver.get('https://www.instagram.com/explore/tags/'+ hashtag)\n\n\n## Get links of posts\n\nlinks = []\nlast_height = driver.execute_script(\"return document.body.scrollHeight\") # Get scroll height\n\nwhile True:\n \n source = driver.page_source\n data = bs(source, 'html.parser')\n body = data.find('body')\n script = body.find('span')\n\n for link in script.findAll('a'):\n if re.match(\"/p\", link.get('href')):\n links.append('https://www.instagram.com'+link.get('href'))\n \n # Scroll down to bottom \n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n \n # Wait to load page\n time.sleep(uniform(3.0, 5.0))\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n \n if new_height == last_height:\n break\n last_height = new_height\n \n\nlinks = list(set(links))\n\n\n## Web Scraping(json) -> DataFrame\n\nresult = pd.DataFrame()\n\nfor i in range(len(links)) : \n try:\n\n page = urlopen(links[i]).read()\n data = bs(page, 'html.parser')\n body = data.find('body')\n script = body.find('script')\n raw = script.text.strip().replace('window._sharedData =', '').replace(';', '')\n json_data=json.loads(raw)\n\n posts =json_data['entry_data']['PostPage'][0]['graphql']\n posts = json.dumps(posts)\n posts = json.loads(posts)\n\n x = 
pd.DataFrame.from_dict(json_normalize(posts), orient='columns') \n x.columns = x.columns.str.replace(\"shortcode_media.\", \"\")\n result=result.append(x)\n \n except:\n np.nan\n \n\nresult = result.drop_duplicates(subset = 'shortcode')\nresult.index = range(len(result.index))\n\n\n## Make DataFrame into CSV\n\nresult.to_csv('myfile.csv', encoding = 'UTF-8-sig') #엑셀로 열 것 같아서.. ㅋㅋ\n\n","sub_path":"insta_text.py","file_name":"insta_text.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"293295096","text":"import csv\nimport pandas as pd\nimport numpy as np\nfrom os import getcwd, listdir\nfrom scipy.stats import linregress\nfrom scipy.odr import Model, Data, ODR\nfrom os.path import isfile, join\nimport math\nimport matplotlib.pyplot as plt\n\n\n\ndef get_fluidtypes_fractions():\n \"\"\"\n\n :return:\n \"\"\"\n file_name = 'MassFraction_FluidTypes.csv'\n cwd = getcwd()\n file = cwd + '\\\\' + file_name\n with open(file, \"r\") as csvfile:\n df = pd.read_csv(csvfile)\n return df\n\n\ndef euclidean3(vector1, vector2):\n ''' use numpy.linalg.norm to calculate the euclidean distance. '''\n distance = np.linalg.norm(vector1 - vector2)\n return distance\n\n\ndef calculate_asphaltene(df, components_frame, res_fluid, isAsphalteneSelected):\n \"\"\"\n if asphaltene fluid is selected and the choice to compute the asphaltene fluid is ON,\n do a 2D linear polinomila fit on rhoe, sigma or rhoe, HI with all fluid types except asphaltenes/tar\n compute the orthogonal distance from reservoir fluid point to line and from asphaltene fluid point to line\n fraction equals the ratio from distances times mass fraction of asphaltenes in asphaltene fluid\n :return: asphaltenes compoenent fraction\n \"\"\"\n\n\n # isAsphalteneSelected = False\n\n if isAsphalteneSelected:\n Asp_point = np.array((df[df.index=='8Asphaltene']['HI'], df[df.index=='8Asphaltene']['rhoe']))\n Res_point = np.array( (df[df.index == res_fluid]['HI'], df[df.index == res_fluid]['rhoe']))\n\n yarray = np.array(df[(df.index!='8Asphaltene')&(df.index!=res_fluid)]['rhoe'])\n xarray = np.array(df[(df.index != '8Asphaltene') & (df.index != res_fluid)]['HI'])\n z = np.polyfit(xarray,yarray,1)\n\n d_asp = abs(z[0] * Asp_point[0] + z[1] - Asp_point[1]) / math.sqrt((z[0]**2 + 1))\n d_res = abs(z[0] * Res_point[0] + z[1] - Res_point[1]) / math.sqrt((z[0]**2 + 1))\n\n fraction = d_res / d_asp * components_frame[components_frame['CNAMES']=='asphaltene']['8Asphaltene']\n return float(fraction)\n else:\n return 0.0\n\n\n\ndef calculate_fraction(df, fluid_to_solve, isAsphalteneSelected):\n \"\"\"\n\n :type nuclear_frame:\n \"\"\"\n\n distances = []\n res_fluid = '9ReservoirFluid'\n res_vector = np.array(df.loc[res_fluid])\n for index, row in df.iterrows():\n fluid_vector = np.array(df.loc[index])\n distances.append(euclidean3(res_vector, fluid_vector))\n ### normalize distances\n norm_dist = []\n sum_distances = sum(distances)\n\n for dist in distances:\n norm_dist.append(dist/sum_distances)\n\n df['Ndistance'] = norm_dist\n print(df)\n\n c_fraction = []\n components_frame = get_fluidtypes_fractions()\n for c in components_frame['CNAMES']:\n fraction = 0\n for index, row in df.iterrows():\n if df.loc[index]['Ndistance'] != 0.0:\n fraction += (1 / df.loc[index]['Ndistance']) * \\\n float(components_frame[components_frame['CNAMES']==c][index])\n else:\n fraction += 0\n if c == 'asphaltene':\n fraction = calculate_asphaltene(df, components_frame, res_fluid, 
isAsphalteneSelected)\n c_fraction.append(fraction)\n norm_fractions = []\n sum_fractions = sum(c_fraction)\n ### normalize fractions\n for f in c_fraction:\n norm_fractions.append(f / sum_fractions)\n\n components_frame['Computed Mass Fractions'] = norm_fractions\n\n moles = []\n for c in components_frame['CNAMES']:\n moles.append(float(float(components_frame[components_frame['CNAMES']==c]['Computed Mass Fractions']) /\n float(components_frame[components_frame['CNAMES']==c]['MolarMass'])))\n\n mole_fractions = []\n for m in moles:\n mole_fractions.append(m / sum(moles))\n\n\n components_frame['Computed Mole Fractions'] = mole_fractions\n\n\n\n original_frame = pd.read_csv(getcwd() + '/results/composition/'+ res_fluid + '.csv')\n components_frame[fluid_to_solve] = original_frame['ZI']\n\n#### save results in file\n\n result_path = getcwd() + '/results/'\n\n components_frame.to_csv(result_path + 'TEST.csv', sep = \",\", index=False)\n\n xlsx_writer = pd.ExcelWriter(result_path + 'TEST.xlsx', engine='xlsxwriter')\n components_frame.to_excel(xlsx_writer, 'DataFrame')\n xlsx_writer.save()\n\n### group per hydrocarbon group\n L_alk = ['C1', 'C2', 'C3', 'iC4', 'nC4']\n M_alk = ['iC5', 'nC5', 'C6', 'C7', 'C8', 'C9', 'C10']\n H_alk = ['C11', 'C12','C13', 'C14','C15', 'C16','C17', 'C18','C19',\n 'C20','C21', 'C22','C23', 'C24','C25', 'C26','C27', 'C28','C29','C30+']\n inor = ['N2', 'H2S', 'CO2']\n aromatics = ['Benzene','Toluene', 'Ethylbenzene', 'oXylene', 'mpXylene']\n cyclic = ['McycloC5', 'CycloC6', 'McycloC6']\n\n\n L_alk_fraction = 0.0\n M_alk_fraction = 0.0\n H_alk_fraction = 0.0\n inor_fraction = 0.0\n aro_fraction = 0.0\n cyclic_fraction = 0.0\n fluid_to_solve_L_alk_fraction = 0.0\n fluid_to_solve_M_alk_fraction = 0.0\n fluid_to_solve_H_alk_fraction = 0.0\n fluid_to_solve_inor_fraction = 0.0\n fluid_to_solve_aro_fraction = 0.0\n fluid_to_solve_cyclic_fraction = 0.0\n for c in components_frame['CNAMES']:\n if c in L_alk:\n L_alk_fraction += float(components_frame[components_frame['CNAMES']==c]['Computed Mole Fractions'])\n fluid_to_solve_L_alk_fraction += float(components_frame[components_frame['CNAMES'] == c][fluid_to_solve])\n elif c in M_alk:\n M_alk_fraction += float(components_frame[components_frame['CNAMES']==c]['Computed Mole Fractions'])\n fluid_to_solve_M_alk_fraction += float(components_frame[components_frame['CNAMES'] == c][fluid_to_solve])\n elif c in H_alk:\n H_alk_fraction += float(components_frame[components_frame['CNAMES']==c]['Computed Mole Fractions'])\n fluid_to_solve_H_alk_fraction += float(components_frame[components_frame['CNAMES'] == c][fluid_to_solve])\n elif c in inor:\n inor_fraction += float(components_frame[components_frame['CNAMES']==c]['Computed Mole Fractions'])\n fluid_to_solve_inor_fraction += float(components_frame[components_frame['CNAMES'] == c][fluid_to_solve])\n elif c in aromatics:\n aro_fraction += float(components_frame[components_frame['CNAMES']==c]['Computed Mole Fractions'])\n fluid_to_solve_aro_fraction += float(components_frame[components_frame['CNAMES'] == c][fluid_to_solve])\n elif c in cyclic:\n cyclic_fraction += float(components_frame[components_frame['CNAMES']==c]['Computed Mole Fractions'])\n fluid_to_solve_cyclic_fraction += float(components_frame[components_frame['CNAMES'] == c][fluid_to_solve])\n\n groups = ['light alkanes', 'medium alkanes', 'heavy alkanes', 'inorganic', 'aromatic', 'cyclic']\n dt = [[L_alk_fraction,fluid_to_solve_L_alk_fraction], [M_alk_fraction, fluid_to_solve_M_alk_fraction],\n [H_alk_fraction, 
fluid_to_solve_H_alk_fraction], [inor_fraction, fluid_to_solve_inor_fraction],\n [aro_fraction, fluid_to_solve_aro_fraction], [cyclic_fraction, fluid_to_solve_cyclic_fraction]]\n\n group_frame = pd.DataFrame(dt, columns = ['Computed Mole Fraction', fluid_to_solve], index= groups)\n\n ### Plot results\n\n ### Full spectrum\n # plt.figure()\n components_frame.plot(x='CNAMES',y =['Computed Mole Fractions',fluid_to_solve],kind ='bar',\n color=['steelblue', 'midnightblue'],\n title=\"Mole fractions for fluid\\n \" + fluid_to_solve)\n\n ### Per group\n group_frame.plot(y = ['Computed Mole Fraction', fluid_to_solve], use_index=True, kind='bar',\n color= ['steelblue', 'midnightblue'],\n title=\"Mole fractions per group for fluid\\n\"+fluid_to_solve, rot=1)\n # plt.figure()\n plt.show()","sub_path":"massfraction.py","file_name":"massfraction.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"506377178","text":"#!/usr/bin/env python3\n\nfrom sacremoses import MosesPunctNormalizer\nfrom sacremoses import MosesTokenizer\n\nFILE_NAME = \"/media/zhake/data/Projects/kaz-parallel-corpora/crawl/strategy2050_kz/xmls/30-35/texts/kaz_all_text_clean_split.txt\"\n\nmpn = MosesPunctNormalizer()\nmt = MosesTokenizer()\n\nwith open(file=FILE_NAME, mode=\"r\") as f_in:\n norm_text = [mpn.normalize(text=line) for line in f_in]\n\ntok_text = [\n mt.tokenize(text=line, return_str=True, escape=False) for line in norm_text\n]\n\nwith open(file=f\"{FILE_NAME}_tok\", mode=\"w\") as f_out:\n for line in tok_text:\n print(line.strip(), file=f_out)\n","sub_path":"sacre_norm_tok.py","file_name":"sacre_norm_tok.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"132197650","text":"\"\"\"\nProgram Name: string_manip.py\nContact(s): George McCabe\nDescription: METplus utility to handle string manipulation\n\"\"\"\n\nimport sys\nimport os\nimport re\nfrom csv import reader\nimport random\nimport string\nimport logging\n\ntry:\n from .constants import VALID_COMPARISONS, LOWER_TO_WRAPPER_NAME\nexcept ImportError:\n sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))\n from constants import VALID_COMPARISONS, LOWER_TO_WRAPPER_NAME\n\n\ndef get_wrapper_name(process_name):\n \"\"\"! Determine name of wrapper from string that may not contain the correct\n capitalization, i.e. Pcp-Combine translates to PCPCombine\n\n @param process_name string that was listed in the PROCESS_LIST\n @returns name of wrapper (without 'Wrapper' at the end) and None if\n name cannot be determined\n \"\"\"\n lower_process = (process_name.replace('-', '').replace('_', '')\n .replace(' ', '').lower())\n if lower_process in LOWER_TO_WRAPPER_NAME.keys():\n return LOWER_TO_WRAPPER_NAME[lower_process]\n\n return None\n\n\ndef remove_quotes(input_string):\n \"\"\"!Remove quotes from string\"\"\"\n if not input_string:\n return ''\n\n # strip off double and single quotes\n return input_string.strip('\"').strip(\"'\")\n\n\ndef getlist(list_str, expand_begin_end_incr=True):\n \"\"\"! 
Returns a list of string elements from a comma\n separated string of values.\n This function MUST also return an empty list [] if s is '' empty.\n This function is meant to handle these possible or similar inputs:\n AND return a clean list with no surrounding spaces or trailing\n commas in the elements.\n '4,4,2,4,2,4,2, ' or '4,4,2,4,2,4,2 ' or\n '4, 4, 4, 4, ' or '4, 4, 4, 4 '\n Note: getstr on an empty variable (EMPTY_VAR = ) in\n a conf file returns '' an empty string.\n\n @param list_str the string being converted to a list.\n @returns list of strings formatted properly and expanded as needed\n \"\"\"\n if not list_str:\n return []\n\n # remove surrounding comma and spaces from the string\n list_str = list_str.strip(', ').strip()\n\n # remove trailing semi-colon IF found after []s\n if list_str.endswith('];'):\n list_str = list_str.strip('; ').strip()\n\n # remove [ from start (left) and ] from end (right)\n list_str = list_str.lstrip('[ ').rstrip('] ').strip()\n\n # remove space around commas\n list_str = re.sub(r'\\s*,\\s*', ',', list_str)\n\n # option to not evaluate begin_end_incr\n if expand_begin_end_incr:\n list_str = _handle_begin_end_incr(list_str)\n\n # use regex split to split list string by commas that are not\n # found within []s or ()s\n item_list = re.split(r',\\s*(?![^\\[\\]]*\\]|[^()]*\\))', list_str)\n\n # regex split will still split by commas that are found between\n # quotation marks, so call function to put them back together properly\n item_list = _fix_list(item_list)\n\n return item_list\n\n\ndef getlistint(list_str):\n \"\"\"! Get list and convert all values to int\n\n @param list_str the string being converted to a list.\n @returns list of ints\n \"\"\"\n list_str = getlist(list_str)\n list_str = [int(i) for i in list_str]\n return list_str\n\n\ndef _handle_begin_end_incr(list_str):\n \"\"\"! Check for instances of begin_end_incr() in the input string and\n evaluate as needed\n\n @param list_str string that contains a comma separated list\n @returns string that has list expanded\n \"\"\"\n\n matches = _begin_end_incr_findall(list_str)\n\n for match in matches:\n item_list = _begin_end_incr_evaluate(match)\n if item_list:\n list_str = list_str.replace(match, ','.join(item_list))\n\n return list_str\n\n\ndef _begin_end_incr_findall(list_str):\n \"\"\"! Find all instances of begin_end_incr in list string\n\n @param list_str string that contains a comma separated list\n @returns list of strings that have begin_end_incr() characters\n \"\"\"\n # remove space around commas (again to make sure)\n # this makes the regex slightly easier because we don't have to include\n # as many \\s* instances in the regex string\n list_str = re.sub(r'\\s*,\\s*', ',', list_str)\n\n # find begin_end_incr and any text before and after that are not a comma\n # [^,\\s]* evaluates to any character that is not a comma or space\n return re.findall(\n r\"([^,]*begin_end_incr\\(\\s*-?\\d*,-?\\d*,-*\\d*,?\\d*\\s*\\)[^,]*)\",\n list_str\n )\n\n\ndef _begin_end_incr_evaluate(item):\n \"\"\"! 
Expand begin_end_incr() items into a list of values\n\n @param item string containing begin_end_incr() tag with\n possible text before and after\n @returns list of items expanded from begin_end_incr\n \"\"\"\n match = re.match(\n r\"^(.*)begin_end_incr\\(\\s*(-*\\d*),(-*\\d*),(-*\\d*),?(\\d*)\\s*\\)(.*)$\",\n item\n )\n if match:\n before = match.group(1).strip()\n after = match.group(6).strip()\n start = int(match.group(2))\n end = int(match.group(3))\n step = int(match.group(4))\n precision = match.group(5).strip()\n\n if start <= end:\n int_list = range(start, end+1, step)\n else:\n int_list = range(start, end-1, step)\n\n out_list = []\n for int_values in int_list:\n out_str = str(int_values)\n\n if precision:\n out_str = out_str.zfill(int(precision))\n\n out_list.append(f\"{before}{out_str}{after}\")\n\n return out_list\n\n return None\n\n\ndef _fix_list(item_list):\n \"\"\"! The logic that calls this function may have incorrectly split up\n a string that contains commas within quotation marks. This function\n looks through the list and finds items that appear to have been split up\n incorrectly and puts them back together properly.\n\n @param item_list list of items to be corrected\n @returns corrected list\n \"\"\"\n fixed_list = []\n list_buffer = []\n for item in item_list:\n quote_count = item.count('\"')\n if not list_buffer:\n # if there are an even number of quotation marks, add to list\n if quote_count % 2 == 0:\n fixed_list.append(item)\n # otherwise add it to the list buffer\n else:\n list_buffer.append(item)\n else:\n list_buffer.append(item)\n if quote_count == 1:\n fixed_list.append(','.join(list_buffer))\n list_buffer.clear()\n\n # if there are still items in the buffer, add them to end of list\n if list_buffer:\n fixed_list.append(','.join(list_buffer))\n\n # remove extra quotation marks around string\n out_list = []\n for item in fixed_list:\n if item[0] == '\"' and item[-1] == '\"':\n out_list.append(item.strip('\"'))\n else:\n out_list.append(item)\n\n return out_list\n\n\ndef list_to_str(list_of_values, add_quotes=True):\n \"\"\"! Turn a list of values into a single string\n\n @param list_of_values list of values, i.e. ['value1', 'value2']\n @param add_quotes if True, add quotation marks around values,\n default is True\n\n @returns string created from list_of_values with the values separated\n by commas, i.e. '\"value1\", \"value2\"' or 1, 3 if add_quotes is False\n \"\"\"\n # return empty string if list is empty\n if not list_of_values:\n return ''\n\n if add_quotes:\n # remove any quotes that are already around items, then add quotes\n values = [remove_quotes(item) for item in list_of_values]\n return '\"' + '\", \"'.join(values) + '\"'\n\n return ', '.join(list_of_values)\n\n\ndef comparison_to_letter_format(expression):\n \"\"\"! Convert comparison operator to the letter version if it is not already\n\n @param expression string starting with comparison operator to convert,\n i.e. gt3 or <=5.4\n @returns letter comparison operator, i.e. gt3 or le5.4 or None if invalid\n \"\"\"\n for symbol_comp, letter_comp in VALID_COMPARISONS.items():\n if letter_comp in expression or symbol_comp in expression:\n return expression.replace(symbol_comp, letter_comp)\n\n return None\n\n\ndef format_thresh(thresh_str):\n \"\"\"! Format thresholds for file naming\n\n @param thresh_str string of the thresholds.\n Can be a comma-separated list, i.e. gt3,<=5.5, ==7\n\n @returns string of comma-separated list of the threshold(s) with\n letter format, i.e. 
gt3,le5.5,eq7\n \"\"\"\n if isinstance(thresh_str, list):\n return format_thresh(','.join(thresh_str))\n\n formatted_thresh_list = []\n # separate thresholds by comma and strip off whitespace around values\n thresh_list = [thresh.strip() for thresh in thresh_str.split(',')]\n for thresh in thresh_list:\n if not thresh:\n continue\n\n thresh_letter = comparison_to_letter_format(thresh)\n if thresh_letter:\n formatted_thresh_list.append(thresh_letter)\n\n return ','.join(formatted_thresh_list)\n\n\ndef is_python_script(name):\n \"\"\" Check if field name is a python script by checking if any of the words\n in the string end with .py\n\n @param name string to check\n @returns True if the name is determined to be a python script command\n \"\"\"\n if not name:\n return False\n\n all_items = name.split(' ')\n if any(item.endswith('.py') for item in all_items):\n return True\n\n return False\n\n\ndef camel_to_underscore(camel):\n \"\"\"! Change camel case notation to underscore notation, i.e. GridStatWrapper to grid_stat_wrapper\n Multiple capital letters are excluded, i.e. PCPCombineWrapper to pcp_combine_wrapper\n Numerals are also skipped, i.e. ASCII2NCWrapper to ascii2nc_wrapper\n Args:\n @param camel string to convert\n @returns string in underscore notation\n \"\"\"\n s1 = re.sub(r'([^\\d])([A-Z][a-z]+)', r'\\1_\\2', camel)\n return re.sub(r'([a-z])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef get_threshold_via_regex(thresh_string):\n \"\"\"!Ensure thresh values start with >,>=,==,!=,<,<=,gt,ge,eq,ne,lt,le and then a number\n Optionally can have multiple comparison/number pairs separated with && or ||.\n\n @param thresh_string: String to examine, i.e. <=3.4\n @returns None if string does not match any valid comparison operators\n or does not contain a number afterwards. Regex match object with\n comparison operator in group 1 and number in group 2 if valid\n \"\"\"\n\n comparison_number_list = []\n # split thresh string by || or &&\n thresh_split = re.split(r'\\|\\||&&', thresh_string)\n # check each threshold for validity\n for thresh in [item.strip() for item in thresh_split]:\n found_match = False\n for comp in list(VALID_COMPARISONS)+list(VALID_COMPARISONS.values()):\n # if valid, add to list of tuples\n # must be one of the valid comparison operators followed by\n # at least 1 digit or NA\n if thresh == 'NA':\n comparison_number_list.append((thresh, ''))\n found_match = True\n break\n\n match = re.match(r'^('+comp+r')(.*\\d.*)$', thresh)\n if match:\n comparison = match.group(1)\n number = match.group(2)\n # try to convert to float if it can, but allow string\n try:\n number = float(number)\n except ValueError:\n pass\n\n comparison_number_list.append((comparison, number))\n found_match = True\n break\n\n # if no match was found for the item, return None\n if not found_match:\n return None\n\n if not comparison_number_list:\n return None\n\n return comparison_number_list\n\n\ndef validate_thresholds(thresh_list, logger=None):\n \"\"\" Checks list of thresholds to ensure all of them have the correct format\n Should be a comparison operator with number pair combined with || or &&\n i.e. 
gt4 or >3&&<5 or gt3||lt1\n\n @param thresh_list list of strings to check\n @param logger (optional) logging object to output error\n @returns True if all items in the list are valid format, False if not\n \"\"\"\n valid = True\n for thresh in thresh_list:\n match = get_threshold_via_regex(thresh)\n if match is None:\n valid = False\n\n if valid is False:\n err_str = (\"Threshold values must use >,>=,==,!=,<,<=,gt,ge,eq,ne,lt, \"\n \"or le with a number, optionally combined with && or ||\")\n if logger:\n logger.error(err_str)\n else:\n print(f'ERROR: {err_str}')\n return False\n return True\n\n\ndef round_0p5(val):\n \"\"\"! Round to the nearest point five (ie 3.3 rounds to 3.5, 3.1\n rounds to 3.0) Take the input value, multiply by two, round to integer\n (no decimal places) then divide by two. Expect any input value of n.0,\n n.1, or n.2 to round down to n.0, and any input value of n.5, n.6 or\n n.7 to round to n.5. Finally, any input value of n.8 or n.9 will\n round to (n+1).0\n\n @param val : The number to be rounded to the nearest .5\n @returns n.0, n.5, or (n+1).0 value as a result of rounding\n \"\"\"\n return round(val * 2) / 2\n\n\ndef generate_tmp_filename():\n random_string = ''.join(random.choice(string.ascii_letters)\n for _ in range(10))\n return f\"metplus_tmp_{random_string}\"\n\n\ndef template_to_regex(template):\n \"\"\"!Convert path with filename template tags for regular expression by\n escaping '.' character with a backslash and replacing any lead template\n tags with a wildcard regular expression.\n\n @param template string to convert\n @returns formatted string\n \"\"\"\n in_template = template.replace('.', r'\\.')\n return re.sub(r'{lead[^}]*}', '.*', in_template)\n\n\ndef split_level(level):\n \"\"\"! If level value starts with a letter, then separate that letter from\n the rest of the string. i.e. 'A03' will be returned as 'A', '03'. If no\n level type letter is found and the level value consists of alpha-numeric\n characters, return an empty string as the level type and the full level\n string as the level value\n\n @param level input string to parse/split\n @returns tuple of level type and level value\n \"\"\"\n if not level:\n return '', ''\n\n match = re.match(r'^([a-zA-Z])(\\w+)$', level)\n if match:\n level_type = match.group(1)\n level = match.group(2)\n return level_type, level\n\n match = re.match(r'^[\\w]+$', level)\n if match:\n return '', level\n\n return '', ''\n\n\ndef format_level(level):\n \"\"\"! Format level string to prevent NetCDF level values from creating\n filenames and field names with bad characters. Replaces '*' with 'all'\n and ',' with '_'\n\n @param level string of level to format\n @returns formatted string\n \"\"\"\n return level.replace('*', 'all').replace(',', '_')\n\n\ndef expand_int_string_to_list(int_string):\n \"\"\"! Expand string into a list of integer values. Items are separated by\n commas. Items that are formatted X-Y will be expanded into each number\n from X to Y inclusive. If the string ends with +, then add a str '+'\n to the end of the list. 
Used in .github/jobs/get_use_case_commands.py\n\n @param int_string String containing a comma-separated list of integers\n @returns List of integers and potentially '+' as the last item\n \"\"\"\n subset_list = []\n\n # if string ends with +, remove it and add it back at the end\n if int_string.strip().endswith('+'):\n int_string = int_string.strip(' +')\n has_plus = True\n else:\n has_plus = False\n\n # separate into list by comma\n comma_list = int_string.split(',')\n for comma_item in comma_list:\n dash_list = comma_item.split('-')\n # if item contains X-Y, expand it\n if len(dash_list) == 2:\n for i in range(int(dash_list[0].strip()),\n int(dash_list[1].strip())+1,\n 1):\n subset_list.append(i)\n else:\n subset_list.append(int(comma_item.strip()))\n\n if has_plus:\n subset_list.append('+')\n\n return subset_list\n\n\ndef subset_list(full_list, subset_definition):\n \"\"\"! Extract subset of items from full_list based on subset_definition\n Used in internal/tests/use_cases/metplus_use_case_suite.py\n\n @param full_list List of all use cases that were requested\n @param subset_definition Defines how to subset the full list. If None,\n no subsetting occurs. If an integer value, select that index only.\n If a slice object, i.e. slice(2,4,1), pass slice object into list.\n If list, subset full list by integer index values in list. If\n last item in list is '+' then subset list up to 2nd last index, then\n get all items from 2nd last item and above\n \"\"\"\n if subset_definition is not None:\n subset_list = []\n\n # if case slice is a list, use only the indices in the list\n if isinstance(subset_definition, list):\n # if last slice value is a plus sign, get rest of items\n # after 2nd last slice value\n if subset_definition[-1] == '+':\n plus_value = subset_definition[-2]\n # add all values before last index before plus\n subset_list.extend([full_list[i]\n for i in subset_definition[:-2]])\n # add last index listed + all items above\n subset_list.extend(full_list[plus_value:])\n else:\n # list of integers, so get items based on indices\n subset_list = [full_list[i] for i in subset_definition]\n else:\n subset_list = full_list[subset_definition]\n else:\n subset_list = full_list\n\n # if only 1 item is left, make it a list before returning\n if not isinstance(subset_list, list):\n subset_list = [subset_list]\n\n return subset_list\n\n\ndef find_indices_in_config_section(regex, config, sec='config',\n index_index=1, id_index=None):\n \"\"\"! Use regular expression to get all config variables that match and\n are set in the user's configuration. This is used to handle config\n variables that have multiple indices, i.e. FCST_VAR1_NAME, FCST_VAR2_NAME,\n etc.\n\n @param regex regular expression to use to find variables\n @param config METplusConfig object to search\n @param sec (optional) config file section to search. Defaults to config\n @param index_index 1 based number that is the regex match index for the\n index number (default is 1)\n @param id_index 1 based number that is the regex match index for the\n identifier. 
Defaults to None which does not extract an identifier\n @returns dictionary where keys are the index number and the value is a\n list of identifiers (if id_index=None) or a list containing None\n \"\"\"\n # regex expression must have 2 () items and the 2nd item must be the index\n all_conf = config.keys(sec)\n indices = {}\n regex = re.compile(regex)\n for conf in all_conf:\n result = regex.match(conf)\n if result is None:\n continue\n\n index = result.group(index_index)\n if id_index:\n identifier = result.group(id_index)\n else:\n identifier = None\n\n if index not in indices:\n indices[index] = [identifier]\n else:\n indices[index].append(identifier)\n\n return indices\n\n\ndef get_logfile_info(config):\n \"\"\"!Get path to log file from LOG_METPLUS config variable or return a\n useful message if it is not set to instruct users how to set it.\n\n @param config METplusConfig object to read LOG_METPLUS from\n @returns path to log file or message if unset\n \"\"\"\n log_file = config.getstr('config', 'LOG_METPLUS', '')\n return log_file if log_file else 'Set LOG_METPLUS to write logs to a file'\n\n\ndef log_terminal_includes_info(config):\n \"\"\"!Check LOG_LEVEL_TERMINAL to see if it is set to a logging level that\n includes INFO output. Check [runtime] section if not found in [config]\n because the variable is moved at the end of the run.\n\n @param config METplusConfig object to query\n @returns True if log level is set to include INFO messages. False if not.\n \"\"\"\n log_terminal_level = logging.getLevelName(\n config.getstr('config', 'LOG_LEVEL_TERMINAL',\n config.getstr('runtime', 'LOG_LEVEL_TERMINAL'))\n )\n return log_terminal_level <= logging.INFO\n","sub_path":"metplus/util/string_manip.py","file_name":"string_manip.py","file_ext":"py","file_size_in_byte":20654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"526805712","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# (c) Shrimadhav U K\n\nfrom telethon import events\nimport asyncio\n\n\n@borg.on(events.NewMessage(pattern=r\"\\.typewriter (.*)\", outgoing=True))\nasync def _(event):\n if event.fwd_from:\n return\n # https://t.me/AnotherGroup/176551\n input_str = event.pattern_match.group(1)\n typing_symbol = \"|\"\n DELAY_BETWEEN_EDITS = 0.3\n previous_text = \"\"\n await event.edit(typing_symbol)\n await asyncio.sleep(DELAY_BETWEEN_EDITS)\n for character in input_str:\n previous_text = previous_text + \"\" + character\n typing_text = previous_text + \"\" + typing_symbol\n await event.edit(typing_text)\n await asyncio.sleep(DELAY_BETWEEN_EDITS)\n await event.edit(previous_text)\n await asyncio.sleep(DELAY_BETWEEN_EDITS)\n","sub_path":"stdplugins/typewriter.py","file_name":"typewriter.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"447919455","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nfrom report.basic_quotes_pd import get_underlyer_quotes\nimport numpy as np\nimport pandas as pd\n\n\ndef process_underlyer_position(row):\n spot = row['underlyerPrice']\n multiplier = np.float64(row['underlyerInstrumentMultiplier'])\n row['underlyerNetPosition'] = np.float64(row['netPosition']) if row['instrumentId'] == row[\n 'underlyerInstrumentId'] else 0\n row['delta'] = 0 if row['instrumentId'] == row['underlyerInstrumentId'] else np.float64(row['delta']) / multiplier\n row['gamma'] = np.float64(row['gamma']) * spot / 100 / multiplier\n 
row['gammaCash'] = row['gamma'] * spot\n row['vega'] = np.float64(row['vega']) / 100\n row['theta'] = np.float64(row['theta']) / 365\n row['rho'] = np.float64(row['rhoR']) / 100\n return row\n\n\ndef eod_risk_by_underlyer_report(positions, underlyer_positions, domain, headers, pricing_environment):\n \"\"\"Return Eod risk collected by book and underlyer.\n\n positions: eod position report\n underlyer_positions: basic underlyer position report\"\"\"\n # find books and underlyers\n underlyer_positions_multiplier = underlyer_positions[['instrumentId', 'underlyerInstrumentMultiplier']]\n sub_underlyer_positions = underlyer_positions.apply(lambda row: process_underlyer_position(row), axis=1)[\n ['bookId', 'instrumentId', 'underlyerNetPosition', 'delta', 'gamma', 'gammaCash', 'vega', 'theta', 'rho']]\n sub_underlyer_positions.rename(columns={'bookId': 'bookName', 'instrumentId': 'underlyerInstrumentId'},\n inplace=True)\n sub_positions_df = positions[\n ['bookName', 'underlyerInstrumentId', 'delta', 'deltaDecay', 'deltaWithDecay', 'gamma', 'gammaCash', 'vega',\n 'theta', 'rho']]\n book_underlyers = pd.concat([sub_positions_df, sub_underlyer_positions], ignore_index=True, sort=False)\n risk_report_data = book_underlyers[book_underlyers.bookName == book_underlyers.bookName].fillna(0)\n\n underlyers = list(risk_report_data['underlyerInstrumentId'].dropna().unique())\n\n risk_report = risk_report_data.groupby(['bookName', 'underlyerInstrumentId']).sum().reset_index()\n\n quotes = get_underlyer_quotes(underlyers, datetime.now(), domain, headers)[['close']]\n quotes.reset_index(inplace=True)\n quotes.rename(columns={'instrumentId': 'underlyerInstrumentId', 'close': 'underlyerPrice'}, inplace=True)\n risk_report = risk_report.merge(quotes, on='underlyerInstrumentId', how='left')\n\n yst_quotes = get_underlyer_quotes(underlyers, datetime.now() - timedelta(days=1), domain, headers)[['close']]\n yst_quotes.reset_index(inplace=True)\n yst_quotes.rename(columns={'instrumentId': 'underlyerInstrumentId'}, inplace=True)\n risk_report = risk_report.merge(yst_quotes, on='underlyerInstrumentId', how='left')\n risk_report['underlyerPriceChangePercent'] = (np.float64(risk_report['underlyerPrice']) - np.float64(\n risk_report['close'])) / np.float64(risk_report['underlyerPrice'])\n\n risk_report['netDelta'] = np.float64(risk_report['underlyerNetPosition']) + np.float64(risk_report['delta'])\n risk_report['pricingEnvironment'] = pricing_environment\n\n # deltaCash\n underlyer_positions_multiplier.columns = ['underlyerInstrumentId', 'underlyerMultiplier']\n positions_multiplier = positions[['underlyerInstrumentId', 'underlyerMultiplier']]\n multiplier = pd.concat([underlyer_positions_multiplier, positions_multiplier], ignore_index=True, sort=False)\n multiplier.drop_duplicates(inplace=True)\n risk_report = risk_report.merge(multiplier, on='underlyerInstrumentId')\n risk_report['deltaCash'] = np.float64(risk_report['netDelta']) * np.float64(risk_report['underlyerPrice'])\n risk_report.drop(columns=['close', 'underlyerMultiplier'], inplace=True)\n risk_report.drop_duplicates(inplace=True)\n return list(risk_report.fillna(0).to_dict(orient='index').values())\n","sub_path":"scripts/airflow/report/eod/eod_risk_by_underlyer_report_pd.py","file_name":"eod_risk_by_underlyer_report_pd.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"358016959","text":"import re\nimport os\nimport json\nimport glob\n\n\n\nSCRIPT_PATH = 
os.path.dirname(os.path.realpath(__file__))\nBASE_PATH = os.path.abspath(os.path.join(SCRIPT_PATH, \"..\"))\nBASE_URL = \" https://mklarz.github.io/etjenesten-cybertalent2020-winter-highscore/\"\n\nDATA_PATH = BASE_PATH + \"/data\"\n\nTEMPLATES_PATH = BASE_PATH + \"/templates\"\nINDEX_TEMPLATE_PATH = TEMPLATES_PATH + \"/index.html\"\nUSER_TEMPLATE_PATH = TEMPLATES_PATH + \"/user.html\"\nHIGHSCORE_TEMPLATE_PATH = TEMPLATES_PATH + \"/highscore.html\"\n\nHIGHSCORE_JSON_PATH = DATA_PATH + \"/highscore.min.json\"\nHIGHSCORE_HTML_PATH = BASE_PATH + \"/index.html\"\nHIGHSCORE_LIST_ITEM_HTML_FORMAT = '
<li><a href=\"users/{}.html\">{}</a> {}</li>\n'\n\nUSERS_PATH = BASE_PATH + \"/users/\"\nUSER_JSON_PATH = USERS_PATH + \"{}.json\"\nUSER_HTML_PATH = USERS_PATH + \"{}.html\"\nUSER_CATEGORY_HTML_FORMAT = '
<li>{} {}%</li>
  • \\n'\n\nUSERS = [user_file.replace(USERS_PATH, \"\").replace(\".json\", \"\") for user_file in glob.glob(USER_JSON_PATH.format(\"*\"))]\nprint(\"Found\", len(USERS), \"users.\")\n\n\n# Templates\nwith open(INDEX_TEMPLATE_PATH) as fd:\n index_template = fd.read()\n\nwith open(USER_TEMPLATE_PATH) as fd:\n user_template = fd.read()\n\nwith open(HIGHSCORE_TEMPLATE_PATH) as fd:\n highscore_template = fd.read()\n\n# Parse users\nfor user_id in USERS:\n print(\"Parsing user:\", user_id)\n with open(USER_JSON_PATH.format(user_id)) as fd:\n user_data = json.load(fd)\n \n position_str = \"{}. plass\".format(user_data[\"position\"]) if user_data[\"position\"] else \"Ukjent plassering\"\n\n user_html = user_template\n user_html = user_html.replace(\"{USER_NAME}\", user_data[\"name\"])\n user_html = user_html.replace(\"{USER_POINTS}\", str(user_data[\"points\"]))\n user_html = user_html.replace(\"{USER_POSITION}\", position_str)\n categories_html = \"\"\n for category_name, percent in user_data[\"categories\"].items():\n categories_html += USER_CATEGORY_HTML_FORMAT.format(category_name, percent)\n user_html = user_html.replace(\"{USER_CATEGORIES}\", categories_html.strip())\n\n with open(USER_HTML_PATH.format(user_id), \"w\") as fd:\n fd.write(user_html)\n\n\n# Generate highscore\n\nprint(\"Saving highscore HTML to {}\".format(HIGHSCORE_HTML_PATH))\nwith open(HIGHSCORE_JSON_PATH) as f:\n highscore_html = highscore_template\n highscore_users_html = \"\"\n highscore = json.load(f)\n for user in highscore:\n highscore_users_html += HIGHSCORE_LIST_ITEM_HTML_FORMAT.format(\n user[\"user_id\"],\n user[\"name\"],\n user[\"points\"],\n )\n highscore_html = highscore_html.replace(\"{USERS}\", highscore_users_html)\n\n with open(HIGHSCORE_HTML_PATH, \"w\") as fd:\n fd.write(highscore_html)\n","sub_path":"scripts/generate_html.py","file_name":"generate_html.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"588999300","text":"#encoding:UTF-8\n#Autor: Lenin Silva Gutiérrez, A01373214\n#Escribe en números romanos\n\nfrom math import fabs #Para poder obtener el valor absoluto de números\n\ndef ponerTres(a): #Pone 'I' el número que veces que se le indique\n num=a*\"I\"\n return num #Regresa un string\n \ndef ponerCinco(b): \n if (b-5)<0: #Condición para 4\n num=ponerTres(int(fabs(b-5)))+\"V\"\n return num\n num=\"V\"+ponerTres(b-5)#Condición a partir del cinco. Pone 'V' y el número de 'I' restantes con la función anterior\n return num #Regresa un string\n \ndef ponerDiez(c):\n if (c-10)<0: #Condición para 9\n num=ponerTres(int(fabs(c-10)))+\"X\"\n return num\n num=\"X\"+ponerTres(c-10)#Condición a partir del cinco. 
Pone 'X' y el número de 'I' restantes con ponerTres()\n return num #Regresa un string\n\ndef main():\n numero=int(input(\"Valor entre 1 y 13\"))\n while numero<1 or numero>13: #Loop para que sólo sea posible poner valores dentro del rango\n print (\"Escoja un valor entre 1 y 13\")\n numero=int(input(\"Valor entre 1 y 13\"))\n \n #Determina qué función se debe aplicar según el valor del número \n if numero>=9: \n numero_romano=ponerDiez(numero)\n elif numero>=4:\n numero_romano=ponerCinco(numero)\n else:\n numero_romano=ponerTres(numero)\n \n print (\"El numero %d en romano es '%s'\"%(numero, numero_romano)) #Imprime el número en romano\n \nmain()\n ","sub_path":"romanos.py","file_name":"romanos.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"143761315","text":"\"\"\"fibonacci with memoization.\"\"\"\n\nfrom time import time\n\ncache = {}\ndef fib(n, usecache=False):\n if usecache and n in cache: return cache[n]\n res = 0\n if n <= 1: res = n\n else: res = fib(n-1, usecache) + fib(n-2, usecache)\n cache[n] = res\n return res\n\n# for n in (20,30,33):\n# t0 = time()\n# z = fib(n)\n# t1 = round(time()-t0, 1)\n# print(f' fib.{n}={z} no cache: {t1}secs', flush=1)\n# t0 = time()\n# z = fib(n, usecache=True)\n# t1 = round(time()-t0, 1)\n# print(f' fib.{n}={z} w/ cache: {t1}secs', flush=1)\n\nprint( fib(100, usecache=1) )\n","sub_path":"memoize-fib.py","file_name":"memoize-fib.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"389198380","text":"from ase.build import molecule\nfrom dscribe.descriptors import CoulombMatrix\n\n# Define atomic structures\nsamples_mol = [molecule(\"H2O\"), molecule(\"NO2\"), molecule(\"CO2\")]\n\n# Setup descriptor\ncm_desc = CoulombMatrix(\n n_atoms_max=3,\n permutation=\"sorted_l2\")\n\n# Create descriptor\nwater = samples_mol[0]\ncoulomb_matrix = cm_desc.create(water)\n\nprint(\"Coulomb matrix for water:\\n\",coulomb_matrix)\n\n# Create multiple descriptors\ncoulomb_matrices = cm_desc.create(samples_mol)\n\nprint(\"List of Coulomb matrices:\\n\", coulomb_matrices)\n","sub_path":"dscribe/examples/simple_coulombmatrix.py","file_name":"simple_coulombmatrix.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"348249175","text":"# -*-coding:utf-8 -*-\n# __author__='Yan'\n# function: CNN +1*1 kernel\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nimport numpy as np\nimport os\nimport datetime\nfrom sklearn import cross_validation\n\n# choose gpu\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n\n# add layers\ndef add_layer(inputs, n_features, n_labels, n_layer, activation=None):\n \"\"\"\n Return TensorFlow weights\n :param n_features: Number of features\n :param n_labels: Number of labels\n :return: TensorFlow hidden layer\n \"\"\"\n layer_name = \"layer%d\" % n_layer\n regularizer = layers.l2_regularizer(beta)\n # TODO: Return hidden layer\n with tf.name_scope(layer_name):\n with tf.name_scope('weights%d' % n_layer):\n W = tf.Variable(tf.truncated_normal([n_features, n_labels], stddev=(2.0 / n_features)),name='weights%d' % n_layer) # Weight中都是随机变量\n tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)\n tf.summary.histogram(layer_name + 
\"/weights\", W) # 可视化观看变量\n with tf.name_scope('biases%d' % n_layer):\n b = tf.Variable(tf.zeros([n_labels]),name='biases%d' % n_layer) # biases推荐初始值不为0\n tf.summary.histogram(layer_name + \"/biases\", b) # 可视化观看变量\n with tf.name_scope('hidden%d' % n_layer):\n h = tf.add(tf.matmul(inputs, W), b,name='hidden%d' % n_layer) # inputs*Weight+biases\n tf.summary.histogram(layer_name + \"/hidden\", h) # 可视化观看变量\n if activation is None:\n outputs = h\n elif activation == 'relu':\n outputs = tf.nn.relu(h)\n outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)\n tf.summary.histogram(layer_name + \"/outputs\", outputs) # 可视化观看变量\n return outputs\n\n\n# transform to one-hot coding\ndef dense_to_one_hot(labels_dense, num_classes):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\n\n# data to batch\ndef batch_iter(sourceData, batch_size, num_epochs, shuffle=True):\n # data = np.array(sourceData) # 将sourceData转换为array存储\n data_size = len(sourceData)\n num_batches_per_epoch = int(len(sourceData) / batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = sourceData[shuffle_indices]\n else:\n shuffled_data = sourceData\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n\n yield shuffled_data[start_index:end_index]\n\n\n\"\"\"def bn(x, is_training):\n x_shape = x.get_shape()\n params_shape = x_shape[-1:]\n\n axis = list(range(len(x_shape) - 1))\n\n beta = _get_variable('beta', params_shape, initializer=tf.zeros_initializer())\n gamma = _get_variable('gamma', params_shape, initializer=tf.ones_initializer())\n\n moving_mean = _get_variable('moving_mean', params_shape, initializer=tf.zeros_initializer(), trainable=False)\n moving_variance = _get_variable('moving_variance', params_shape, initializer=tf.ones_initializer(), trainable=False)\n\n # These ops will only be preformed when training.\n mean, variance = tf.nn.moments(x, axis)\n update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)\n update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)\n tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)\n tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)\n\n mean, variance = control_flow_ops.cond(\n is_training, lambda: (mean, variance),\n lambda: (moving_mean, moving_variance))\n\n return tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)\n\"\"\"\n\n# time record\nstarttime = datetime.datetime.now()\n\n# data path\ntrainData_tmp = np.loadtxt('data/data223_train.txt', delimiter=' ', dtype=np.float16)\n# trainData_tmp = np.loadtxt('data/data223_train.txt', delimiter=' ', dtype=np.float16)\ntrainData = trainData_tmp\ntestData_tmp = np.loadtxt('data/data223_test.txt', delimiter=' ', dtype=np.float16)\ntestData = testData_tmp\n\n# show the input data\n# print('---input data---')\n# print(trainData)\n# print(trainData.shape)\n# x_data = np.array(trainData[:, 0:-1])\n# y_data = np.array(trainData[:, -1]).astype(np.int32) # .astype(np.int64)\n# data = batch_iter(trainData,50,1)\n\n\"\"\"\n# queue\nq = tf.FIFOQueue(capacity=5, 
dtypes=tf.float32) # enqueue 5 batches\n# We use the \"enqueue\" operation so 1 element of the queue is the full batch\nenqueue_op = q.enqueue(x_data)\nnumberOfThreads = 1\nqr = tf.train.QueueRunner(q, [enqueue_op] * numberOfThreads)\ntf.train.add_queue_runner(qr)\nx = q.dequeue() # It replaces our input placeholder\nprint(x)\n\n\nbatch_size = 50\nmini_after_dequeue = 1000\ncapacity = mini_after_dequeue+3*batch_size\n\nexample_batch, label_batch = tf.train.shuffle_batch(\n tensors=[x_data, y_data],\n batch_size=batch_size,\n capacity=capacity,\n min_after_dequeue=mini_after_dequeue\n)\n\n#batch_size = 4\n#mini_after_dequeue = 100\n#capacity = mini_after_dequeue+3*batch_size\n\n#example_batch,label_batch = tf.train.batch([image,label],batch_size = batch_size,capacity=capacity)\n\"\"\"\n\n# global config\nlearning_rate = 0.0001\nbeta = 0.001\nkeep_prob = 0.8\nnum_epochs = 101\ntest_epochs = 100\nbatch_size = 50\n\ndata_size = len(trainData)\nnum_batches_per_epoch = int(data_size / batch_size)\nmodel_save_name = '20171208_v401_tmp'\nmodel_restore_path = \"save/20171207_v401_300.ckpt\"\n\n# input placeholder\nwith tf.name_scope('inputs'):\n x = tf.placeholder(tf.float32, shape=[None, 223], name='x')\n y_ = tf.placeholder(tf.int32, shape=[None], name='y_')\n\n# weights & bias for nn layerscorrect_prediction\nlayer1 = add_layer(x, 223, 1000, 1, 'relu')\nlayer2 = add_layer(layer1, 1000, 1000, 2, 'relu')\n# layer3 = add_layer(layer2, 1000, 1000, 3, 'relu')\n# output = add_layer(layer3, 1000, 35, 4)\noutput = add_layer(layer2, 1000, 35, 3)\n\n\nconv1 = tf.layers.conv2d(\n inputs=x,\n filters=32,\n kernel_size=[1, 1],\n padding='same',\n activation=tf.nn.relu)\n\nconv2 = tf.layers.conv2d(\n inputs=x,\n filters=32,\n kernel_size=[1, 1],\n padding='same',\n activation=tf.nn.relu)\n\ndense = tf.layers.dense(inputs=conv2, units=1000,\n activation=tf.nn.relu)\n\n\n\n# tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_2)\n# tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_3)\n# reg_term = tf.contrib.layers.apply_regularization(regularizer)\n# loss = (tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_,logits=z_3)) + reg_term)\n\n# Regularization L2\nL1 = tf.nn.softmax(output)\n\nregularizer = tf.contrib.layers.l2_regularizer(beta)\nreg_term = tf.contrib.layers.apply_regularization(regularizer)\n\n# optimizer paramaters\ncross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=output) + reg_term\nwith tf.name_scope('train'):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(L1, 1), tf.cast(y_, tf.int64))\n# correct_prediction = tf.equal(tf.argmax(L1,1), tf.argmax(y_,1))\n\nwith tf.name_scope('loss'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n tf.summary.scalar('loss', accuracy)\n# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n# save model\nsaver = tf.train.Saver()\nprint('---training---')\n\n\ndef train_test_model(train=True, show=False, continue_trian = False):\n if train:\n with tf.Session() as sess:\n # 合并到Summary中\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"graph/\", sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n for epoch in range(num_epochs):\n # print('epoch:', epoch)\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = trainData[shuffle_indices]\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * 
batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n data_batch = shuffled_data[start_index:end_index]\n x_data = np.array(data_batch[:, 0:-1])\n y_data = np.array(data_batch[:, -1]).astype(np.int32) # .astype(np.int64)\n feed = {x: x_data, y_: y_data}\n optimizer.run(feed_dict=feed)\n train_accuracy = accuracy.eval(feed_dict=feed)\n result = sess.run(merged, feed_dict=feed) # merged也是需要run的\n writer.add_summary(result, epoch) # result是summary类型的,需要放入writer中,i步数(x轴)\n if epoch % 20 == 0:\n saver_path = saver.save(sess, 'save/' + model_save_name + '_%d.ckpt' % epoch)\n print(\"epoch %04d | training_accuracy %.6f\" % (epoch, train_accuracy))\n print('-----Testing-----')\n x_data = np.array(testData[:, 0:-1])\n y_data = np.array(testData[:, -1]).astype(np.int32) # .astype(np.int64)\n feed = {x: x_data, y_: y_data}\n test_num = x_data.shape[0]\n test_accuracy = accuracy.eval(feed_dict=feed)\n print(\"test_number %04d | testing_accuracy %.9f\" % (test_num, test_accuracy))\n print('-+---------------------------+-')\n print(\"Model saved in file:\", saver_path)\n\n else:\n with tf.Session() as sess:\n saver.restore(sess, model_restore_path)\n print('model restore !')\n print('-----Testing-----')\n x_data = np.array(testData[:, 0:-1])\n y_data = np.array(testData[:, -1]).astype(np.int32) # .astype(np.int64)\n feed = {x: x_data, y_: y_data}\n test_num = x_data.shape[0]\n test_accuracy = accuracy.eval(feed_dict=feed)\n print(\"test_samples %04d | testing_accuracy %.9f\" % (test_num, test_accuracy))\n\n if show:\n with tf.Session() as sess:\n saver.restore(sess, model_restore_path)\n print('model restore !')\n # data input\n x_data = np.array(testData[:, 0:-1])\n y_data = np.array(testData[:, -1]).astype(np.int32) # .astype(np.int64)\n for step_test in range(test_epochs):\n test_data = x_data[step_test]\n test_label = y_data[step_test]\n # print(test_label.shape)\n ret = sess.run(L1, feed_dict={x: test_data.reshape(1, 223)})\n print('---')\n print('hyperthesis:%d | ' % (ret.argmax())+'true Y:%d' % (test_label))\n\n if continue_trian:\n with tf.Session() as sess:\n saver.restore(sess, model_restore_path)\n print('model restore !')\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"graph/\", sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n for epoch in range(num_epochs):\n # print('epoch:', epoch)\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = trainData[shuffle_indices]\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n data_batch = shuffled_data[start_index:end_index]\n x_data = np.array(data_batch[:, 0:-1])\n y_data = np.array(data_batch[:, -1]).astype(np.int32) # .astype(np.int64)\n feed = {x: x_data, y_: y_data}\n optimizer.run(feed_dict=feed)\n train_accuracy = accuracy.eval(feed_dict=feed)\n result = sess.run(merged, feed_dict=feed) # merged也是需要run的\n writer.add_summary(result, epoch) # result是summary类型的,需要放入writer中,i步数(x轴)\n if epoch % 20 == 0:\n saver_path = saver.save(sess, 'save/' + model_save_name + '_continue_%d.ckpt' % epoch)\n print(\"epoch %04d | training_accuracy %.6f\" % (epoch, train_accuracy))\n print('-----Testing-----')\n x_data = np.array(testData[:, 0:-1])\n y_data = np.array(testData[:, -1]).astype(np.int32) # .astype(np.int64)\n feed = {x: x_data, y_: y_data}\n test_num = x_data.shape[0]\n test_accuracy = accuracy.eval(feed_dict=feed)\n print(\"test_number 
%04d | testing_accuracy %.9f\" % (test_num, test_accuracy))\n print('-+---------------------------+-')\n print(\"Model saved in file:\", saver_path)\n\n\n\nif __name__ == '__main__':\n train_test_model()\n endtime = datetime.datetime.now()\n print(endtime - starttime)","sub_path":"train_test_single_model_cnn_v0.py","file_name":"train_test_single_model_cnn_v0.py","file_ext":"py","file_size_in_byte":13445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"337944199","text":"import socket\n\ndef server():\n proto = socket.getprotobyname('tcp') # [1]\n serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)\n\n serv.bind((\"localhost\", 2222)) # [2]\n serv.listen(1) # [3]\n return serv\n\nserv = server()\n\nwhile 1:\n conn, addr = serv.accept() # [4]\n while 1:\n message = conn.recv(64) # [5]\n if message:\n conn.send('Hi, I am a server, I received: ' + message)\n else:\n break\n conn.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"165870172","text":"class Node():\n def __init__(self,values):\n self.values = values\n self.rchild = None\n self.lchild = None\n\nclass Tree():\n def __init__(self):\n self.root = None\n def add(self,values):\n node = Node(values)\n if self.root == None:\n self.root = node\n return\n queue = [self.root]\n\n\n while queue:\n c_node = queue.pop(0)\n if c_node.lchild is None:\n c_node.lchild = node\n return\n else:\n queue.append(c_node.lchild)\n if c_node.rchild is None:\n c_node.rchild = node\n return\n else:\n queue.append(c_node.rchild)\n def with_drive(self):\n if self.root is None:\n return\n queue = [self.root]\n\n while queue:\n c_node = queue.pop(0)\n print(c_node.values)\n if c_node.lchild is not None:\n queue.append(c_node.lchild)\n if c_node.rchild is not None:\n queue.append((c_node.rchild))\n def xianxu(self,node):\n if node ==None:\n return\n print(node.values)\n self.xianxu(node.lchild)\n self.xianxu(node.rchild)\n def zhongxu(self,node):\n if node == None:\n return\n self.zhongxu(node.lchild)\n print(node.values)\n self.zhongxu(node.rchild)\n def houxu(self,node):\n if node == None:\n return\n self.houxu(node.lchild)\n self.houxu(node.rchild)\n print(node.values)\n\n\n\nif __name__ ==\"__main__\":\n tree = Tree()\n tree.add(1)\n tree.add(2)\n tree.add(3)\n tree.add(4)\n tree.add(5)\n tree.houxu(tree.root)\n","sub_path":"python/测试啊啊.py","file_name":"测试啊啊.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"459767002","text":"import match_api\n\n\ndef crawl_matches(url):\n \"\"\"Crawl data of all accessible matches from Steam Web API.\n\n :param url: match history base URL\n :return: list of match ids\n \"\"\"\n last_match_id = 0\n match_ids = set()\n\n # crawling loop\n while True:\n\n # add starting match id to the URL if it is not first iteration\n if last_match_id == 0:\n crawling_url = url\n else:\n crawling_url = url + '&start_at_match_id=' + str(last_match_id)\n\n # crawl\n print('Crawling data from', crawling_url)\n data = match_api.crawl(crawling_url)\n print(len(data[\"result\"][\"matches\"]), data[\"result\"][\"matches\"])\n\n # test crawled matches data\n if len(data[\"result\"][\"matches\"]) > 0:\n\n # parse match ids\n for m_data in data[\"result\"][\"matches\"]:\n last_match_id = m_data[\"match_id\"]\n 
match_ids.add(last_match_id)\n\n else:\n # stop crawling if there are no results\n break\n\n return match_ids\n\n\ndef save_matches(db, match_ids):\n \"\"\"Save match ids to the database.\n\n :param db: database connection\n :param match_ids: list of match ids\n \"\"\"\n cur = db.cursor()\n\n if len(match_ids) > 0:\n query = \"INSERT OR IGNORE INTO matches (id, processed, invalid) VALUES \"\n\n for match_id in match_ids:\n query += \"(\" + str(match_id) + \", 0, 0), \"\n\n query = query[:-2] + \";\"\n\n print(query)\n cur.execute(query)\n db.commit()\n","sub_path":"src/crawler/match_history_crawler.py","file_name":"match_history_crawler.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"336205730","text":"import json\n\nfrom returns.pipeline import is_successful\nfrom returns.result import Success\n\nfrom p360_contact_manager.settings import LoadSettings, StoreSettings\n\n\ndef test_loading_settings(mocker):\n expected = {'a': '1', 'b': '1'}\n read_patch = mocker.patch(\n 'p360_contact_manager.common.ReadLocalFile.__call__',\n )\n read_patch.return_value = Success(json.dumps(expected))\n loaded = LoadSettings('tests/settings.json')()\n assert is_successful(loaded)\n assert loaded.unwrap() == expected\n\n\ndef test_non_existing_file_returns_empty_dict():\n loaded = LoadSettings('tests/does_not_exist.json')()\n assert is_successful(loaded)\n assert not len(loaded.unwrap())\n\n\ndef test_storing_settings(mocker):\n read_patch = mocker.patch(\n 'p360_contact_manager.common.ReadLocalFile.__call__',\n )\n read_patch.return_value = Success(json.dumps({'a': '1', 'b': '1'}))\n write_patch = mocker.patch(\n 'p360_contact_manager.common.WriteLocalFile.__call__',\n )\n expected = json.dumps({'a': '1', 'b': '2'})\n\n StoreSettings()({'b': '2'})\n\n write_patch.assert_called_once_with(\n expected,\n file_path='settings.json',\n )\n","sub_path":"tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"19934102","text":"import numpy as np\nimport pytest\n\nimport xarray as xr\nfrom xarray import DataArray\nfrom xarray.tests import assert_allclose, assert_equal, raises_regex\n\n\n@pytest.mark.parametrize(\"as_dataset\", (True, False))\ndef test_weighted_non_DataArray_weights(as_dataset):\n\n data = DataArray([1, 2])\n if as_dataset:\n data = data.to_dataset(name=\"data\")\n\n with raises_regex(ValueError, \"`weights` must be a DataArray\"):\n data.weighted([1, 2])\n\n\n@pytest.mark.parametrize(\"as_dataset\", (True, False))\n@pytest.mark.parametrize(\"weights\", ([np.nan, 2], [np.nan, np.nan]))\ndef test_weighted_weights_nan_raises(as_dataset, weights):\n\n data = DataArray([1, 2])\n if as_dataset:\n data = data.to_dataset(name=\"data\")\n\n with pytest.raises(ValueError, match=\"`weights` cannot contain missing values.\"):\n data.weighted(DataArray(weights))\n\n\n@pytest.mark.parametrize(\n (\"weights\", \"expected\"),\n (([1, 2], 3), ([2, 0], 2), ([0, 0], np.nan), ([-1, 1], np.nan)),\n)\ndef test_weighted_sum_of_weights_no_nan(weights, expected):\n\n da = DataArray([1, 2])\n weights = DataArray(weights)\n result = da.weighted(weights).sum_of_weights()\n\n expected = DataArray(expected)\n\n assert_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n (\"weights\", \"expected\"),\n (([1, 2], 2), ([2, 0], np.nan), ([0, 0], np.nan), ([-1, 1], 1)),\n)\ndef 
test_weighted_sum_of_weights_nan(weights, expected):\n\n da = DataArray([np.nan, 2])\n weights = DataArray(weights)\n result = da.weighted(weights).sum_of_weights()\n\n expected = DataArray(expected)\n\n assert_equal(expected, result)\n\n\n@pytest.mark.parametrize(\"da\", ([1.0, 2], [1, np.nan], [np.nan, np.nan]))\n@pytest.mark.parametrize(\"factor\", [0, 1, 3.14])\n@pytest.mark.parametrize(\"skipna\", (True, False))\ndef test_weighted_sum_equal_weights(da, factor, skipna):\n # if all weights are 'f'; weighted sum is f times the ordinary sum\n\n da = DataArray(da)\n weights = xr.full_like(da, factor)\n\n expected = da.sum(skipna=skipna) * factor\n result = da.weighted(weights).sum(skipna=skipna)\n\n assert_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n (\"weights\", \"expected\"), (([1, 2], 5), ([0, 2], 4), ([0, 0], 0))\n)\ndef test_weighted_sum_no_nan(weights, expected):\n\n da = DataArray([1, 2])\n\n weights = DataArray(weights)\n result = da.weighted(weights).sum()\n expected = DataArray(expected)\n\n assert_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n (\"weights\", \"expected\"), (([1, 2], 4), ([0, 2], 4), ([1, 0], 0), ([0, 0], 0))\n)\n@pytest.mark.parametrize(\"skipna\", (True, False))\ndef test_weighted_sum_nan(weights, expected, skipna):\n\n da = DataArray([np.nan, 2])\n\n weights = DataArray(weights)\n result = da.weighted(weights).sum(skipna=skipna)\n\n if skipna:\n expected = DataArray(expected)\n else:\n expected = DataArray(np.nan)\n\n assert_equal(expected, result)\n\n\n@pytest.mark.filterwarnings(\"ignore:Mean of empty slice\")\n@pytest.mark.parametrize(\"da\", ([1.0, 2], [1, np.nan], [np.nan, np.nan]))\n@pytest.mark.parametrize(\"skipna\", (True, False))\n@pytest.mark.parametrize(\"factor\", [1, 2, 3.14])\ndef test_weighted_mean_equal_weights(da, skipna, factor):\n # if all weights are equal (!= 0), should yield the same result as mean\n\n da = DataArray(da)\n\n # all weights as 1.\n weights = xr.full_like(da, factor)\n\n expected = da.mean(skipna=skipna)\n result = da.weighted(weights).mean(skipna=skipna)\n\n assert_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n (\"weights\", \"expected\"), (([4, 6], 1.6), ([1, 0], 1.0), ([0, 0], np.nan))\n)\ndef test_weighted_mean_no_nan(weights, expected):\n\n da = DataArray([1, 2])\n weights = DataArray(weights)\n expected = DataArray(expected)\n\n result = da.weighted(weights).mean()\n\n assert_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n (\"weights\", \"expected\"), (([4, 6], 2.0), ([1, 0], np.nan), ([0, 0], np.nan))\n)\n@pytest.mark.parametrize(\"skipna\", (True, False))\ndef test_weighted_mean_nan(weights, expected, skipna):\n\n da = DataArray([np.nan, 2])\n weights = DataArray(weights)\n\n if skipna:\n expected = DataArray(expected)\n else:\n expected = DataArray(np.nan)\n\n result = da.weighted(weights).mean(skipna=skipna)\n\n assert_equal(expected, result)\n\n\ndef expected_weighted(da, weights, dim, skipna, operation):\n \"\"\"\n Generate expected result using ``*`` and ``sum``. 
This is checked against\n the result of da.weighted which uses ``dot``\n \"\"\"\n\n weighted_sum = (da * weights).sum(dim=dim, skipna=skipna)\n\n if operation == \"sum\":\n return weighted_sum\n\n masked_weights = weights.where(da.notnull())\n sum_of_weights = masked_weights.sum(dim=dim, skipna=True)\n valid_weights = sum_of_weights != 0\n sum_of_weights = sum_of_weights.where(valid_weights)\n\n if operation == \"sum_of_weights\":\n return sum_of_weights\n\n weighted_mean = weighted_sum / sum_of_weights\n\n if operation == \"mean\":\n return weighted_mean\n\n\n@pytest.mark.parametrize(\"dim\", (\"a\", \"b\", \"c\", (\"a\", \"b\"), (\"a\", \"b\", \"c\"), None))\n@pytest.mark.parametrize(\"operation\", (\"sum_of_weights\", \"sum\", \"mean\"))\n@pytest.mark.parametrize(\"add_nans\", (True, False))\n@pytest.mark.parametrize(\"skipna\", (None, True, False))\n@pytest.mark.parametrize(\"as_dataset\", (True, False))\ndef test_weighted_operations_3D(dim, operation, add_nans, skipna, as_dataset):\n\n dims = (\"a\", \"b\", \"c\")\n coords = dict(a=[0, 1, 2, 3], b=[0, 1, 2, 3], c=[0, 1, 2, 3])\n\n weights = DataArray(np.random.randn(4, 4, 4), dims=dims, coords=coords)\n\n data = np.random.randn(4, 4, 4)\n\n # add approximately 25 % NaNs (https://stackoverflow.com/a/32182680/3010700)\n if add_nans:\n c = int(data.size * 0.25)\n data.ravel()[np.random.choice(data.size, c, replace=False)] = np.NaN\n\n data = DataArray(data, dims=dims, coords=coords)\n\n if as_dataset:\n data = data.to_dataset(name=\"data\")\n\n if operation == \"sum_of_weights\":\n result = data.weighted(weights).sum_of_weights(dim)\n else:\n result = getattr(data.weighted(weights), operation)(dim, skipna=skipna)\n\n expected = expected_weighted(data, weights, dim, skipna, operation)\n\n assert_allclose(expected, result)\n\n\n@pytest.mark.parametrize(\"operation\", (\"sum_of_weights\", \"sum\", \"mean\"))\n@pytest.mark.parametrize(\"as_dataset\", (True, False))\ndef test_weighted_operations_nonequal_coords(operation, as_dataset):\n\n weights = DataArray(np.random.randn(4), dims=(\"a\",), coords=dict(a=[0, 1, 2, 3]))\n data = DataArray(np.random.randn(4), dims=(\"a\",), coords=dict(a=[1, 2, 3, 4]))\n\n if as_dataset:\n data = data.to_dataset(name=\"data\")\n\n expected = expected_weighted(\n data, weights, dim=\"a\", skipna=None, operation=operation\n )\n result = getattr(data.weighted(weights), operation)(dim=\"a\")\n\n assert_allclose(expected, result)\n\n\n@pytest.mark.parametrize(\"dim\", (\"dim_0\", None))\n@pytest.mark.parametrize(\"shape_data\", ((4,), (4, 4), (4, 4, 4)))\n@pytest.mark.parametrize(\"shape_weights\", ((4,), (4, 4), (4, 4, 4)))\n@pytest.mark.parametrize(\"operation\", (\"sum_of_weights\", \"sum\", \"mean\"))\n@pytest.mark.parametrize(\"add_nans\", (True, False))\n@pytest.mark.parametrize(\"skipna\", (None, True, False))\n@pytest.mark.parametrize(\"as_dataset\", (True, False))\ndef test_weighted_operations_different_shapes(\n dim, shape_data, shape_weights, operation, add_nans, skipna, as_dataset\n):\n\n weights = DataArray(np.random.randn(*shape_weights))\n\n data = np.random.randn(*shape_data)\n\n # add approximately 25 % NaNs\n if add_nans:\n c = int(data.size * 0.25)\n data.ravel()[np.random.choice(data.size, c, replace=False)] = np.NaN\n\n data = DataArray(data)\n\n if as_dataset:\n data = data.to_dataset(name=\"data\")\n\n if operation == \"sum_of_weights\":\n result = getattr(data.weighted(weights), operation)(dim)\n else:\n result = getattr(data.weighted(weights), operation)(dim, skipna=skipna)\n\n expected = 
expected_weighted(data, weights, dim, skipna, operation)\n\n assert_allclose(expected, result)\n\n\n@pytest.mark.parametrize(\"operation\", (\"sum_of_weights\", \"sum\", \"mean\"))\n@pytest.mark.parametrize(\"as_dataset\", (True, False))\n@pytest.mark.parametrize(\"keep_attrs\", (True, False, None))\ndef test_weighted_operations_keep_attr(operation, as_dataset, keep_attrs):\n\n weights = DataArray(np.random.randn(2, 2), attrs=dict(attr=\"weights\"))\n data = DataArray(np.random.randn(2, 2))\n\n if as_dataset:\n data = data.to_dataset(name=\"data\")\n\n data.attrs = dict(attr=\"weights\")\n\n result = getattr(data.weighted(weights), operation)(keep_attrs=True)\n\n if operation == \"sum_of_weights\":\n assert weights.attrs == result.attrs\n else:\n assert data.attrs == result.attrs\n\n result = getattr(data.weighted(weights), operation)(keep_attrs=None)\n assert not result.attrs\n\n result = getattr(data.weighted(weights), operation)(keep_attrs=False)\n assert not result.attrs\n\n\n@pytest.mark.xfail(reason=\"xr.Dataset.map does not copy attrs of DataArrays GH: 3595\")\n@pytest.mark.parametrize(\"operation\", (\"sum\", \"mean\"))\ndef test_weighted_operations_keep_attr_da_in_ds(operation):\n # GH #3595\n\n weights = DataArray(np.random.randn(2, 2))\n data = DataArray(np.random.randn(2, 2), attrs=dict(attr=\"data\"))\n data = data.to_dataset(name=\"a\")\n\n result = getattr(data.weighted(weights), operation)(keep_attrs=True)\n\n assert data.a.attrs == result.a.attrs\n","sub_path":"xarray/tests/test_weighted.py","file_name":"test_weighted.py","file_ext":"py","file_size_in_byte":9482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"266002168","text":"'''\r\nCreated on 28 Dec 2013\r\n\r\n@author: Nathan\r\n'''\r\nimport unittest\r\nimport shutil\r\nfrom Repo import * \r\nimport os\r\n\r\nfrom TestKit import TestData, TestSubs\r\nfrom MasterLogger import MasterLogger\r\n\r\nclass EnvironmentTest(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n self.myRepo = TestSubs.deleteThenCreateFreshMasterRepo(TestData.testMasterRepoRoot, \"MasterRepo.EnvironmentTest\")\r\n\r\n\r\n def testCreateEnvironment(self):\r\n myEnv = Environment(self.myRepo.getRepoLocation(), \"MyTestEnv\")\r\n myEnv.createEnvironment()\r\n self.assertTrue(myEnv.exists(), \"Problem when creating a new Environment\")\r\n \r\n def testAddDeleteAppFromEnvironment(self):\r\n appName = \"TestApp\"\r\n appVer = \"1.2\"\r\n envName = \"MyTestEnv4Apps\"\r\n myEnv = Environment(self.myRepo.getRepoLocation(), envName)\r\n myEnv.createEnvironment()\r\n # Add the rest when Application has been changed!\r\n \r\n def testAddHostToEnv(self):\r\n pass\r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n #import sys;sys.argv = ['', 'Test.testName']\r\n unittest.main()","sub_path":"src/Master/EnvironmentTest.py","file_name":"EnvironmentTest.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"255983417","text":"import math\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG, encoding='utf-8', filename='calculator.log')\n\nclass Calculator():\n \n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def add(self): \n return self.x + self.y\n\n def sub(self):\n return self.x - self.y\n\n def multiply(self):\n return self.x * self.y\n\n def divide(self):\n \n if (y == 0 or x == 0):\n return \"You can't divide by zero.\"\n else:\n return self.x / self.y \n\n def sqrt(self):\n 
return 'Square root of numer one: ',math.sqrt(x), 'Square root of number 2: ', math.sqrt(y)\n\nx = int(input(\"Enter first number: \"))\ny = int(input(\"Enter second number: \"))\n\nobj = Calculator(x, y)\nwhile True:\n print(\"===================================================\")\n print(\"######## Welcome To the Calculator Program!########\")\n print(\"===================================================\")\n def menu():\n b = ('1. Add \\n2. Sub \\n3. Multiply \\n4. Divide \\n5. SquareRoot \\n-1. To Quit') \n print(b)\n\n menu()\n print()\n choice = int(input('Please select one of the options above : ')) \n print(\"===================================================\")\n\n if choice == 1:\n print(\"Result: \",obj.add())\n elif choice == 2:\n print(\"Result: \",obj.sub())\n elif choice == 3:\n print(\"Result: \",obj.multiply()) \n elif choice == 4:\n print(\"Result: \",obj.divide())\n elif choice == 5:\n print(\"Result: \",obj.sqrt())\n elif choice == 0:\n print('Again try one of the following')\n elif choice == -1:\n print('You have exited the Calculator.')\n break\n else:\n print('Invalid option') \n print(\"===================================================\") \nprint()\n\n\nif __name__ == \"__main__\":\n Calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"601400375","text":"from conf import *\nimport requests\nfrom token_create import create_token\nfrom measure import run\n\n\ndef get_user(token):\n KEYSTONE_URL='http://172.31.10.11:5000/v3/'\n url = KEYSTONE_URL + 'users/cbb082c086c44138ba55e5b05aebec85'\n headers = {'content-type': 'application/json', 'X-Auth-Token': token}\n r = requests.get(url, headers=headers)\n\ndef revoke_few_tokens():\n master_token = create_token()\n url = KEYSTONE_URL + 'auth/tokens'\n for i in range(100):\n headers = {'content-type': 'application/json',\n 'X-Auth-Token': master_token,\n 'X-Subject-Token': create_token()}\n r = requests.delete(url, headers=headers)\n \n\n@run\ndef test(token):\n get_user(token)\n\nif __name__=='__main__':\n for i in range(1):\n test(create_token())\n #revoke_few_tokens()\n","sub_path":"token_verify.py","file_name":"token_verify.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"426408638","text":"from django.db.models import (\n CharField,\n TextField,\n IntegerField,\n FloatField,\n EmailField,\n ForeignKey,\n FileField,\n DateTimeField,\n DateField,\n AutoField,\n BooleanField,\n ManyToManyField\n)\nfrom django.forms.widgets import (\n Textarea,\n NumberInput,\n EmailInput,\n Input,\n Select,\n TextInput,\n FileInput,\n DateTimeInput,\n DateInput,\n HiddenInput,\n CheckboxInput,\n CheckboxSelectMultiple,\n)\n\nimport random\nimport string\nimport csv\n\n\ndef generate_random_string(n):\n \"\"\"\n Generates a random string of length n\n :param n: Length of string\n :return: Random string\n \"\"\"\n return ''.join(random.choices(string.ascii_lowercase, k=n))\n\n\ndef unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):\n \"\"\"\n CSV reader for UTF-8 documents\n :param unicode_csv_data: Data of CSV\n :param dialect: Dialect of CSV\n :param kwargs: Other args\n :return:\n \"\"\"\n # csv.py doesn't do Unicode; encode temporarily as UTF-8:\n csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),\n dialect=dialect, **kwargs)\n for row in csv_reader:\n # 
decode UTF-8 back to Unicode, cell by cell:\n yield [str(cell, 'utf-8') for cell in row]\n\n\ndef utf_8_encoder(unicode_csv_data):\n \"\"\"\n UTF-8 Encoder\n :param unicode_csv_data:\n :return: Generator of UTF-8 encoding\n \"\"\"\n for line in unicode_csv_data:\n yield line.encode('utf-8')\n\n\ndef field_to_widget(field):\n if type(field) is CharField:\n if field.choices:\n return Select(attrs={\"class\": \"form-control\"})\n return TextInput(attrs={\"class\": \"form-control\", \"rows\": 1})\n if type(field) is TextField:\n return Textarea(attrs={\"class\": \"form-control\", \"rows\": 1})\n if type(field) is AutoField:\n return HiddenInput(attrs={\"class\": \"form-control\", \"rows\": 1})\n if type(field) is IntegerField or type(field) is FloatField:\n return NumberInput(attrs={\"class\": \"form-control\"})\n if type(field) is EmailField:\n return EmailInput(attrs={\"class\": \"form-control\"})\n if type(field) is ForeignKey:\n return Select(attrs={\"class\": \"form-control\"})\n if type(field) is ManyToManyField:\n return CheckboxSelectMultiple(attrs={\"class\": \"\"})\n if type(field) is BooleanField:\n return CheckboxInput(attrs={\"class\": \"form-control\"})\n if type(field) is FileField:\n return FileInput(attrs={\"class\": \"form-control\"})\n if type(field) is DateField:\n return DateInput(attrs={\n \"class\": \"form-control date\",\n \"type\": \"date\"\n })\n if type(field) is DateTimeField:\n return DateTimeInput(attrs={\"class\": \"form-control datetimepicker\"})\n\n return Input(attrs={\"class\": \"form-control\"})\n\n\ndef generate_bootstrap_widgets_for_all_fields(model):\n return {x.name: field_to_widget(x) for x in model._meta.get_fields()}\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"551573513","text":"import json\n\npracownicy = []\nzbior = {}\n\nczynnosc = input('Co chcesz zrobić? 
[d - dodaj, w - wypisz]')\n\nif czynnosc == 'd':\n imie = input(\"Imię: \")\n nazwisko = input(\"Nazwisko: \")\n rok_urodzenia = input(\"Rok urodzenia: \")\n pensja = input(\"Pensja: \")\n with open(\"pracownicy.json\") as f:\n pracownicy.append(nowy_pracownik)\n json.dump(pracownicy, f)\n\nelif czynnosc == 'w':\n with open(\"pracownicy.json\") as f:\n print(json.dump(f))\n\nelse:\n 'Tym tym tym - nie ma takiej czynności'\n\n\n","sub_path":"zjazd_do_bazy_4/stdlib(bibliotekastandardowa)_python/zadanie_1.py","file_name":"zadanie_1.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"75539369","text":"from logging import getLogger\nfrom typing import Dict, Optional, Union\n\nfrom aiogram.dispatcher.filters.state import State\nfrom aiogram.types import InlineKeyboardMarkup, Message, CallbackQuery, ParseMode\nfrom aiogram.utils.exceptions import MessageNotModified\n\nfrom aiogram_dialog.manager.protocols import DialogManager\nfrom .dialog import Dialog, DialogWindowProto, DataGetter\nfrom .manager.intent import DialogUpdateEvent\nfrom .widgets.action import Actionable\nfrom .widgets.input import BaseInput, MessageHandlerFunc\nfrom .widgets.kbd import Keyboard\nfrom .widgets.text import Text\nfrom .widgets.utils import ensure_widgets\n\nlogger = getLogger(__name__)\n\n\nclass Window(DialogWindowProto):\n def __init__(self,\n *widgets: Union[str, Text, Keyboard, MessageHandlerFunc, BaseInput],\n state: State,\n getter: DataGetter = None,\n parse_mode: ParseMode = None):\n self.text, self.keyboard, self.on_message = ensure_widgets(widgets)\n self.getter = getter\n self.state = state\n self.parse_mode = parse_mode\n\n async def render_text(self, data: Dict, manager: DialogManager) -> str:\n return await self.text.render_text(data, manager)\n\n async def render_kbd(self, data: Dict, manager: DialogManager) -> InlineKeyboardMarkup:\n return InlineKeyboardMarkup(\n inline_keyboard=await self.keyboard.render_keyboard(data, manager)\n )\n\n async def load_data(self, dialog: \"Dialog\", manager: DialogManager) -> Dict:\n if not self.getter:\n return {}\n return await self.getter(**manager.data)\n\n async def process_message(self, message: Message, dialog: Dialog, manager: DialogManager):\n if self.on_message:\n await self.on_message.process_message(message, dialog, manager)\n\n async def process_callback(self, c: CallbackQuery, dialog: Dialog, manager: DialogManager):\n if self.keyboard:\n await self.keyboard.process_callback(c, dialog, manager)\n\n async def show(self, dialog: Dialog, manager: DialogManager) -> Message:\n logger.debug(\"Show window: %s\", self)\n current_data = await self.load_data(dialog, manager)\n text = await self.render_text(current_data, manager)\n kbd = await self.render_kbd(current_data, manager)\n event = manager.event\n context = manager.context\n if isinstance(event, CallbackQuery):\n if text == event.message.text:\n if kbd != event.message.reply_markup:\n return await event.message.edit_reply_markup(reply_markup=kbd)\n else:\n return event.message\n else:\n return await event.message.edit_text(\n text=text, reply_markup=kbd, parse_mode=self.parse_mode\n )\n elif isinstance(event, DialogUpdateEvent):\n if context and context.last_message_id:\n try:\n return await event.bot.edit_message_text(\n message_id=context.last_message_id, chat_id=event.chat.id,\n text=text, reply_markup=kbd, parse_mode=self.parse_mode\n )\n except MessageNotModified:\n pass # nothing to update\n else:\n if 
context and context.last_message_id:\n try:\n await manager.event.bot.edit_message_reply_markup(\n message_id=context.last_message_id, chat_id=manager.event.chat.id\n )\n except MessageNotModified:\n pass # nothing to remove\n return await manager.event.bot.send_message(\n chat_id=event.chat.id, text=text, reply_markup=kbd, parse_mode=self.parse_mode\n )\n\n def get_state(self) -> State:\n return self.state\n\n def find(self, widget_id) -> Optional[Actionable]:\n if self.keyboard:\n res = self.keyboard.find(widget_id)\n if res:\n return res\n if self.on_message:\n return self.on_message.find(widget_id)\n return None\n\n def __repr__(self):\n return f\"<{self.__class__.__qualname__}({self.state})>\"\n","sub_path":"aiogram_dialog/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"387124180","text":"name = input(\"请输入名字:\")\nage = input(\"请输入年龄:\")\nsex = input(\"请输入性别:\")\nheight = input(\"请输入身高:\")\n#print(name,age,sex)\n\n'''\nprint(name)\nprint(age)\nprint(sex)\n把自己的个人信息输入进来要整数 要有浮点数 要有字符格式 格式化输出到屏幕上\n'''\n\n#格式化输出\n#%s 占位符字符\n#%f 占位符点数 保留两位小数%.02f\n#%d 占位整数\nprint(\"我的名字是%s,我的年龄是%f,我的性别是%d,我的身高是%0.2f\"%(name,age,sex,height))\n\n","sub_path":"1807-1/06day/04-不知名.py","file_name":"04-不知名.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"242464462","text":"#!/usr/bin/env python\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Michael A.G. Aivazis\n# California Institute of Technology\n# (C) 1998-2005 All Rights Reserved\n#\n# \n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\n\nfrom .Device import Device\n\n\nclass TCPDevice(Device):\n\n def record(self, entry):\n\n if self._connection is None:\n return\n\n import pythia.journal.services\n request = pythia.journal.services.request(command=\"record\", args=[self.renderer.render(entry)])\n\n try:\n self._marshaller.send(request, self._connection)\n result = self._marshaller.receive(self._connection)\n except self._marshaller.RequestError:\n return\n\n return\n\n def __init__(self, key, port, host=''):\n import socket\n from .NetRenderer import NetRenderer\n\n Device.__init__(self, NetRenderer())\n\n self.host = host\n self.port = port\n\n import pythia.journal.services\n self._marshaller = pythia.journal.services.pickler()\n self._marshaller.key = key\n\n import pythia.pyre.ipc\n self._connection = pythia.pyre.ipc.connection('tcp')\n\n self._connection.connect((self.host, self.port))\n\n return\n\n\n# version\n__id__ = \"$Id: TCPDevice.py,v 1.3 2005/03/14 22:59:18 aivazis Exp $\"\n\n# End of file\n","sub_path":"pythia/journal/devices/TCPDevice.py","file_name":"TCPDevice.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"395778171","text":"import time\nfrom ParserObj import ParserObj\n\ndef f8():\n test_data, small_data, pp_data, duplex_data = \"test.json\", \"data1.json\", \"data2.json\", \"duplex_data.json\"\n req_types = [\"ifcDoor\", \"ifcproject\", \"ifcsite\", \"ifcbuilding\"]\n req_types += [\"ifcwall\", \"relatedElements\"]\n req_types2=[\"ifcdoor\"]\n req_fields = [\"globalid\", \"ownerHistory\", \"name\", \"description\", \"relatedelements\", \"isdecomposedby\"]\n\n # error in duplex_data.json\n p = ParserObj(pp_data, 
req_types, req_fields)\n\nif __name__ == '__main__':\n start_time = time.time()\n print('running program...')\n f8()\n end_time = time.time()\n print('...end program')\n print('time in seconds: %s' % (end_time-start_time))\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"83874691","text":"from fuzzywuzzy import fuzz\r\nimport csv\r\nfrom pyavrophonetic import avro\r\nimport WordDictionary.wordDictionaryCreatorAndFilteredWords as dictionary\r\nimport Unigram.uniGramBangla as unigram\r\nfrom BanglaWordSort.bangla_sort import bangla\r\nimport re\r\nfrom sklearn.metrics import accuracy_score\r\nfrom paragraphLevel import multiSelection\r\n\r\n\r\n#Globally Making a word dictionary\r\nWordDictionary = dictionary.makeDictionary('./files/data.csv')\r\nWordDictionary = sorted(WordDictionary, key=bangla)\r\n#print(WordDictionary)\r\n\r\n\r\ndef correct(inputLine):\r\n string_words = inputLine.split()\r\n \r\n for i in range(len(string_words)):\r\n if is_number(string_words[i]):\r\n continue\r\n \r\n suggestions = []\r\n value =[]\r\n for name in WordDictionary:\r\n if string_words[i] == name:\r\n value.append(fuzz.ratio(string_words[i], name))\r\n suggestions.append(name)\r\n elif fuzz.ratio(string_words[i], name) >= 65:\r\n value.append(fuzz.ratio(string_words[i], name))\r\n suggestions.append(name)\r\n print(suggestions)\r\n print(value)\r\n\r\n if len(suggestions) > 0:\r\n maxPer = 0\r\n cor = ''\r\n for name in suggestions:\r\n percent = fuzz.ratio(string_words[i], name)\r\n if percent > maxPer:\r\n if checkSameSuggestion(value):\r\n newList = wordWithMaxScore(value,suggestions)\r\n myUniGram = unigram.createUnigram('./files/data.csv')\r\n cor = unigram.findMax(newList,myUniGram)\r\n else:\r\n cor = name\r\n \r\n maxPer = percent\r\n\r\n \r\n print(cor,'---',maxPer,'\\n')\r\n string_words[i] = cor\r\n \r\n return \" \".join(string_words)\r\n\r\ndef wordWithMaxScore(value,string):\r\n \r\n valueLi = []\r\n newStr = []\r\n\r\n for i in range(len(value)):\r\n if max(value) == value[i]:\r\n valueLi.append(i)\r\n\r\n for i in range(len(valueLi)):\r\n newStr.append(string[valueLi[i]])\r\n\r\n return newStr\r\n\r\ndef is_number(s):\r\n try:\r\n float(s) \r\n except ValueError:\r\n try:\r\n complex(s) \r\n except ValueError:\r\n return False\r\n\r\n return True\r\n\r\ndef checkSameSuggestion(wordList):\r\n same = False\r\n count = 0\r\n for i in range(len(wordList)):\r\n if max(wordList) == wordList[i]:\r\n count+=1\r\n\r\n if count>1:\r\n same = True\r\n\r\n if same == True:\r\n return 1\r\n else:\r\n return 0\r\ndef createTestDataList(fileLocation):\r\n with open (fileLocation, \"r\",encoding = 'utf-8') as myfile:\r\n banglish_sentence=myfile.readlines()\r\n testData = banglish_sentence[0].split(' ')\r\n testData.pop(0)\r\n return testData\r\n\r\ndef checkFloat(string):\r\n string1 = string.split(\" \")\r\n\r\n oldFloat = re.findall(\"[-|+]?\\d+\\।\\d+\", string)\r\n\r\n\r\n c = []\r\n for i in range(len(oldFloat)):\r\n for j in range(len(string1)):\r\n if oldFloat[i] == string1[j]:\r\n c.append(j)\r\n for i in range(len(oldFloat)):\r\n oldFloat[i] = oldFloat[i].replace('।','.')\r\n\r\n for i in range(len(oldFloat)):\r\n string1[c[i]] = oldFloat[i]\r\n\r\n return \" \".join(string1)\r\n\r\ndef main():\r\n \r\n while(True):\r\n command = input('y/n:')\r\n if command != 'y':\r\n break\r\n fileName = input('Enter file name: ')\r\n testFileLocation = \"./True 
Value/test data/\"+fileName+\".txt\"\r\n with open (testFileLocation, \"r\",encoding = 'utf-8') as myfile:\r\n banglish_sentence=myfile.readlines()\r\n\r\n string_to_be_checked = checkFloat(avro.parse(banglish_sentence))\r\n \r\n prdicted_sentence = correct(string_to_be_checked)\r\n \r\n print(\"\\nConverted sentence: \", string_to_be_checked,'\\n')\r\n print(prdicted_sentence)\r\n\r\n trueFileLocation = \"./True Value/true data/\"+fileName+\"True.txt\"\r\n trueTestData = createTestDataList(trueFileLocation)\r\n predictedData = prdicted_sentence.split(' ')\r\n\r\n \r\n print(trueTestData)\r\n print(predictedData)\r\n \r\n print(\"Total Number Of Test Word: \",len(predictedData))\r\n errorData = 0\r\n for i in range(len(predictedData)):\r\n if predictedData[i] == trueTestData[i]:\r\n continue\r\n else:\r\n errorData += 1\r\n \r\n print(\"Corrected Word: \", len(predictedData) - errorData)\r\n print(\"Failed To Correct Word: \",errorData)\r\n \r\n accuracy = accuracy_score(trueTestData, predictedData)\r\n \r\n print(\"Accuracy: \",accuracy*100)\r\n \r\n \r\nmain()\r\n","sub_path":"spellcheckMain-Unigram(Multi Run).py","file_name":"spellcheckMain-Unigram(Multi Run).py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"440056948","text":"import os\n\nimport boto3 as boto3\n\nfrom pgatk_nexflow.utils.constants import S3_ENDPOINT, DOWNLOAD_URL_VALID\n\nsession = boto3.session.Session()\n\ns3_client = session.client(\n service_name='s3',\n endpoint_url=S3_ENDPOINT\n)\n\n\ndef generate_download_file(bucket, resource_path):\n return s3_client.generate_presigned_url('get_object',\n Params={'Bucket': bucket, 'Key': resource_path},\n ExpiresIn=DOWNLOAD_URL_VALID)\n","sub_path":"pgatk_nexflow/services/s3_service.py","file_name":"s3_service.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"514232611","text":"from __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\nsys.path.insert(0, os.path.realpath(__file__ + ('/..' 
* 2)))\nprint(f'Running from package root directory {sys.path[0]}')\n\nimport PIL.Image as Image\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\nfrom common.discriminators import *\nfrom common.utils import save_testdata_statistics\nfrom common.generators import Generator\nfrom common.models_fid import cnn\n\nfrom common.generators import Generator_stacked_mnist\nfrom common.discriminators import Discriminator_stacked_mnist\nfrom train_loop import TrainLoop\nfrom data_load import Loader\n\nparser = argparse.ArgumentParser(description='Hyper volume training of GANs')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train (default: 50)')\nparser.add_argument('--lr', type=float, default=0.0002, metavar='LR', help='learning rate (default: 0.0002)')\nparser.add_argument('--mgd-lr', type=float, default=0.01, metavar='LR', help='learning rate for mgd (default: 0.01)')\nparser.add_argument('--beta1', type=float, default=0.5, metavar='lambda', help='Adam beta param (default: 0.5), or alpha param for RMSprop')\nparser.add_argument('--beta2', type=float, default=0.999, metavar='lambda', help='Adam beta param (default: 0.999)')\nparser.add_argument('--ndiscriminators', type=int, default=8, help='Number of discriminators. Default=8')\nparser.add_argument('--checkpoint-epoch', type=int, default=None, metavar='N', help='epoch to load for checkpointing. If None, training starts from scratch')\nparser.add_argument('--checkpoint-path', type=str, default=None, metavar='Path', help='Path for checkpointing')\nparser.add_argument('--classifier-path', type=str, default=None, metavar='Path', help='Path to pretrained classifier on MNIST')\nparser.add_argument('--data-path', type=str, default='./train.hdf', metavar='Path', help='Path to hdf file containing stacked MNIST. Can be generated with gen_data.py')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\nparser.add_argument('--save-every', type=int, default=5, metavar='N', help='how many epochs to wait before logging training status. Default is 5')\nparser.add_argument('--train-mode', choices=['vanilla', 'hyper', 'gman', 'gman_grad', 'loss_delta', 'mgd'], default='vanilla', help='Salect train mode. Default is vanilla (simple average of Ds losses)')\nparser.add_argument('--nadir-slack', type=float, default=1.5, metavar='nadir', help='factor for nadir-point update. 
Only used in hyper mode (default: 1.5)')\nparser.add_argument('--alpha', type=float, default=0.8, metavar='alhpa', help='Used in GMAN and loss_del modes (default: 0.8)')\nparser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')\nparser.add_argument('--sgd', action='store_true', default=False, help='enables SGD - *MGD only* ')\nparser.add_argument('--job-id', type=str, default=None, help='Arbitrary id to be written on checkpoints')\nparser.add_argument('--optimizer', choices=['adam', 'amsgrad', 'rmsprop'], default='adam', help='Select optimizer (Default is adam).')\nargs = parser.parse_args()\nargs.cuda = True if not args.no_cuda and torch.cuda.is_available() else False\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n\ttorch.cuda.manual_seed(args.seed)\n\ntrainset = Loader(args.data_path)\ntrain_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, num_workers=args.workers)\n\ngenerator = Generator_stacked_mnist().train()\nclassifier = cnn().eval()\nclassifier_state = torch.load(args.classifier_path, map_location=lambda storage, loc: storage)\nclassifier.load_state_dict(classifier_state['model_state'])\n\ndisc_list = []\n\nfor i in range(args.ndiscriminators):\n\tif args.optimizer == 'adam':\n\t\tdisc = Discriminator_stacked_mnist(optim.Adam, args.optimizer, args.lr, (args.beta1, args.beta2)).train()\n\telif args.optimizer == 'amsgrad':\t\n\t\tdisc = Discriminator_stacked_mnist(optim.Adam, args.optimizer, args.lr, (args.beta1, args.beta2), amsgrad = True).train()\n\telif args.optimizer == 'rmsprop':\n\t\tdisc = Discriminator_stacked_mnist(optim.RMSprop, args.optimizer, args.lr, (args.beta1, args.beta2)).train()\n\tdisc_list.append(disc)\n\n\nif args.cuda:\n\tgenerator = generator.cuda()\n\tclassifier = classifier.cuda()\n\tfor disc in disc_list:\n\t\tdisc = disc.cuda()\n\ttorch.backends.cudnn.benchmark=True\n\nif args.train_mode == 'mgd' and args.sgd:\n\toptimizer_g = optim.SGD(generator.parameters(), lr=args.mgd_lr)\nelif args.optimizer == 'adam':\n\toptimizer_g = optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))\nelif args.optimizer == 'amsgrad':\n\toptimizer_g = optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), amsgrad = True)\nelif args.optimizer == 'rmsprop':\n\toptimizer_g = optim.RMSprop(generator.parameters(), lr=args.lr, alpha = args.beta1)\n\ntrainer = TrainLoop(generator, disc_list, optimizer_g, train_loader, classifier=classifier, nadir_slack=args.nadir_slack, alpha=args.alpha, train_mode=args.train_mode, checkpoint_path=args.checkpoint_path, checkpoint_epoch=args.checkpoint_epoch, cuda=args.cuda, job_id=args.job_id)\n\nprint('Cuda Mode is: {}'.format(args.cuda))\nprint('Train Mode is: {}'.format(args.train_mode))\nprint('Number of discriminators is: {}'.format(len(disc_list)))\n\ntrainer.train(n_epochs=args.epochs, save_every=args.save_every)\n","sub_path":"stacked_mnist/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"193591611","text":"#!/opt/python3/bin/python3\n\n'''\n\nTitle: Guess the number\nPurpose: none\n\n'''\n\nfrom random import randint\t# alternate way of importing a specific library from a module\n#import random\n\nguessesTaken = 6\ntotalGuesses = 0\nnumber= randint(1,20)\t\t# don't need to specifically say from which module\n#number= random.randint(1,20)\n\njinput1=input(\"Please enter your name: \")\nprint(\"Hello \" + 
jinput1 + \". Try to guess the number between 1 and 20. You have 5 guesses left.\")\n\nwhile guessesTaken > 1:\t\t# gets 5 guesses\n\tjguess=int(input(\"Take a guess: \"))\n\n\tguessesTaken-=1\t\t# increment the guesses\n\ttotalGuesses+=1\n\n\tif jguess < number:\n\t\tprint(\"Your guess is too low! You have \" + str(guessesTaken-1) + \" guesses left.\")\n\n\tif jguess > number:\n\t\tprint(\"Your guess is too high! You have \" + str(guessesTaken-1) + \" guesses left.\")\n\n\tif jguess == number:\n\t\tbreak\n\nif jguess == number:\n\tguessesTaken = str(guessesTaken)\n\tprint('Good Job, ' + jinput1 + '! It took you ' + str(totalGuesses) + ' guesses!')\n\nif jguess != number:\n\tnumber=str(number)\n\t#print(\"Sorry \", + jinput1, \". The number was \" + number)\n\tprint(\"Sorry \", jinput1, \". The number was \", number)\n","sub_path":"python/jguess.py","file_name":"jguess.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"372540959","text":"import numpy as np\n\n\nclass Game:\n def __init__(self, id, prob_head):\n self.id = id\n self.probHead = prob_head\n self.countWins = 0\n\n def simulate(self):\n \"\"\"\n simulates 20 coin tosses and counts the number of times {T, T, H} occurred\n \"\"\"\n\n # random number generator\n rnd = np.random.RandomState(seed=self.id)\n\n n_consecutive_tails = 0 # number of consecutive tails so far, set to 0\n\n # flip the coin 20 times\n for i in range(20):\n\n # find if this flip resulted in head or tail\n if rnd.random_sample() < self.probHead:\n\n # if it is head, check if the last 2 tosses resulted in {T, T}\n if n_consecutive_tails >= 2:\n # if so, {T, T, H} has occurred\n self.countWins += 1\n\n # if this is tail, we set the number of consecutive tails to 0\n n_consecutive_tails = 0\n\n else:\n # this flip resulted in tail, so we increment the number of consecutive tails by 1\n n_consecutive_tails += 1\n\n def get_reward(self):\n \"\"\"\n :return: the reward from this game = 100 * (number of {T, T, H}) - 250\n \"\"\"\n return 100 * self.countWins - 250\n\n\nclass SetOfGames:\n def __init__(self, id, prob_head):\n\n self.id = id\n self.probHead = prob_head\n self.gameRewards = []\n self.numLosses = 0 # number of games we lose money\n\n def simulate(self, n_games):\n\n for i in range(n_games):\n # create a new game\n game = Game(id=self.id*n_games+i, prob_head=self.probHead)\n # simulate the game with 20 flips\n game.simulate()\n # get the reward\n reward = game.get_reward()\n # store the reward\n self.gameRewards.append(reward)\n # find if we lost in this game\n if reward < 0:\n self.numLosses += 1\n\n def get_ave_reward(self):\n \"\"\"\n :return: the average reward from playing all games\n \"\"\"\n return sum(self.gameRewards) / len(self.gameRewards)\n\n def get_loss_probability(self):\n \"\"\"\n :return: the proportion of games that we lost money\n \"\"\"\n return self.numLosses / len(self.gameRewards)\n","sub_path":"GameClasses.py","file_name":"GameClasses.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"552955289","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 20 11:24:16 2013\n\n@author: ellen\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ListUitpakken as get\n\n# data = np.loadtxt('20140320/20140320_axis2_6_2mmpinhole_bolo10mmback_externalchopper.csv',\n# skiprows=1, 
delimiter=',').transpose()\n\n# N = 101 # number of points in x direction\n# M = 1 # number of points in y direction\n\n# X = get.ReShape(data[0], (N,M))\n# Y = get.ReShape(data[1], (N,M))\n# Z = get.ReShape(data[2], (N,M))\n\ndef readfile(filename):\n data = np.loadtxt(filename, skiprows=1, delimiter=',').transpose()\n N = len(data[0])\n M = 1\n X = get.ReShape(data[0], (N,M))\n Y = get.ReShape(data[1], (N,M))\n Z = get.ReShape(data[2], (N,M))\n label = os.path.basename(filename).replace(\".csv\",\"\")\n return (X,Y,Z,label)\n\ndef makelog(data):\n max_number = data.max()\n data = data/max_number\n return np.log10(data)\n\ndef plot(X, Y, Z, label):\n plt.plot(X, makelog(Z), label=label)\n plt.scatter(X, makelog(Z), color='black', s=8)\n\nprint(sys.argv)\nfor filename in sys.argv[1:]:\n plot(*readfile(filename))\n\n# plot(*readfile('20140320/20140320_axis2_6_2mmpinhole_bolo10mmback_externalchopper.csv'))\n# plot(*readfile('20140320/20140320_axis2_9.csv'))\n\n#plt.yscale('log')\n#plt.xlabel('x position in mm around X = 45600')\n#plt.ylabel('voltage in mV')\n#plt.title('Delay 4.0 s, Y = 32500')\nplt.legend()\n\nplt.show()\n","sub_path":"ALLMEASUREMENTS/plotjes.py","file_name":"plotjes.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"135824483","text":"import asyncio\r\nimport os\r\nimport threading\r\nfrom abc import ABC, abstractmethod\r\nfrom multiprocessing.pool import Pool\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom ..utils import clean_filename, download_file\r\n\r\n\r\nclass DownloadMethod(ABC):\r\n @abstractmethod\r\n def download_audios(self, audios):\r\n pass\r\n\r\n @staticmethod\r\n def download_audio(audio):\r\n # build path\r\n path = \"{artist} - {title}.mp3\".format(**audio)\r\n path = clean_filename(path)\r\n path = os.path.join(audio['path'], path)\r\n\r\n download_file(audio['url'], path)\r\n\r\n\r\nclass SequentialMethod(DownloadMethod):\r\n def download_audios(self, audios):\r\n for audio in tqdm(audios):\r\n DownloadMethod.download_audio(audio)\r\n\r\n\r\nclass MultiprocessingMethod(DownloadMethod):\r\n CORES_COUNT = 4\r\n\r\n def download_audios(self, audios):\r\n with Pool(MultiprocessingMethod.CORES_COUNT) as pool:\r\n for _ in tqdm(pool.imap_unordered(DownloadMethod.download_audio, audios)):\r\n pass\r\n\r\n\r\nclass AsyncioMethod(DownloadMethod):\r\n def download_audios(self, audios):\r\n loop = asyncio.get_event_loop()\r\n future = asyncio.gather(*(AsyncioMethod.download_audio_async(audio) for audio in audios))\r\n loop.run_until_complete(future)\r\n\r\n @staticmethod\r\n async def download_audio_async(audio):\r\n return DownloadMethod.download_audio(audio)\r\n\r\n\r\nclass ThreadMethod(DownloadMethod):\r\n def download_audios(self, audios):\r\n threads = []\r\n for audio in audios:\r\n thread = threading.Thread(target=DownloadMethod.download_audio, args=[audio])\r\n thread.start()\r\n threads.append(thread)\r\n\r\n for thread in tqdm(threads):\r\n thread.join()\r\n","sub_path":"vkal/download/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"245517929","text":"import numpy as np\nimport random\nfrom scipy.special import expit\n\nclass Network(object):\n def __init__(self,sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y,1) for y in sizes[1:]]\n self.weights = [np.random.randn(y,x) for y,x in 
zip(sizes[:-1],sizes[1:])]\n\n def feedforward(self,a):\n for b,w in zip(self.biases,self.weights):\n # a = sigmoid(np.dot(w,a)+b)\n a = expit(np.dot(w,a)+b)\n return a\n# sizes = [2,3,1]\n# # bias = [np.random.randn(y,1) for y in sizes[1:]]\n# # print(bias)\n# # for x,y in zip(sizes[:-1],sizes[1:]):\n# # print(x,y)\n\nnet = Network([2,3,1])","sub_path":"03_8_gradientDescent.py","file_name":"03_8_gradientDescent.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"641305714","text":"import logging\n\nfrom processing_library.image.operations import copy_image\nfrom wrappers.arlexecute.execution_support.arlexecute import arlexecute\nfrom wrappers.arlexecute.image.gather_scatter import image_scatter_facets, image_gather_facets\n\nlog = logging.getLogger(__name__)\n\ndef image_arlexecute_map_workflow(im, imfunction, facets=1, overlap=0, taper=None, **kwargs):\n \"\"\"Apply a function across an image: scattering to subimages, applying the function, and then gathering\n \n :param im: Image to be processed\n :param imfunction: Function to be applied\n :param facets: See image_scatter_facets\n :param overlap: image_scatter_facets\n :param taper: image_scatter_facets\n :param kwargs: kwargs for imfunction\n :return: output image\n \"\"\"\n \n facets_list = arlexecute.execute(image_scatter_facets, nout=16)(im, facets=facets, overlap=overlap,\n taper=taper)\n root_list = [arlexecute.execute(imfunction)(facet, **kwargs) for facet in facets_list]\n gathered = arlexecute.execute(image_gather_facets)(root_list, im, facets=facets, overlap=overlap,\n taper=taper)\n return gathered\n\n\ndef sum_images_arlexecute(image_list, split=2):\n \"\"\" Sum a set of images\n\n :param image_list: List of (image, sum weights) tuples\n :param split: Split into\n :return: image\n \"\"\"\n def sum_images(imagelist):\n out = copy_image(imagelist[0])\n out.data += imagelist[1].data\n return out\n \n if len(image_list) > split:\n centre = len(image_list) // split\n result = [sum_images_arlexecute(image_list[:centre])]\n result.append(sum_images_arlexecute(image_list[centre:]))\n return arlexecute.execute(sum_images, nout=2)(result)\n else:\n return arlexecute.execute(sum_images, nout=2)(image_list)\n","sub_path":"workflows/arlexecute/image/image_arlexecute.py","file_name":"image_arlexecute.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"515123729","text":"import traceback\nimport pandas as pd\nfrom pandas import DataFrame as DF\nfrom talib import abstract as ab\nfrom zaifbot.bot_common.logger import logger\nfrom zaifbot.modules.utils import get_price_info\n\n\nHIGH = 'high'\nLOW = 'low'\nCLOSE = 'close'\nTIME = 'time'\n\n\ndef get_adx(currency_pair='btc_jpy', period='1d', count=100, length=14, to_epoch_time=None):\n try:\n count_needed = 2 * length - 1 + count\n df = DF(get_price_info(currency_pair, period, count_needed, to_epoch_time))\n adx = ab.ADX(df, timeperiod=length, prices=[HIGH, LOW, CLOSE], output_names=['adx']).rename('adx')\n adx = pd.concat([df[TIME], adx], axis=1).dropna()\n return {'success': 1, 'return': {'ADXs': adx.astype(object).to_dict(orient='records')}}\n except Exception as e:\n logger.error(e)\n logger.error(traceback.format_exc())\n return {'success': 0, 'error': e}\n\n\ndef get_macd(currency_pair='btc_jpy', period='1d', count=100, short=12, long=26, signal=9, to_epoch_time=None):\n try:\n count_needed = 
count + long + signal - 2\n df = DF(get_price_info(currency_pair, period, count_needed, to_epoch_time))\n macd = ab.MACD(df, price=CLOSE, fastperiod=short, slowperiod=long, signalperiod=signal)\n macd = pd.concat([df[TIME], macd], axis=1).dropna()\n return {'success': 1, 'return': {'MACDs': macd.astype(object).to_dict(orient='records')}}\n except Exception as e:\n logger.error(e)\n logger.error(traceback.format_exc())\n return {'success': 0, 'error': e}\n\n\ndef get_rsi(currency_pair='btc_jpy', period='1d', count=100, length=14, to_epoch_time=None):\n try:\n count_needed = count + length\n df = DF(get_price_info(currency_pair, period, count_needed, to_epoch_time))\n rsi = ab.RSI(df, price=CLOSE, timeperiod=length).rename('rsi')\n rsi = pd.concat([df[TIME], rsi], axis=1).dropna()\n return {'success': 1, 'return': {'RSIs': rsi.astype(object).to_dict(orient='records')}}\n except Exception as e:\n logger.error(e)\n logger.error(traceback.format_exc())\n return {'success': 0, 'error': e}\n","sub_path":"zaifbot/modules/indicators/indicators.py","file_name":"indicators.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"495826211","text":"#!/usr/bin/env python2\n'''\nRead a pmovie. In the absense of an output name, output\nthe name based on the frame step. With an output name, and\nwhen not outputting directly to hdf, output using dump_pickle.\n\nUsage:\n pmov.py [options] []\n pmov.py [options] [--hdf | -H ] \n\nOptions:\n --help -h Output this help.\n --sort -s Sort the pmovies by IPP.\n --hdf -H Output to hdf5 instead of to a pickle file.\n The group will be based on the step.\n --zip -c Use compression for hdf5.\n --verbose -v Be verbose.\n --lock=L -l L Specify a lock file for synchronized output for hdf5.\n --exp-d=DFILE Experimental hashing. It might work only for uniform\n grids. Specify specification file used to generate hash\n as DFILE.\n --exp-first=DFILE Experimental hashing, see above. Use this file as the\n first file to generate the hash specification from and\n output to DFILE.\n --dir=D -D D Output to this directory if given not name.\n --X -x Use X as a spatial dimension. Similar options below are\n for Y and Z. If none are passed, assume 3D cartesian.\n --Y -y See above.\n --Z -z See above.\n'''\nimport lspreader as rd;\nimport h5py as h5;\nfrom misc import dump_pickle, h5w, mkvprint, readfile;\nfrom docopt import docopt;\nimport numpy as np;\nimport numpy.lib.recfunctions as rfn;\n\ndef sortframe(frame):\n '''\n sorts particles for a frame\n '''\n d = frame['data'];\n sortedargs = np.lexsort([d['xi'],d['yi'],d['zi']])\n d = d[sortedargs];\n frame['data']=d;\n return frame;\n\ndef hdfoutput(outname, frames, dozip=False):\n '''Outputs the frames to an hdf file.'''\n with h5.File(outname,'a') as f:\n for frame in frames:\n group=str(frame['step']);\n h5w(f, frame, group=group,\n compression='lzf' if dozip else None);\n\ndef firsthash(frame,dims,removedupes=False):\n '''\n Hashes the first time step. 
Only will work as long as\n the hash can fit in a i8.\n\n Parameters:\n -----------\n frame : first frame.\n dims : iterable of strings for dimensions.\n\n Keywords:\n ---------\n removedups: specify duplicates for the given frame.\n \n Returns a dictionary of everything needed\n to generate hashes from the genhash function.\n \n '''\n #hashes must have i8 available\n #overwise, we'll have overflow\n def avgdiff(d):\n d=np.sort(d);\n d = d[1:] - d[:-1]\n return np.average(d[np.nonzero(d)]);\n ip = np.array([frame['data'][l] for l in dims]).T;\n avgdiffs = np.array([avgdiff(a) for a in ip.T]);\n mins = ip.min(axis=0);\n ips = (((ip - mins)/avgdiffs).round().astype('i8'))\n pws = np.floor(np.log10(ips.max(axis=0))).astype('i8')+1\n pws = list(pws);\n pw = [0]+[ ipw+jpw for ipw,jpw in\n zip([0]+pws[:-1],pws[:-1]) ];\n pw = 10**np.array(pw);\n #the dictionary used for hashing\n d=dict(labels=dims, mins=mins, avgdiffs=avgdiffs, pw=pw);\n if removedupes:\n hashes = genhash(frame,d,removedupes=False);\n #consider if the negation of this is faster for genhash\n uni,counts = np.unique(hashes,return_counts=True);\n d.update({'dupes': uni[counts>1]})\n return d;\n\ndef genhash(frame,d,removedupes=False):\n '''\n Generate the hashes for the given frame for a specification\n given in the dictionary d returned from firsthash.\n\n Parameters:\n -----------\n frame : frame to hash.\n d : hash specification generated from firsthash.\n\n Keywords:\n ---------\n removedups: put -1 in duplicates\n \n Returns an array of the shape of the frames with hashes.\n '''\n ip = np.array([frame['data'][l] for l in d['labels']]).T;\n scaled = ((ip - d['mins'])/d['avgdiffs']).round().astype('i8');\n hashes = (scaled*d['pw']).sum(axis=1);\n #marking duplicated particles\n if removedupes:\n dups = np.in1d(hashes,d['dupes'])\n hashes[dups] = -1\n return hashes;\ndef addhash(frame,d,removedupes=False):\n '''\n helper function to add hashes to the given frame\n given in the dictionary d returned from firsthash.\n\n Parameters:\n -----------\n frame : frame to hash.\n d : hash specification generated from firsthash.\n\n Keywords:\n ---------\n removedups: put -1 in duplicates\n \n Returns frame with added hashes, although it will be added in\n place.\n '''\n hashes = genhash(frame,d,removedupes);\n frame['data'] = rfn.rec_append_fields(\n frame['data'],'hash',hashes);\n return frame;\n\n#script start. 
This garbage to let people like scott use\n#it without having to call it script wise.\nif __name__=='__main__':\n opts = docopt(__doc__,help=True);\n vprint = mkvprint(opts);\n dims=[]\n if opts['--X']: dims.append('xi');\n if opts['--Y']: dims.append('yi');\n if opts['--Z']: dims.append('zi');\n if len(dims)==0:\n dims=['xi','yi','zi'];\n #reading in using the reader.\n frames=rd.read(opts['']);\n \n if opts['--sort']:\n vprint(\"sorting...\");\n frames[:] = [sortframe(frame) for frame in frames];\n vprint(\"done\");\n #experimental hashing\n if opts['--exp-first']:\n d=firsthash(frames[0],dims, removedupes=True);\n dump_pickle(opts['--exp-first'], d);\n frames[:] = [addhash(frame,d,removedupes=True) for frame in frames];\n elif opts['--exp-d']:\n d = readfile(opts['--exp-d'],dumpfull=True);\n frames[:] = [addhash(frame,d,removedupes=True) for frame in frames];\n #outputting.\n if opts['--hdf']:\n import fasteners;\n output = lambda :hdfoutput(opts[''], frames, opts['--zip']);\n if opts['--lock']:\n output = fasteners.interprocess_locked(opts['--lock'])(output);\n output();\n elif not opts['']:\n for frame in frames:\n outname = \"{}.{}\".format(opts[''],frame['step']);\n if opts['--dir']:\n outname = '{}/{}'.format(opts['--dir'], outname);\n np.savez(outname, **frame);\n else:\n dump_pickle(opts[''], frames);\n","sub_path":"pmov.py","file_name":"pmov.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"233338568","text":"# /usr/bin/python\nimport argparse\nimport copy\nfrom argparse import RawTextHelpFormatter\n\n# variable \n# IGC_reference_database=\"/home1/Laisenying/.local/share/ngless/data/Modules/igc.ngm/0.9/igc.fna\"\n# gene_matrix='/home1/Laisenying/Data-analysis/CRC/04_genecount/DNA_hGEM.txt'\n# output_path='/home1/Laisenying/Data-analysis/CRC/05_geneprofile'\n# cutoff_value = 1.0\n# sample_value = 1\n\ndef parser():\n parser = argparse.ArgumentParser(description='TPM normalization and filtered', formatter_class=RawTextHelpFormatter)\n required = parser.add_argument_group('Required arguments')\n required.add_argument('-i', action='store', type=str, dest=\"gene_matrix\",\n help='hGEM gene matrix', required=True)\n required.add_argument('-minTPM', action='store', type=float, dest=\"cutoff_value\",\n help='min TPM', required=False,default=1.0)\n required.add_argument('-minSam', action='store', type=int, dest=\"sample_value\",\n help='min samples', required=False,default=1) \n required.add_argument('-ref', action='store', type=str, dest=\"IGC_reference_database\",\n help='path to IGC reference database', required=True) \n required.add_argument('-o', action='store', type=str, dest=\"output_path\",\n help='path to output folder', required=True) \n return parser \n \n# return true if at least one element greater than cut off value\n# return false if all element less than cut off value\ndef keep_this_line(aList):\n count = 0\n for element in aList:\n if float(element) >= args.cutoff_value:\n count = count + 1\n if count >= args.sample_value:\n return True\n else:\n return False\n\n \n\nif __name__ == '__main__':\n args = parser().parse_args()\n if args.cutoff_value == 1:\n print(\"the default TPM cut-off value is 1\")\n else:\n print(\"the TPM cut-off value is \" + str(args.cutoff_value))\n \n # get gene length\n IGC_reference_database_file = open(args.IGC_reference_database)\n IGC_reference = IGC_reference_database_file.readlines()\n dictionary = {}\n count = 0 \n while count < 
len(IGC_reference):\n if IGC_reference[count][0] == '>': # it is the header of the sequence\n gene_name = IGC_reference[count].split()[0][1:]\n dictionary[gene_name] = len(IGC_reference[count + 1]) - 1\n temp_count = count + 2\n while temp_count < len(IGC_reference) and IGC_reference[temp_count][0] != '>':\n dictionary[gene_name] = dictionary[gene_name] + len(IGC_reference[temp_count]) - 1\n temp_count = temp_count + 1\n \n count = count + 1\n \n \n # processing DNA matrix\n DNA_matrix_file = open(args.gene_matrix)\n DNA_matrix_data = DNA_matrix_file.readlines()\n DNA_matrix_length_before_filter = len(DNA_matrix_data) - 1\n DNA_matrix_data_original = copy.deepcopy(DNA_matrix_data)\n \n row = 0\n column = 0\n \n # convert DNA_matrix_data to the matrix\n while row < len(DNA_matrix_data):\n DNA_matrix_data[row] = DNA_matrix_data[row].split()\n row = row + 1\n \n # start calculating TPM\n row = 1\n column = 1\n # step 1 divide read counts by gene length in kilobase\n while row < len(DNA_matrix_data):\n column=1\n while column < len(DNA_matrix_data[0]):\n gene_name = DNA_matrix_data[row][0]\n gene_length = dictionary[gene_name]\n DNA_matrix_data[row][column] = str( float( DNA_matrix_data[row][column] ) / ( float(gene_length) / 1000) )\n column = column + 1\n row = row + 1\n \n # step 2: Count up all the RPK values in a sample and divide this number by 1,000,000\n scaling_factor = []\n scaling_factor.append(0)\n row = 1\n column = 1\n while column < len(DNA_matrix_data[0]):\n row = 1\n total=0.0\n while row < len(DNA_matrix_data):\n total = total + float(DNA_matrix_data[row][column])\n row = row + 1\n \n scaling_factor.append(total/1000000)\n column = column + 1\n \n # step 3\n row = 1\n column = 1\n while column < len(DNA_matrix_data[0]):\n row = 1\n while row < len(DNA_matrix_data):\n if(scaling_factor[column] == 0):\n DNA_matrix_data[row][column] = \"0\"\n else:\n DNA_matrix_data[row][column] = str( float( DNA_matrix_data[row][column] ) / scaling_factor[column] )\n row = row + 1\n column = column + 1\n \n # filter TPM\n # write to the file\n DNA_matrix_outFile = open(args.output_path + \"/DNA_hGEM_filt.txt\", \"w\")\n DNA_matrix_outFile.write(DNA_matrix_data_original[0])\n count = 1\n DNA_matrix_length_after_filter = 0\n while count < len(DNA_matrix_data_original):\n if keep_this_line(DNA_matrix_data[count][1:]) == True:\n DNA_matrix_data[count].append('\\n')\n DNA_matrix_outFile.write(\"\\t\".join(DNA_matrix_data[count]))\n DNA_matrix_length_after_filter = DNA_matrix_length_after_filter + 1\n count = count + 1\n \n DNA_matrix_file.close()\n DNA_matrix_outFile.close()\n \n # write summary file\n outFile_summary = open(args.output_path + \"/hGEM_filt.summary\", \"w\")\n outFile_summary.write(\"DNA TPM filtering:\\n\")\n rate = float((DNA_matrix_length_before_filter - DNA_matrix_length_after_filter) / float(DNA_matrix_length_before_filter)) * 100\n outFile_summary.write(\"Total: \" + str(DNA_matrix_length_before_filter) + \" genes\\tRemoval: \" + str(DNA_matrix_length_before_filter - DNA_matrix_length_after_filter) + \" genes\\tRemaining: \" + str(DNA_matrix_length_after_filter) + \" genes\\tFilter_rate: \" + str(rate) + \"\\n\\n\")\n\n \n \n","sub_path":"normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"354905212","text":"from hstest.stage_test import *\nfrom hstest.test_case import TestCase\nfrom hstest.check_result import CheckResult\n\nfrom random 
import shuffle\n\nCheckResult.correct = lambda: CheckResult(True, '')\nCheckResult.wrong = lambda feedback: CheckResult(False, feedback)\n\ndescription_list = ['python', 'java', 'kotlin', 'javascript']\nout_of_description = ['clojure', 'haskell', 'typescript', 'assembler']\n\ncatch = {i: 0 for i in description_list}\n\n\nclass CoffeeMachineTest(StageTest):\n def generate(self) -> List[TestCase]:\n tests = []\n\n for word in description_list + out_of_description:\n for i in range(100):\n tests += [TestCase(stdin=word, attach=word)]\n\n shuffle(tests)\n\n word = 'last'\n tests += [TestCase(stdin=word, attach=word)]\n return tests\n\n def check(self, reply: str, attach: Any) -> CheckResult:\n\n survived = 'You survived!'\n hanged = 'You lost!'\n\n is_survived = survived in reply\n is_hanged = hanged in reply\n\n if is_survived and is_hanged:\n return CheckResult.wrong(\n f'Looks like your output contains both \\\"{survived}\\\"'\n f' and \\\"{hanged}\\\". You should output only one of them.'\n )\n\n if not is_survived and not is_hanged:\n return CheckResult.wrong(\n f'Looks like your output doesn\\'t contain neither \\\"{survived}\\\"'\n f' nor \\\"{hanged}\\\". You should output one of them.'\n )\n\n if attach in out_of_description:\n if is_survived:\n return CheckResult.wrong(\n f'Input contains a word out of the '\n f'list form the description but the '\n f'program output \\\"{survived}\\\"'\n )\n else:\n return CheckResult.correct()\n\n elif attach in description_list:\n\n if is_survived:\n hidden_attach = attach[:3] + '-'*len(attach[3:])\n if hidden_attach not in reply:\n return CheckResult.wrong(\n f'Program guessed the word \\\"{attach}\\\" '\n f'and should output clue \\\"{hidden_attach}\\\" '\n f'but this line is not in the output'\n )\n\n catch[attach] += is_survived\n return CheckResult.correct()\n\n else:\n if any(v == 0 for v in catch.values()):\n return CheckResult.wrong(\n \"Looks like your program is not using \"\n \"all of the words to guess from the list in description\"\n )\n else:\n return CheckResult.correct()\n\n\nif __name__ == '__main__':\n CoffeeMachineTest('hangman.hangman').run_tests()\n","sub_path":"Hangman/task/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"545075844","text":"\"\"\"df module for the cli.\"\"\"\nimport subprocess as su\n\nimport click\nimport texttable\n\nimport iocage.lib.ioc_common as ioc_common\nimport iocage.lib.ioc_json as ioc_json\nimport iocage.lib.ioc_list as ioc_list\n\n\n@click.command(name=\"df\", help=\"Show resource usage of all jails.\")\n@click.option(\"--header\", \"-h\", \"-H\", is_flag=True, default=True,\n help=\"For scripting, use tabs for separators.\")\n@click.option(\"--long\", \"-l\", \"_long\", is_flag=True, default=False,\n help=\"Show the full uuid.\")\n@click.option(\"--sort\", \"-s\", \"_sort\", default=\"tag\", nargs=1,\n help=\"Sorts the list by the given type\")\ndef cli(header, _long, _sort):\n \"\"\"Allows a user to show resource usage of all jails.\"\"\"\n jails, paths = ioc_list.IOCList(\"uuid\").list_datasets()\n pool = ioc_json.IOCJson().json_get_value(\"pool\")\n jail_list = []\n table = texttable.Texttable(max_width=0)\n\n for jail in jails:\n full_uuid = jails[jail]\n\n if not _long:\n uuid = full_uuid[:8]\n else:\n uuid = full_uuid\n\n path = paths[jail]\n conf = ioc_json.IOCJson(path).json_load()\n zconf = [\"zfs\", \"get\", \"-H\", \"-o\", \"value\"]\n mountpoint = 
f\"{pool}/iocage/jails/{full_uuid}\"\n\n tag = conf[\"tag\"]\n template = conf[\"type\"]\n\n if template == \"template\":\n mountpoint = f\"{pool}/iocage/templates/{tag}\"\n\n compressratio = su.Popen(zconf + [\"compressratio\", mountpoint],\n stdout=su.PIPE).communicate()[0].decode(\n \"utf-8\").strip()\n reservation = su.Popen(zconf + [\"reservation\", mountpoint],\n stdout=su.PIPE).communicate()[0].decode(\n \"utf-8\").strip()\n quota = su.Popen(zconf + [\"quota\", mountpoint],\n stdout=su.PIPE).communicate()[0].decode(\n \"utf-8\").strip()\n used = su.Popen(zconf + [\"used\", mountpoint],\n stdout=su.PIPE).communicate()[0].decode(\n \"utf-8\").strip()\n available = su.Popen(zconf + [\"available\", mountpoint],\n stdout=su.PIPE).communicate()[0].decode(\n \"utf-8\").strip()\n\n jail_list.append([uuid, compressratio, reservation, quota, used,\n available, tag])\n\n sort = ioc_common.ioc_sort(\"df\", _sort)\n jail_list.sort(key=sort)\n if header:\n jail_list.insert(0, [\"UUID\", \"CRT\", \"RES\", \"QTA\", \"USE\", \"AVA\", \"TAG\"])\n # We get an infinite float otherwise.\n table.set_cols_dtype([\"t\", \"t\", \"t\", \"t\", \"t\", \"t\", \"t\"])\n table.add_rows(jail_list)\n\n ioc_common.logit({\n \"level\" : \"INFO\",\n \"message\": table.draw()\n })\n else:\n for jail in jail_list:\n ioc_common.logit({\n \"level\" : \"INFO\",\n \"message\": \"\\t\".join(jail)\n })\n","sub_path":"iocage/cli/df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"304553119","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 21 16:37:10 2019\n\n@author: alessandro\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport matplotlib.pyplot as plt\n\n\ntf.reset_default_graph()\n\n# Create HiddenLayer class\n# Create Model class with replay buffer and copy method\n\n\nclass HiddenLayer:\n \n def __init__(self, layer_name, input_size, n_nodes, activation=tf.nn.tanh):\n \n self.f = activation\n \n with tf.variable_scope(layer_name):\n self.W = tf.get_variable(name=\"W\", \n shape=(input_size, n_nodes),\n dtype=tf.float32)\n \n self.b = tf.get_variable(name=\"b\",\n shape=(1, n_nodes),\n dtype=tf.float32)\n \n self.params = [self.W, self.b]\n \n \n def forward(self, X):\n \n out = tf.matmul(X, self.W) + self.b\n out = self.f(out)\n \n return out\n \n \n\nclass Model:\n \n def __init__(self, name, input_size, n_outputs, architecture, lr=10e-4, max_experience = 10000, min_experience = 100, batch_size=32):\n \n self.session = None\n self.lr = lr\n self.layers = []\n self.experience = {\"s1\": [], \"a\": [], \"r\": [], \"s2\": [], \"done\": []}\n self.max_experience = max_experience\n self.min_experience = min_experience\n self.batch_size = batch_size\n self.costs = []\n self.X = tf.placeholder(dtype=tf.float32,\n shape=(None, input_size))\n self.y = tf.placeholder(dtype=tf.float32,\n shape=(None,))\n self.actions = tf.placeholder(dtype=tf.int32,\n shape=(None,))\n \n for c, l in enumerate(architecture):\n \n layer = HiddenLayer(layer_name=\"{}/Layer{}\".format(name, c),\n input_size=input_size,\n n_nodes=l,\n activation=tf.nn.tanh)\n \n self.layers.append(layer)\n input_size = l\n \n layer = HiddenLayer(layer_name=\"{}/Layer{}\".format(name, len(architecture)),\n input_size=input_size,\n n_nodes=n_outputs,\n activation=lambda x: x)\n \n self.layers.append(layer)\n \n Z = self.X\n for layer in self.layers:\n Z = layer.forward(Z)\n \n self.output = Z\n 
\n specific_out = tf.reduce_sum(self.output * tf.one_hot(self.actions, depth=2), \n axis=1) #depth hardcoded, poi cambiala\n squared_diff = tf.square(self.y-specific_out)\n self.cost = tf.reduce_sum(squared_diff) # reduce_mean converge ovviamente più lentamente\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n self.train_op = self.optimizer.minimize(self.cost)\n \n \n def copy(self, other):\n \n ops = []\n print(\"Updating target model...\")\n \n for own_layer, other_layer in zip(self.layers, other.layers):\n \n actual = self.session.run(other_layer.params)\n for c, p in enumerate(actual):\n op = tf.assign(own_layer.params[c], p)\n ops.append(op)\n \n self.session.run(ops)\n \n \n def predict(self, X):\n \n X = X.reshape(1, -1)\n preds = self.session.run(self.output, feed_dict={self.X:X})\n \n return preds\n \n \n def set_session(self, session):\n \n self.session = session\n \n \n def add_exp(self, s1, a, r, s2, done):\n \n # Sicuramente snellibile\n \n self.experience[\"s1\"].append(s1)\n self.experience[\"a\"].append(a)\n self.experience[\"r\"].append(r)\n self.experience[\"s2\"].append(s2)\n self.experience[\"done\"].append(done)\n \n if len(self.experience[\"s1\"]) > self.max_experience:\n self.experience[\"s1\"].pop(0)\n self.experience[\"a\"].pop(0)\n self.experience[\"r\"].pop(0)\n self.experience[\"s2\"].pop(0)\n self.experience[\"done\"].pop(0)\n \n \n def train(self, env, target_model, gamma=0.99):\n \n # ci si allena solo sulla base del buffer di exp, non della iterazione in cui viene chiamato\n \n if len(self.experience[\"s1\"]) < self.min_experience:\n return\n \n idxs = np.random.choice(len(self.experience[\"s1\"]), size=self.batch_size, replace=False)\n s1 = []\n a = []\n g = []\n for idx in idxs:\n s1.append(self.experience[\"s1\"][idx])\n a.append(self.experience[\"a\"][idx])\n s2 = self.experience[\"s2\"][idx]\n r = self.experience[\"r\"][idx]\n done = self.experience[\"done\"][idx]\n if done:\n ret = r\n else:\n ret = r + gamma*np.max(target_model.predict(s2))\n g.append(ret)\n \n cost, _ = self.session.run([self.cost, self.train_op], feed_dict={self.X:s1,\n self.y:g,\n self.actions:a})\n \n self.costs.append(cost)\n \n \n\ndef playOne(model, env, target_model, eps, copy_period=50):\n \n s1 = env.reset()\n done = False\n total_reward = 0\n iterations = 0\n\n while not done:\n iterations += 1\n if np.random.rand() <= eps:\n a = env.action_space.sample()\n else:\n a = np.argmax(model.predict(s1))\n s2, reward, done, _ = env.step(a)\n total_reward += reward\n model.add_exp(s1, a, reward, s2, done)\n model.train(env, target_model)\n if iterations % copy_period == 0:\n target_model.copy(model)\n s1 = s2\n \n return total_reward\n\n\ndef playMultiple(model, target_model, env, epochs=3000):\n \n rewards = 0\n alpha = 0.1\n improvements = 0\n max_tot_r = 0\n \n for i in range(epochs):\n \n eps = 1.0/np.sqrt(improvements+1)\n\n tot_r = playOne(model, env, target_model, eps)\n if tot_r > max_tot_r or tot_r >= 200:\n improvements += 1\n max_tot_r = tot_r\n if i == 0:\n rewards = tot_r\n else:\n rewards = (1-alpha) * rewards + alpha * tot_r\n \n if i % 100 == 0:\n print(\"Current mean: {}, Current epsilon: {}\".format(rewards, eps))\n \n return rewards\n \n\n\nif __name__ == \"__main__\":\n \n # learning rate migliore: 10e-4\n \n env = gym.make(\"CartPole-v0\")\n model = Model(name=\"MainModel\",\n input_size=env.observation_space.shape[0], \n n_outputs=env.action_space.n,\n architecture=[200, 200])\n target_model = Model(name=\"TargetModel\",\n 
input_size=env.observation_space.shape[0], \n n_outputs=env.action_space.n,\n architecture=[200, 200])\n \n with tf.Session() as sess:\n \n sess.run(tf.global_variables_initializer())\n model.set_session(sess)\n target_model.set_session(sess)\n \n rewards = playMultiple(model=model,\n target_model=target_model,\n env=env)\n \n #plt.title(\"Model costs\")\n# plt.plot(model.costs)\n# plt.show()\n","sub_path":"reinforcement_learning/deep_rl/DQL2.py","file_name":"DQL2.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"216613698","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n 加载提供从json文件里提取的资源数据\n\"\"\"\n\nimport os, json, random\nfrom utils import log as logger\n\n\nmedia_dict = {} # 保存的media数据,dict结构\n\ndef initialize(root_path):\n \n media_file = os.path.join(root_path, 'resource', 'media.json')\n with open(media_file, 'r') as f:\n data = f.read()\n global media_dict\n media_dict = json.loads(data)\n logger.info('loads media dict:', media_dict)\n\n\ndef getRandomMusic():\n \"\"\" 随机获取一首音乐\n \"\"\"\n music_names = media_dict.keys()\n select_music = random.choice(music_names)\n music_url = media_dict.get(select_music)\n logger.info('choice random music:', music_url)\n return music_url\n\n","sub_path":"src/utils/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"492292707","text":"import numpy as np\nimport utils\n\n\nclass Garnet:\n def __init__(self, num_state, num_action, branching_factor, num_features):\n self.num_state = num_state\n self.num_action = num_action\n self.branching_factor = branching_factor\n self.num_features = num_features\n\n self.behavior_policy = utils.get_uniform_policy(num_state, num_action)\n self.state_action_trans_kernel = utils.get_random_state_action_trans_kernel(num_state, num_action)\n self.trans_kernel = np.einsum('iij->ij', self.behavior_policy.dot(self.state_action_trans_kernel))\n self.features = utils.get_features(num_action, num_state, num_features)\n\n self.state_space = np.arange(num_state)\n self.action_space = np.arange(num_action)\n self.current_state = self.state_space[0]\n self.reward = np.random.uniform(size=num_state)\n\n def set_behavior_policy(self, policy):\n self.behavior_policy = policy\n self.trans_kernel = np.einsum('iij->ij', self.behavior_policy.dot(self.state_action_trans_kernel))\n\n def reset(self):\n self.current_state = self.state_space[0]\n\n def phi_table(self, action, state):\n return self.features[action, state, :].reshape((self.num_features, 1))\n\n def bellman_operator(self, v_theta, gamma=0.95):\n stationary = np.diag(utils.compute_stationary_dist(self.trans_kernel))\n inv = np.matmul(np.matmul(np.transpose(self.features), stationary), self.features)\n inv = np.linalg.inv(inv)\n projecion = np.matmul(np.matmul(self.features, inv), np.transpose(self.features))\n projecion = np.matmul(projecion, stationary)\n return self.reward.reshape(self.num_state, 1) + gamma*np.matmul(projecion, np.matmul(self.trans_kernel, v_theta))\n\n def step(self):\n \"\"\"\n :return: next state, reward, action\n \"\"\"\n # randomly pick one action based on the current state\n action = np.random.choice(a=self.action_space, p=self.behavior_policy[self.current_state, :])\n # randomly pick the next state\n probs = self.state_action_trans_kernel[self.current_state, action, :]\n next_state = np.random.choice(a=self.state_space, 
p=probs)\n reward = self.reward[next_state]\n\n self.current_state = np.copy(next_state)\n return next_state, reward, action\n\n def sample(self):\n action = np.random.choice(a=self.action_space, p=self.behavior_policy[self.current_state, :])\n # randomly pick the next state\n probs = self.state_action_trans_kernel[self.current_state, action, :]\n next_state = np.random.choice(a=self.state_space, p=probs)\n reward = self.reward[next_state]\n return self.current_state, action, next_state, reward\n\n def get_copy(self):\n env = Garnet(self.num_state, self.num_action, self.branching_factor, self.num_features)\n env.set_behavior_policy(policy=self.behavior_policy)\n env.state_action_trans_kernel = np.copy(self.state_action_trans_kernel)\n env.features = np.copy(self.features)\n env.reward = np.copy(self.reward)\n return env","sub_path":"garnet.py","file_name":"garnet.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"277500406","text":"from flask import Flask, render_template, send_file\nfrom flask_sqlalchemy import SQLAlchemy\n\nimport pandas as pd\n\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom datetime import datetime\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\n@app.route(\"/\", methods=['POST', 'GET'])\ndef hello():\n return render_template('index.html')\n\n\nclass Sino_trade(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n region = db.Column(db.String(50), nullable=False)\n station = db.Column(db.String(50), nullable=False)\n article = db.Column(db.String(50), nullable=False)\n number = db.Column(db.String(50), nullable=False)\n sale = db.Column(db.Integer, default=0)\n rest = db.Column(db.Integer, default=0)\n month = db.Column(db.String(50), nullable=False)\n aroma_type = db.Column(db.String(50), nullable=False)\n date_created = db.Column(db.DateTime, default=datetime.utcnow)\n\n def __repr__(self):\n return '' % self.id\n\n\n@app.route(\"/month\")\ndef month():\n sql = 'select distinct month from sino_trade;'\n df = pd.read_sql(sql, db.engine)\n print(df)\n return render_template('month.html', data=df['month'])\n\n\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nfrom io import BytesIO\n\n\n@app.route(\"/calc\")\ndef calc():\n return render_template('calc.html')\n\n\n@app.route('/donut_pie_chart/')\ndef donut_pie_chart():\n sql = 'select station, number, sale ' \\\n 'from sino_trade ' \\\n 'where region=\\'Актау\\' ' \\\n 'and month=\\'nov\\';'\n df = pd.read_sql(sql, db.engine)\n\n #----------------------------------\n sb.set_style('whitegrid')\n plt.plot(df['sale'])\n\n img = BytesIO()\n plt.savefig(img)\n img.seek(0)\n #----------------------------------\n\n #----------------------------------\n # sb.set_style('whitegrid')\n # fig = plt.figure()\n # ax = fig.add_axes([0, 0, 1, 1])\n # ax.plot(df['sale'])\n\n # img = BytesIO()\n # fig.savefig(img)\n # img.seek(0)\n #----------------------------------\n\n return send_file(img, mimetype='image/png')\n\n\n@app.route(\"/chart\")\ndef chart():\n #----------------------------------\n # legend = 'Monthly Data'\n # labels = ['January', 'February', 'March',\n # 'April', 'May', 'June',\n # 'July', 'August']\n # values = [10, 9, 8, 7, 6, 4, 7, 8]\n # return render_template('chart.html', values=values, labels=labels, legend=legend)\n # ----------------------------------\n\n legend = 'Monthly Data'\n 
sql = 'select station, number, sale ' \\\n 'from sino_trade ' \\\n 'where region=\\'Актау\\' ' \\\n 'and month=\\'nov\\';'\n df = pd.read_sql(sql, db.engine)\n return render_template('chart.html',\n values=df['sale'],\n labels=df['station'],\n legend=legend\n )\n\n\nif __name__ == '__main__':\n app.run(port=5000)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"412189312","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2020 Pedro Heleno Isolani\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"LVAP Manager App.\"\"\"\n\nfrom empower.core.app import EmpowerApp\nimport socket\nimport threading\nfrom empower.core.app import DEFAULT_PERIOD\n\n\nclass UplinkStatsManager(EmpowerApp):\n \"\"\"Uplink Stats Manager App\n\n Command Line Parameters:\n tenant_id: tenant id\n\n Example:\n ./empower-runtime.py empower.apps.handlers.uplinkstatshandler \\\n --tenant_id=52313ecb-9d00-4b7d-b873-b55d3d9ada26D\n /\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__lvap_manager = {\n \"message\": \"Uplink Stats Manager is online!\",\n \"lvaps\": {\n 'DC:A6:32:65:E7:AA': {\n 'ip_addr': '192.168.2.21',\n 'queueing_delay': None\n },\n # 'DC:A6:32:0A:E1:D4': {\n # 'ip_addr': '192.168.2.22',\n # 'queueing_delay': None\n # },\n # 'DC:A6:32:0A:E0:C9': {\n # 'ip_addr': '192.168.2.23',\n # 'queueing_delay': None\n # },\n # 'DC:A6:32:0A:B8:22': {\n # 'ip_addr': '192.168.2.24',\n # 'queueing_delay': None\n # },\n # '00:00:00:00:00:00': {\n # 'ip_addr': '192.168.2.25',\n # 'queueing_delay': None\n # },\n # '00:00:00:00:00:00': {\n # 'ip_addr': '192.168.2.26',\n # 'queueing_delay': None\n # }\n }\n }\n\n def loop(self):\n \"\"\"Periodic job.\"\"\"\n for lvap in self.lvaps():\n crr_lvap_addr = str(lvap.addr)\n if crr_lvap_addr in self.__lvap_manager['lvaps']:\n # TODO: create a function with a thread for each lvap\n self.get_config_from_lvap(crr_lvap_addr)\n\n if self.__db_monitor is not None:\n fields = ['LVAP_ADDR', 'QUEUEING_DELAY_MS']\n addr = crr_lvap_addr\n queueing_delay = self.__lvap_manager['lvaps'][crr_lvap_addr]['queueing_delay']\n values = [addr, queueing_delay]\n\n # Saving into db\n self.monitor.insert_into_db(table='lvap_delay_stats', fields=fields, values=values)\n\n if self.__db_monitor is not None:\n self.monitor.keep_last_measurements_only(table='lvap_delay_stats')\n\n def get_config_from_lvap(self, crr_lvap_addr):\n # TODO: get config from lvap (thread)\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.settimeout(10)\n s.connect(self.__lvap_manager['lvaps'][crr_lvap_addr]['ip_addr'], 7777)\n cmd = \"READ infobase.delay\"\n s.sendall(cmd.encode())\n data = s.recv(1024)\n self.log.debug(\"Requesting data from LVAP\")\n queueing_delay = data.decode()[3]\n self.__lvap_manager['lvaps'][crr_lvap_addr]['queueing_delay'] = queueing_delay\n except:\n 
self.__lvap_manager['lvaps'][crr_lvap_addr]['queueing_delay'] = None\n raise ValueError(\"Timeout requesting delay stats from LVAP\")\n\n @property\n def every(self):\n \"\"\"Return loop period.\"\"\"\n return self.__every\n\n @every.setter\n def every(self, value):\n \"\"\"Set loop period.\"\"\"\n self.log.info(\"Setting control loop interval to %ums\", int(value))\n self.__every = int(value)\n super().restart(self.__every)\n\n @property\n def lvap_manager(self):\n \"\"\"Return default LVAP Manager\"\"\"\n return self.__lvap_manager\n\n @lvap_manager.setter\n def lvap_manager(self, value):\n \"\"\"Set WiFi LVAP Manager\"\"\"\n self.__lvap_manager = value\n\n def to_dict(self):\n \"\"\" Return a JSON-serializable.\"\"\"\n return self.__lvap_manager\n\n\ndef launch(tenant_id, every=DEFAULT_PERIOD):\n \"\"\" Initialize the module. \"\"\"\n\n return UplinkStatsManager(tenant_id=tenant_id, every=every)","sub_path":"empower/apps/handlers/uplinkstatshandler.py","file_name":"uplinkstatshandler.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"262303209","text":"\"\"\"\nText Reader Script\n=================\nThis script will list and display text files\n\"\"\"\nimport buttons\nimport color\nimport display\nimport os\nimport utime\n\nSTATE_LIST = \"List\"\nSTATE_SHOW = \"Show\"\nSPECIAL_NO_FILES = \"# no txt files\"\nSPECIAL_EXIT = \"[ exit ]\"\nSPECIAL_EMPTY = \"# empty file\"\nBUTTON_TIMER_POPPED = -1\n\n\ndef list_files():\n \"\"\"Create a list of available text files.\"\"\"\n files = sorted(os.listdir(\"/\"))\n\n # Filter for text files\n files = [txt for txt in files if txt.endswith(\".txt\")]\n\n return files\n\n\ndef triangle(disp, x, y, left):\n \"\"\"Draw a triangle to show there's more text in this line\"\"\"\n yf = 1 if left else -1\n scale = 6\n disp.line(x - scale * yf, int(y + scale / 2), x, y, col=[255, 0, 0])\n disp.line(x, y, x, y + scale, col=[255, 0, 0])\n disp.line(x, y + scale, x - scale * yf, y + int(scale / 2), col=[255, 0, 0])\n\n\ndef button_events(timeout=0):\n \"\"\"Iterate over button presses (event-loop).\"\"\"\n yield 0\n button_pressed = False\n count = 0\n while True:\n v = buttons.read(buttons.BOTTOM_LEFT | buttons.BOTTOM_RIGHT | buttons.TOP_RIGHT)\n if timeout > 0 and count > 0 and count % timeout == 0:\n yield BUTTON_TIMER_POPPED\n\n if timeout > 0:\n count += 1\n\n if v == 0:\n button_pressed = False\n\n if not button_pressed and v & buttons.BOTTOM_LEFT != 0:\n button_pressed = True\n yield buttons.BOTTOM_LEFT\n\n if not button_pressed and v & buttons.BOTTOM_RIGHT != 0:\n button_pressed = True\n yield buttons.BOTTOM_RIGHT\n\n if not button_pressed and v & buttons.TOP_RIGHT != 0:\n button_pressed = True\n yield buttons.TOP_RIGHT\n\n utime.sleep_ms(10)\n\n\nCOLOR1, COLOR2 = (color.CHAOSBLUE_DARK, color.CHAOSBLUE)\n\n\ndef file_len(filename):\n i = -1\n with open(filename) as fh:\n for i, l in enumerate(fh):\n pass\n return i + 1\n\n\ndef draw_filecontent(disp, filename, pos, linecount, lineoffset=0):\n disp.clear()\n with open(filename) as fh:\n\n # stop if file is empty\n if linecount <= 0:\n disp.print(SPECIAL_EMPTY, posy=20, bg=color.BLACK)\n return\n\n # calc start position\n start = 0\n if pos > 0:\n start = pos - 1\n if start + 4 > linecount:\n start = linecount - 4\n if start < 0:\n start = 0\n\n # loop throuhg all lines\n for i, line in enumerate(fh):\n if i >= start + 4 or i >= linecount:\n break\n if i >= start:\n disp.rect(\n 0,\n (i - start) * 20,\n 
159,\n (i - start) * 20 + 20,\n col=COLOR1 if i == pos else COLOR2,\n )\n\n off = 0\n linelength = len(line)\n if i == pos and linelength > 11 and lineoffset > 0:\n off = (\n lineoffset if lineoffset + 11 < linelength else linelength - 11\n )\n if lineoffset > linelength:\n off = 0\n\n disp.print(\n line[off : (off + 11)],\n posy=(i - start) * 20,\n bg=COLOR1 if i == pos else COLOR2,\n )\n if linelength > 11 and off < linelength - 11:\n triangle(disp, 153, (i - start) * 20 + 6, False)\n if off > 0:\n triangle(disp, 6, (i - start) * 20 + 6, True)\n\n disp.update()\n\n\ndef draw_filelist(disp, filelist, pos, filecount, lineoffset):\n disp.clear()\n\n start = 0\n if pos > 0:\n start = pos - 1\n if start + 4 > filecount:\n start = filecount - 4\n if start < 0:\n start = 0\n\n for i, line in enumerate(filelist):\n if i >= start + 4 or i >= filecount:\n break\n if i >= start:\n disp.rect(\n 0,\n (i - start) * 20,\n 159,\n (i - start) * 20 + 20,\n col=COLOR1 if i == pos else COLOR2,\n )\n\n off = 0\n linelength = len(line)\n if i == pos and linelength > 10 and lineoffset > 0:\n off = lineoffset if lineoffset + 10 < linelength else linelength - 10\n if lineoffset > linelength:\n off = 0\n\n disp.print(\n \" \" + line[off : (off + 10)],\n posy=(i - start) * 20,\n bg=COLOR1 if i == pos else COLOR2,\n )\n if i == pos:\n disp.print(\">\", posy=(i - start) * 20, fg=color.COMMYELLOW, bg=COLOR1)\n\n if linelength > 10 and off < linelength - 10:\n triangle(disp, 153, (i - start) * 20 + 6, False)\n if off > 0:\n triangle(disp, 24, (i - start) * 20 + 6, True)\n\n disp.update()\n\n\ndef main():\n disp = display.open()\n current_state = STATE_LIST\n\n # list files variables\n\n filelist = list_files()\n if len(filelist) == 0:\n filelist.append(SPECIAL_NO_FILES)\n filelist.append(SPECIAL_EXIT)\n numfiles = len(filelist)\n current_file = 0\n\n # show files variables\n\n filename = \"\"\n linecount = 0\n linepos = 0\n lineoffset = 0\n lineoffdir = 0\n timerscrollspeed = 1\n timerstartscroll = 5\n timercountpopped = 0\n\n for ev in button_events(10):\n\n # list files\n\n if current_state == STATE_LIST:\n if ev == buttons.BOTTOM_RIGHT:\n # Scroll down\n current_file = (current_file + 1) % numfiles\n lineoffset = 0\n timercountpopped = 0\n\n elif ev == buttons.BOTTOM_LEFT:\n # Scroll up\n current_file = (current_file + numfiles - 1) % numfiles\n lineoffset = 0\n timercountpopped = 0\n\n elif ev == BUTTON_TIMER_POPPED:\n timercountpopped += 1\n if (\n timercountpopped >= timerstartscroll\n and (timercountpopped - timerstartscroll) % timerscrollspeed == 0\n ):\n lineoffset += 1\n\n elif ev == buttons.TOP_RIGHT:\n filename = filelist[current_file % numfiles]\n\n # exit or ignore\n if filename == SPECIAL_EXIT:\n os.exit()\n elif filename == SPECIAL_NO_FILES:\n continue\n\n # show file, switch state and draw\n current_state = STATE_SHOW\n disp.clear().update()\n\n # reset variables\n linepos = 0\n lineoffset = 0\n timercountpopped = 0\n linecount = file_len(filename)\n\n # draw\n draw_filecontent(disp, filename, linepos, linecount, lineoffset)\n continue\n\n draw_filelist(disp, filelist, current_file, numfiles, lineoffset)\n\n # show files\n\n elif current_state == STATE_SHOW:\n if ev == buttons.BOTTOM_RIGHT:\n if linepos < (linecount - 1):\n # Scroll down\n linepos += 1\n else:\n # goto first line\n linepos = 0\n lineoffset = 0\n timercountpopped = 0\n\n elif ev == buttons.BOTTOM_LEFT:\n if linepos > 0:\n # Scroll up\n linepos -= 1\n else:\n # got to last line\n linepos = linecount - 1\n lineoffset = 0\n 
timercountpopped = 0\n\n elif ev == BUTTON_TIMER_POPPED:\n timercountpopped += 1\n if (\n timercountpopped >= timerstartscroll\n and (timercountpopped - timerstartscroll) % timerscrollspeed == 0\n ):\n lineoffset += 1\n\n elif ev == buttons.TOP_RIGHT:\n # go back to file menu\n current_state = STATE_LIST\n lineoffset = 0\n timercountpopped = 0\n draw_filelist(disp, filelist, current_file, numfiles, 0)\n continue\n\n draw_filecontent(disp, filename, linepos, linecount, lineoffset)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"preload/apps/text_reader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"570575780","text":"#!/usr/bin/env python\n#coding=utf-8\n\nclass lists:\n # 類別變數\n class_variable = []\n def __init__(self):\n # self.keywords 是實例變數\n self.instance_variable = []\n# 建立instance a與b\na = lists()\nb = lists()\n\n# 給類別變數值\na.class_variable.extend([1,2,3,4,5])\n\n# 呼叫instance a與b\nprint(\"call a\", a.class_variable)\nprint(\"call b\", b.class_variable)\n\n# 建立instance c與d\nc = lists()\nd = lists()\n\n# 給實例變數值\nc.instance_variable.extend([\"a\",\"b\",\"c\",\"d\",\"e\"])\n\n# 呼叫instance c與d\nprint(\"call c\", c.instance_variable)\nprint(\"call d\", d.instance_variable)\n\n\n\n\n\n","sub_path":"source code/code-Python 3.0.1/ch604.py","file_name":"ch604.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"450691605","text":"import csv\n# Read in raw data from csv\nrawData = csv.reader(open('filename.csv', 'rb'), dialect='excel')\n# the template. where data from the csv will be formatted to geojson\ntemplate = \\\n ''' \\\n { \"type\" : \"Feature\",\n \"geometry\" : {\n \"type\" : \"Point\",\n \"coordinates\" : [%s, %s]},\n \"properties\" : { \"id\" : %s, \"unixTime\" : \"%s\", \"msgtext\" : \"%s\", \"userID\": \"%s\"}\n },\n '''\n# the head of the geojson file\noutput = \\\n ''' \\\n{ \"type\" : \"Feature Collection\",\n \"features\" : [\n '''\n# loop through the csv by row skipping the first\niter = 0\nfor row in rawData:\n # iter += 1\n # if iter >= 2:\n id = row[2]\n lat = row[5]\n lon = row[6]\n unixTime = row[1]\n msgtext = row[3]\n userID = row[4]\n # output += template % (row[0], row[2], row[1], row[3], row[4])\n output += template % (lon, lat, id, unixTime, msgtext, userID)\n \n# the tail of the geojson file\noutput += \\\n ''' \\\n ]\n}\n '''\n \n# opens an geoJSON file to write the output to\noutFileHandle = open(\"filename.geojson\", \"w\")\noutFileHandle.write(output)\noutFileHandle.close()","sub_path":"csvtogeojson.py","file_name":"csvtogeojson.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"351402646","text":"def selection_sort(alist):\n for fillslots in range(len(alist)-1,0,-1):\n positionOfMax = 0\n for location in range(1,fillslots+1):\n if alist[location] > alist[positionOfMax]:\n positionOfMax = location\n\n temp = alist[fillslots]\n alist[fillslots] = alist[positionOfMax]\n alist[positionOfMax] = temp\n\nalist = [54,26,93,17,77,31,44,55,20]\nselection_sort(alist)\nprint(alist)","sub_path":"SortingAndSearching/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"544751753","text":"# Ray 
Peng\n# Platforming game\n# Mr Blake\n\n# 'W' to jump, 'A' to move left, 'D' to move right\n\n# region imports\n\nimport pygame, sys, random, math, os\nfrom pygame.locals import *\n\npygame.init()\n\n# endregion\n\n# region variables\n\nwindowSurfaceObj = pygame.display.set_mode((1280, 720))\ndir_path = os.path.dirname(os.path.realpath(__file__))\npygame.display.set_caption('Platformer')\ngameStart = False\nfpsClock = pygame.time.Clock()\nclrBlack = pygame.Color(0, 0, 0)\nclrRed = pygame.Color(255, 0, 0)\nclrGreen = pygame.Color(0, 255, 0)\nclrWhite = pygame.Color(255, 255, 255)\nclrDrkGrey = pygame.Color(102, 102, 102)\nanimImages = [pygame.image.load(dir_path + r'\\Sprites\\Main Character\\charIdle.png'), pygame.image.load(dir_path + r'\\Sprites\\Main Character\\charJumpRight.png'), pygame.image.load(dir_path + r'\\Sprites\\Main Character\\charWalk1Right.png'), pygame.image.load(dir_path + r'\\Sprites\\Main Character\\charWalk2Right.png'), pygame.image.load(dir_path + r'\\Sprites\\Main Character\\charJumpLeft.png'), pygame.image.load(dir_path + r'\\Sprites\\Main Character\\charWalk1Left.png'), pygame.image.load(dir_path + r'\\Sprites\\Main Character\\charWalk2Left.png')]\nanimNum = 0\nanimArrayNum = 0\nanimTimer = 0\nbckgrdImage = pygame.image.load(dir_path + r'\\Sprites\\background.png')\nstartmenu = pygame.image.load(dir_path + r'\\Sprites\\startmenu.png')\nrestartmenu = pygame.image.load(dir_path + r'\\Sprites\\restartmenu.png')\nprizeimage = pygame.image.load(dir_path + r'\\Sprites\\prize.png')\nplaylist = list()\nplaylist.append(dir_path + r'\\Sounds\\Music\\Premonition.mp3')\npygame.mixer.music.load(playlist[0])\npygame.mixer.music.set_volume(0.1)\npygame.mixer.music.play(-1) \nleft_pressed = False\nright_pressed = False\nup_pressed = False\ndown_pressed = False\nscreendim = pygame.display.get_surface().get_size()\nrunGame = True\n_JUMPTIME = 20 # Constant value, num pixels player jumps per frame. that determines jump time (higher value means longer jump time)\n_FALLSPEED = 5 # Constant value, num of pixels player falls per frame\n_DEBUG = False\n# highestplaty = screendim[1]\n# lowestplaty = 0\n# rightmostplatx = screendim[0]\n# leftmostplatx = 0\nvectorx = 0\nvectory = 0\nstagenum = 0\nrestart = False\n\n# endregion\n\n# region classes\n\nclass Solids:\n def __init__(self, color, height, width, xset, yset, rightbound, topbound, leftbound, botbound):\n self.size = (width, height)\n self.color = color\n self.x = xset # centre point, x-axis\n self.y = yset # centre point, y-axis\n class Boundary:\n def __init__(self,right,top,left,bottom):\n self.topright = pygame.Vector2(right, top)\n self.bottomleft = pygame.Vector2(left, bottom)\n self.boundary = Boundary(rightbound, topbound, leftbound, botbound) # physical bounds for this object\n # print(\"bound start\", self.boundary.topright.x, self.boundary.topright.y, self.boundary.bottomleft.x, self.boundary.bottomleft.y)\n\n def leftx(self, hypox = None): # x coord of left side (hypox is an hypothetical x coord)\n \"\"\"Calculates the leftmost x-axis position of the sprite given an imagined centre point\n\n Args:\n hypox (float, optional): The hypothetical centre position for the sprite along x-axis. 
Defaults to current self.x.\n\n Returns:\n float: The leftmost x-axis coordinate for the sprite\n \"\"\" \n if hypox is None:\n hypox = self.x\n # print(\"hypox1\")\n # print(hypox)\n return hypox - self.size[0]//2\n\n def rightx(self, hypox = None): # x coord of right side (hypox is an hypothetical x coord)\n \"\"\"Calculates the rightmost x-axis position of the sprite given an imagined centre point\n\n Args:\n hypox (float, optional): The hypothetical centre position for the sprite along x-axis. Defaults to current self.x.\n\n Returns:\n float: The rightmost x-axis coordinate for the sprite\n \"\"\" \n if hypox is None:\n hypox = self.x\n # print(\"hypox2\")\n # print(hypox)\n return hypox + self.size[0]//2\n\n def topy(self, hypoy = None): # y of top (hypoy is an hypothetical y coord)\n \"\"\"Calculates the highest y-axis position of the sprite given an imagined centre point\n\n Args:\n hypox (float, optional): The hypothetical centre position for the sprite along y-axis. Defaults to current self.y.\n\n Returns:\n float: The highest y-axis coordinate for the sprite\n \"\"\" \n if hypoy is None:\n hypoy = self.y\n # print(\"hypoy1\")\n # print(hypoy)\n return hypoy - self.size[1]//2\n\n def boty(self, hypoy = None): # y of bottom (hypoy is an hypothetical y coord)\n \"\"\"Calculates the lowest y-axis position of the sprite given an imagined centre point\n\n Args:\n hypox (float, optional): The hypothetical centre position for the sprite along y-axis. Defaults to current self.y.\n\n Returns:\n float: The lowest y-axis coordinate for the sprite\n \"\"\" \n if hypoy is None:\n hypoy = self.y\n # print(\"hypoy2\")\n # print(hypoy)\n return hypoy + self.size[1]//2\n\n def draw(self):\n pygame.draw.rect(windowSurfaceObj, (self.color), (int(self.leftx()), int(self.topy()), self.size[0], self.size[1]))\n if _DEBUG:\n pygame.draw.circle(windowSurfaceObj, clrGreen, (self.x, self.y), 5)\n\nclass Player(Solids):\n def __init__(self, color, height, width, xset, yset, rightbound, topbound, leftbound, botbound, speed, isfalling=False):\n # self.image = pygame.image.load('squirrel.png')\n self.direction = \"\"\n self.speed = speed\n self.isfalling = isfalling\n self.fallspeed = _FALLSPEED\n self.jumptime = _JUMPTIME # Determines jump time (higher value means longer jump time)\n self.jumpcounter = 0 # Tracks num of pixels the player gets to jump each frame\n self.onground = False\n super().__init__(color, height, width, xset, yset, rightbound, topbound, leftbound, botbound)\n \n def move(self, dirx = 0, diry = 0):\n if dirx < 0: # Going left\n # print(\"rightbound\", self.boundary.topright.x)\n if self.leftx(self.x + dirx) < self.boundary.bottomleft.x: # check if hypothetical pos_x of player's left side is < left bounds\n self.x = self.rightx(self.boundary.bottomleft.x) # set left side to be at the left bound edge\n # print(\"setbound left\", self.boundary.bottomleft.x)\n else:\n self.x += dirx\n elif dirx > 0: # Going right\n # print(\"leftbound\", self.boundary.bottomleft.x)\n if self.rightx(self.x + dirx) > self.boundary.topright.x: # check if hypothetical pos_x of player's right side is > right bounds\n self.x = self.leftx(self.boundary.topright.x) # set right side to be at the right bound edge\n # print(\"setbound right\", self.boundary.topright.x)\n else:\n self.x += dirx\n# The self.onground will change according to changes in diry, so lines 169-179 of move() will set self.onground\n if diry >= 0: # Going down OR not going at all\n # print(self.boty(self.y + diry))\n # 
print(self.boundary.bottomleft.y)\n if self.boty(self.y + diry) >= self.boundary.bottomleft.y: # check if hypothetical pos_y of player's bottom side is > bottom bounds\n # print(\"hello\") \n self.y = self.topy(self.boundary.bottomleft.y) # set bottom side to be at the bottom bound edge\n self.onground = True\n else:\n # print(\"whyyy\")\n self.y += diry\n self.onground = False\n elif diry < 0: # Going up\n # print(self.topy(self.y + diry), self.boundary.topright.y)\n if self.topy(self.y + diry) < self.boundary.topright.y: # check if hypothetical pos_y of player's top side is < top bounds\n self.y = self.boty(self.boundary.topright.y) # set top side to be at the top bound edge\n else:\n self.y += diry\n \n # self.x += dirx\n # self.y += diry\n def animate(self, onground):\n # selects image to show based on action and animation timer\n if left_pressed:\n if onground:\n if animNum == 1:\n windowSurfaceObj.blit(animImages[5], (int(self.leftx()), int(self.topy())))\n elif animNum == 2:\n windowSurfaceObj.blit(animImages[6], (int(self.leftx()), int(self.topy())))\n elif not(onground):\n windowSurfaceObj.blit(animImages[4], (int(self.leftx()), int(self.topy())))\n elif right_pressed:\n if onground:\n if animNum == 1:\n windowSurfaceObj.blit(animImages[2], (int(self.leftx()), int(self.topy())))\n elif animNum == 2:\n windowSurfaceObj.blit(animImages[3], (int(self.leftx()), int(self.topy())))\n elif not(onground):\n windowSurfaceObj.blit(animImages[1], (int(self.leftx()), int(self.topy())))\n else:\n windowSurfaceObj.blit(animImages[0], (int(self.leftx()), int(self.topy())))\n\nclass Platform(Solids):\n def __init__(self, color, height, width, xset, yset, rightbound, topbound, leftbound, botbound,):\n super().__init__(color, height, width, xset, yset, rightbound, topbound, leftbound, botbound)\n\nclass Prize(Solids):\n def imgdraw(self):\n windowSurfaceObj.blit(prizeimage, (int(self.leftx()), int(self.topy())))\n\n def __init__(self, color, height, width, xset, yset, rightbound, topbound, leftbound, botbound,):\n super().__init__(color, height, width, xset, yset, rightbound, topbound, leftbound, botbound)\n\n# endregion\n\n# region character create\n\n# (color, height, width, xset, yset, rightbound, topbound, leftbound, botbound, speed, falling=False) # constructor\nplayer1 = Player(clrWhite, 100, 50, 25, 600, screendim[0], 0, 0, screendim[1], 10)\n\n# endregion\n\n# region Stages\n\nplatformsP1 = [[Platform(clrDrkGrey, 40, 100, 200, 600, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 100, 800, 500, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 100, 500, 575, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 400, 40, screendim[0], 520, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 150, 1205, 320, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, screendim[0], screendim[0]//2, screendim[1], screendim[0], 0, 0, screendim[1])], \\\n [Platform(clrDrkGrey, 40, 800, 600, 250, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 100, 100, 450, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 100, 800, 550, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 100, 500, 500, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 500, 40, 1100, 470, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, screendim[0], screendim[0]//2, screendim[1], screendim[0], 0, 0, screendim[1])], \\\n [Platform(clrDrkGrey, 40, 50, 200, 600, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 50, 800, 500, screendim[0], 0, 0, 
screendim[1]), Platform(clrDrkGrey, 40, 50, 500, 575, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 400, 40, screendim[0], 520, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 500, 1100, 320, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 600, 40, 870, 500, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 500, 501, 321, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, 400, 690, 180, screendim[0], 0, 0, screendim[1]), Platform(clrDrkGrey, 40, screendim[0], screendim[0]//2, screendim[1], screendim[0], 0, 0, screendim[1])]]\\\n # creates array of stages and then a sub-array of platforms in that stage\n\nprizes = [Prize(clrGreen, 120, 60, 1205, 240, screendim[0], 0, 0, screendim[1]), Prize(clrGreen, 120, 60, 1205, 640, screendim[0], 0, 0, screendim[1]), Prize(clrGreen, 120, 60, 1205, 240, screendim[0], 0, 0, screendim[1])] #creates array of prize locations for each stage\nspawnlocation = [600, 25] # where the player will spawn in each level\n\n# endregion\n\n# region functions\n\ndef onGround(plr):\n platnum = 0\n for platform in platformsP1[stagenum]:\n platnum += 1\n # print(platnum)\n if platform.leftx() < plr.rightx() and platform.rightx() > plr.leftx():\n # print(\"in bounds\")\n # print(topplaty, int(platform.topy()))\n if highestplaty == platform.topy():\n if int(platform.topy()) == int(plr.boty()):\n # print(\"on plat\")\n return True\n else:\n pass\n # we don't yet know if the player isn't on another platform, so it is premature to return False\n return False # now we've checked all platforms, so it's safe to conclude player isn't on a platform\n\ndef jump(plr, jumpcount):\n plr.move(0, -jumpcount)\n \n# endregion\n\n# region main game\n\nwhile runGame:\n\n # region start\n\n if gameStart:\n windowSurfaceObj.blit(bckgrdImage, (0, 0))\n pygame.mixer.music.set_volume(0.05)\n else:\n if restart:\n windowSurfaceObj.blit(restartmenu, (0, 0))\n pygame.mixer.music.set_volume(0.1)\n else:\n windowSurfaceObj.blit(startmenu, (0, 0))\n\n # endregion\n\n # region events\n for event in pygame.event.get():\n if event.type == QUIT:\n runGame = False\n pygame.quit()\n elif event.type == KEYDOWN:\n if event.key == pygame.K_w:\n # print(\"\\'w\\' key was pressed\")\n up_pressed = True\n pygame.mixer.Channel(0).play(pygame.mixer.Sound(dir_path + r'\\Sounds\\Sound Effects\\jump.wav'))\n pygame.mixer.Channel(0).set_volume(0.3)\n elif event.key == pygame.K_a:\n # print(\"\\'a\\' key was pressed\")\n left_pressed = True\n elif event.key == pygame.K_s:\n # print(\"\\'s\\' key was pressed\")\n down_pressed = True\n elif event.key == pygame.K_d:\n # print(\"\\'d\\' key was pressed\")\n right_pressed = True\n elif event.key == pygame.K_RETURN:\n # print(\"\\'RETURN\\' key was pressed\")\n gameStart = True\n pygame.mixer.Channel(1).play(pygame.mixer.Sound(dir_path + r'\\Sounds\\Sound Effects\\start.wav'))\n pygame.mixer.Channel(1).set_volume(0.4)\n elif event.type == KEYUP:\n if event.key == pygame.K_w:\n # print(\"\\'w\\' key was let go\")\n up_pressed = False\n elif event.key == pygame.K_a:\n # print(\"\\'a\\' key was let go\")\n left_pressed = False\n elif event.key == pygame.K_s:\n # print(\"\\'s\\' key was let go\")\n down_pressed = False\n elif event.key == pygame.K_d:\n # print(\"\\'d\\' key was let go\")\n right_pressed = False\n # endregion\n\n # region movements\n\n vectorx = 0\n\n if left_pressed:\n vectorx = (-player1.speed)\n elif right_pressed:\n vectorx = (player1.speed)\n\n player1.move()\n # print(playerOnGround)\n\n if player1.onground and 
not(up_pressed): # player is on the ground\n player1.fallspeed = 5 # resets the velocity of fall\n player1.isfalling = False # player stops falling\n player1.jumpcounter = player1.jumptime # resets to the player's jump power\n elif up_pressed: # if player presses 'w'\n jump(player1, player1.jumpcounter)\n player1.jumpcounter -= 1\n elif not(player1.onground) and not(up_pressed): # if player is in air and lets go of 'w'\n player1.isfalling = True # player starts falling\n # player1.jumpcounter = -5 # prevents player from 'jumping' in the air\n\n # endregion\n\n # region bounds\n\n platnum2 = 0\n for platform in platformsP1[stagenum]:\n # platnum2 += 1\n\n if platform.leftx() < player1.rightx() and platform.rightx() > player1.leftx(): # check if player is within horizontal range of platform (the player can stand or hit head on the platform)\n if platform.topy() < player1.boundary.bottomleft.y: # highestplaty: # checks if platform is higher than highest platform in that X area\n if player1.boty() <= platform.topy(): # checks if player is actually above platform\n player1.boundary.bottomleft.y = platform.topy() # sets player bottom boundary\n # highestplaty = platform.topy() # sets highest platform to current platform Y\n # if highestplaty == platform.topy(): # checks if highest platform is the current platform\n # if player1.boty() <= platform.topy(): # checks if player is actually above platform\n # player1.boundary.bottomleft.y = highestplaty # sets player bottom boundary\n\n if platform.boty() > player1.boundary.topright.y: # lowestplaty: # checks if platform is lower than lowest platform in that X area\n if player1.topy() >= platform.boty(): # checks if player is actually below platform\n player1.boundary.topright.y = lowestplaty # sets player top boundary\n # lowestplaty = platform.boty() # sets lowest platform to current platform Y\n # if lowestplaty == platform.boty(): # checks if lowest platform is the current platform\n # if player1.topy() >= platform.boty(): # checks if player is actually below platform\n \n\n else: # if not in the bounds of specified platform\n if player1.boundary.bottomleft.y == platform.topy(): # checks if highest platform is current platform\n player1.boundary.bottomleft.y = screendim[1] # resets bottom boundary\n # highestplaty = screendim[1] # resets highest platform\n if player1.boundary.topright.y == platform.boty(): # checks if lowest platform is current platform\n player1.boundary.topright.y = 0 # resets top boundary\n # lowestplaty = 0 # resets lowest platform\n\n if player1.isfalling:\n player1.move(0, player1.fallspeed)\n player1.fallspeed += 1\n\n for platform in platformsP1[stagenum]:\n\n if platform.topy() < player1.boty() and platform.boty() > player1.topy(): # check if player is within vertical range of platform\n\n if platform.leftx() < player1.boundary.topright.x: # rightmostplatx: # checks if platform is further right than rightmost platform in that Y area\n if player1.rightx() <= platform.leftx(): # checks if player is actually to the left of platform\n player1.boundary.topright.x = platform.leftx() # sets rightmost platform to current platform X\n\n if platform.rightx() > player1.boundary.bottomleft.x: # leftmostplatx: # checks if platform is further left than leftmost platform in that Y area\n if player1.leftx() >= platform.rightx(): # checks if player is actually to the right of platform\n player1.boundary.bottomleft.x = platform.rightx() # sets leftmost platform to current platform X\n\n else: # if not in the bounds of specified platform\n 
if player1.boundary.bottomleft.x == platform.rightx(): # checks if leftmost platform is current platform\n player1.boundary.bottomleft.x = 0 # resets left boundary\n # leftmostplatx = 0 # resets leftmost platform\n \n if player1.boundary.topright.x == platform.leftx(): # checks if rightmost platform is current platform\n player1.boundary.topright.x = screendim[0] # resets right boundary\n # rightmostplatx = screendim[0] # resets rightmost platform\n\n # endregion\n\n #region collectprize\n\n if player1.x > prizes[stagenum].leftx() and player1.x < prizes[stagenum].rightx() and player1.y < prizes[stagenum].boty() and player1.y > prizes[stagenum].topy(): # if centre of player is in prize bounds\n pygame.mixer.Channel(2).play(pygame.mixer.Sound(dir_path + r'\\Sounds\\Sound Effects\\prizecollect.wav')) # play collection noise\n pygame.mixer.Channel(2).set_volume(0.3) # lower volume\n if stagenum + 1 >= len(prizes): # if the stage number is larger than the number of prizes\n gameStart = False # end game\n stagenum = -1 # reset stages\n restart = True # change title screen\n else: # if normal collection of prize\n stagenum += 1 # go to next stage \n player1.x = spawnlocation[1] # reset everything else\n player1.y = spawnlocation[0]\n player1.boundary.topright.x = screendim[0]\n player1.boundary.topright.y = 0\n player1.boundary.bottomleft.x = 0\n player1.boundary.bottomleft.y = platformsP1[stagenum][-1].topy()\n lowestplaty = 0\n highestplaty = screendim[1]\n leftmostplatx = 0\n rightmostplatx = screendim[0]\n \n # endregion\n\n # region draw\n\n if animTimer % 15 == 0:\n animTimer = 0\n if animNum % 2 == 0:\n animNum = 0\n animNum += 1\n animTimer += 1\n\n if gameStart:\n for x in range(0, len(platformsP1[stagenum])):\n platformsP1[stagenum][x-1].draw()\n prizes[stagenum].imgdraw()\n player1.move(vectorx)\n player1.animate(player1.onground)\n\n # endregion\n\n pygame.display.update()\n fpsClock.tick(60)\n\n# endregion","sub_path":"FinalProjectPlatformer/Platformer - Main.py","file_name":"Platformer - Main.py","file_ext":"py","file_size_in_byte":21750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"394453735","text":"\"\"\"\nThe pickle object is used for serializing and de-serializing a python object structure\n\"\"\"\nimport pickle\nfrom sklearn.linear_model import LinearRegression\n\nclf = LinearRegression()\n#clf.fit(X_train, y_train)\n\n\"\"\"\nAfter training the classifier will be saved using pickling\n\"\"\"\n# Saving classifier\nwith open('linear_regression.pickle', 'wb') as f:\n pickle.dump(clf, f)\n\n# Reading classifier\npickle_in = open('linear_regression.pickle', 'rb')\nclf = pickle.load(pickle_in)\n\n","sub_path":"DataScience/Data_Serialization/pickling.py","file_name":"pickling.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"277328801","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\nfrom odoo.exceptions import UserError, AccessError\nimport datetime\nimport logging\nfrom dateutil.relativedelta import *\nimport calendar\n\nclass rrhh_prestamo(models.Model):\n _name = 'rrhh.prestamo'\n _description = 'Prestamo'\n _rec_name = 'descripcion'\n\n employee_id = fields.Many2one('hr.employee','Empleado')\n fecha_inicio = fields.Date('Fecha inicio')\n numero_descuentos = fields.Integer('Numero de descuentos')\n total = fields.Float('Total')\n mensualidad = 
fields.Float('Mensualidad')\n prestamo_ids = fields.One2many('rrhh.prestamo.linea','prestamo_id',string='Lineas de prestamo')\n descripcion = fields.Char(string='Descripción',required=True)\n codigo = fields.Char(string='Código',required=True)\n estado = fields.Selection([\n ('nuevo', 'Nuevo'),\n ('proceso','Proceso'),\n ('pagado', 'Pagado')\n ], string='Status', help='Estado del prestamo',readonly=True, default='nuevo')\n pendiente_pagar_prestamo = fields.Float(compute='_compute_prestamo', string='Pendiente a pagar del prestamos', )\n\n def _compute_prestamo (self):\n for prestamo in self:\n total_prestamo = 0\n total_prestamo_pagado = 0\n nominas = 0\n for linea in prestamo.prestamo_ids:\n for nomina in linea.nomina_id:\n nominas += 1\n for nomina_entrada in nomina.input_line_ids:\n if prestamo.codigo == nomina_entrada.code:\n total_prestamo_pagado += nomina_entrada.amount\n total_prestamo += linea.monto\n prestamo.pendiente_pagar_prestamo = total_prestamo - total_prestamo_pagado\n if prestamo.pendiente_pagar_prestamo == 0 and nominas > 0:\n prestamo.estado = 'pagado'\n return True\n\n def generar_mensualidades(self):\n mes_inicial = self.fecha_inicio\n mes = 0\n if self.mensualidad > 0 and self.numero_descuentos > 0:\n total = self.mensualidad * self.numero_descuentos\n if self.mensualidad <= self.total:\n numero_pagos_mensualidad = self.total / self.mensualidad\n mes_final_pagos_mensuales = mes_inicial + relativedelta(months=int(numero_pagos_mensualidad) -1)\n anio_final = mes_final_pagos_mensuales.strftime('%Y')\n diferencias_meses = self.numero_descuentos - int(numero_pagos_mensualidad)\n contador = 0\n if diferencias_meses < 0:\n total_sumado = 0\n diferencia = (diferencias_meses*-1) + self.numero_descuentos\n while contador <= (self.numero_descuentos -1):\n mes = mes_inicial + relativedelta(months=contador)\n anio = mes.strftime('%Y')\n mes = int(mes.strftime('%m'))\n if contador < (self.numero_descuentos -1):\n total_sumado += self.mensualidad\n self.env['rrhh.prestamo.linea'].create({'prestamo_id': self.id,'mes': str(mes),'anio': anio,'monto': self.mensualidad})\n else:\n pago_restante = self.total - total_sumado\n ultimos_pagos_mensuales = pago_restante / diferencias_meses\n self.env['rrhh.prestamo.linea'].create({'prestamo_id': self.id,'mes': str(mes),'anio': anio,'monto': pago_restante})\n contador += 1\n else:\n while contador < (self.numero_descuentos):\n mes = mes_inicial + relativedelta(months=contador)\n anio = mes.strftime('%Y')\n mes = int(mes.strftime('%m'))\n if contador <= (int(numero_pagos_mensualidad) -1 ):\n self.env['rrhh.prestamo.linea'].create({'prestamo_id': self.id,'mes': str(mes),'anio': anio,'monto': self.mensualidad})\n else:\n pago_restante = self.total%self.mensualidad\n ultimos_pagos_mensuales = pago_restante / diferencias_meses\n self.env['rrhh.prestamo.linea'].create({'prestamo_id': self.id,'mes': str(mes),'anio': anio,'monto': ultimos_pagos_mensuales})\n contador += 1\n return True\n\n def prestamos(self):\n if self.prestamo_ids:\n cantidad_nominas = 0\n for nomina in self.prestamo_ids:\n if nomina.nomina_id:\n cantidad_nominas += 1\n if cantidad_nominas == 0:\n self.prestamo_ids.unlink()\n self.generar_mensualidades()\n else:\n raise ValidationError(_('No puede volver a generar mensualidades, por que ya existen nominas asociadas a este prestamo.'))\n else:\n self.generar_mensualidades()\n return True\n\n def cancelar(self):\n for prestamo in self:\n prestamo.estado = 'proceso'\n return True\n\n def unlink(self):\n for prestamo in self:\n if not 
prestamo.estado == 'nuevo':\n raise UserError(_('No puede eliminar prestamo, por que ya existen nominas asociadas'))\n return super(rrhh_prestamo, self).unlink()\n\nclass rrhh_prestamo_linea(models.Model):\n _name = 'rrhh.prestamo.linea'\n _description = 'Prestamo linea'\n\n mes = fields.Selection([\n ('1', 'Enero'),\n ('2', 'Febrero'),\n ('3', 'Marzo'),\n ('4', 'Abril'),\n ('5', 'Mayo'),\n ('6', 'Junio'),\n ('7', 'Julio'),\n ('8', 'Agosto'),\n ('9', 'Septiembre'),\n ('10', 'Octubre'),\n ('11', 'Noviembre'),\n ('12', 'Diciembre'),\n ], string='Mes')\n monto = fields.Float('Monto')\n anio = fields.Integer('Año')\n nomina_id = fields.Many2many('hr.payslip','prestamo_nominda_id_rel',string='Nomina')\n prestamo_id = fields.Many2one('rrhh.prestamo','Prestamo')\n\nclass rrhh_historial_salario(models.Model):\n _name = \"rrhh.historial_salario\"\n\n salario = fields.Float('Salario')\n fecha = fields.Date('Fecha')\n contrato_id = fields.Many2one('hr.contract','Contato')\n","sub_path":"rrhh/models/.ipynb_checkpoints/rrhh-checkpoint.py","file_name":"rrhh-checkpoint.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"136218460","text":"\"\"\"\r\nYou can add your questions that are related and the \r\nquestions that I mention here are sample! \r\n\"\"\"\r\n\r\n# Global Variables\r\nQandA = {\"What is capital if India?\":\"C\",\r\n\"Who is the current President of India?\":\"A\",\r\n\"Who is the current President of US\":\"B\",\r\n\"Who is the current captain of India team?\":\"B\"}\r\nquestions = list(QandA.keys())\r\noptions = [[\"A. Kolkata\",\"B. Hyderabad\",\"C. New Delhi\"],\r\n[\"A. Ram Nath Kovind\",\"B. Narendra Modi\",\"C. Imran Khan\"],\r\n[\"A. Trump\",\"B. Joe Biden\",\"C. Obama\"],\r\n[\"A. MS Dhoni\",\"B. Virat Kohli\",\"C. 
Ajinkya Rahane\"]]\r\n\r\ndef new_game():\r\n score = 0\r\n print(\"Welcome to Quiz game!\")\r\n for i in range(len(questions)):\r\n print(\"-\"*15)\r\n print(questions[i])\r\n for j in options[i]:\r\n print(j)\r\n ans = input(\"Enter A or B or C: \")\r\n\r\n if ans.lower()==QandA[questions[i]].lower():\r\n print(\"Correct Answer!\")\r\n score+=1\r\n else:\r\n print(\"Wrong Answer!\")\r\n return score\r\n\r\n\r\nscore = new_game()\r\nprint(\"-\"*15)\r\nif score >= 2:\r\n print(f\"You passed🥳 Your score is {score}\")\r\nelse:\r\n print(f\"You failed😢 Your score is {score}\")\r\n","sub_path":"Quiz.py","file_name":"Quiz.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"436826257","text":"from tkinter import *\nimport numpy\nimport math\n\nroot = Tk()\nroot.title(\"Построение графиков\")\nroot.geometry('710x640')\n\ncount = 0\nlistOfDecisions = []\n\ny_zero = 160\n\ncanvas = Canvas(root, width=710, height=320)\ncanvas.pack(side='bottom')\n\n\ndef showAreaWithGraph():\n global canvas\n canvas.create_rectangle(0, 0, 710, 320, fill='#008080', outline='')\n\n # Линии сетки по вертикали\n for y in range(25):\n k = 30 * y\n canvas.create_line(10 + k, 310, 10 + k, 10, width=1, fill='#20B2AA')\n\n # Линии сетки по горизонтали\n for x in range(11):\n k = 30 * x\n canvas.create_line(10, 10 + k, 700, 10 + k, width=1, fill='#20B2AA')\n\n canvas.create_line(10, 10, 10, 310, width=1, fill='#FFFFFF') # ось у\n canvas.create_line(10, 160, 700, 160, width=1, fill='#FFFFFF') # ось х\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef parser(enter):\n result = 0\n buff = enter\n buff = buff.split()\n\n try:\n buff = buff[0] # Берем нулевой элемент в коллекции после сплита\n except IndexError:\n return result\n\n buff = buff.split(\",\")\n try:\n buff = buff[0] # Берем нулевой элемент в коллекции после сплита\n except IndexError:\n return result\n\n sign = buff[0] # Берем первый символ строки\n if sign == \"-\":\n result = float(buff)\n elif sign == \"+\":\n result = float(buff[1:])\n elif is_number(sign):\n if is_number(buff):\n result = float(buff)\n else:\n result = 0\n elif sign == \"\":\n result = 1\n\n return result\n\n\ndef clean():\n entry_1a1.delete(0, END)\n entry_1a2.delete(0, END)\n entry_1a3.delete(0, END)\n entry_1a4.delete(0, END)\n entry_b1.delete(0, END)\n\n entry_2a1.delete(0, END)\n entry_2a2.delete(0, END)\n entry_2a3.delete(0, END)\n entry_2a4.delete(0, END)\n entry_b2.delete(0, END)\n\n entry_3a1.delete(0, END)\n entry_3a2.delete(0, END)\n entry_3a3.delete(0, END)\n entry_3a4.delete(0, END)\n entry_b3.delete(0, END)\n\n entry_4a1.delete(0, END)\n entry_4a2.delete(0, END)\n entry_4a3.delete(0, END)\n entry_4a4.delete(0, END)\n entry_b4.delete(0, END)\n\n\ndef bildGraphOnDataFromTable(elementNumber):\n showAreaWithGraph()\n if (len(listOfDecisions) == 0):\n return\n global y_zero\n k = 30 # Клеточка\n width = k * 2 # Ширина промежутка между столбцами диаграммы и отступами между ними\n size = 30 # Сколько припадает на единицу ответа пикселей на экране\n\n max_num = listOfDecisions[elementNumber][0]\n min_num = listOfDecisions[elementNumber][0]\n\n for item in listOfDecisions[elementNumber]:\n if item > max_num:\n max_num = item\n if item < min_num:\n min_num = item\n print('max_num = %f' % max_num)\n print('min_num = %f' % min_num)\n\n result_num = max(abs(max_num), abs(min_num))\n print('result_num = %f' % result_num)\n\n flag = 0\n\n if 0 <= 
result_num <= 0.1: # Колличество клеточек (от 0 до 1)\n maximum = 50\n size *= maximum\n elif 0.1 <= result_num <= 0.5:\n maximum = 10\n size *= 10\n elif 0.5 <= result_num <= 1:\n maximum = 5\n size *= maximum\n # elif 1 <= result_num <= 2.5:\n # maximum = 2\n # size *= maximum\n else:\n maximum = int(math.ceil(result_num / 5))\n size /= maximum\n flag = 1\n\n print('maximum = %f' % maximum)\n print('size = %f' % size)\n\n x1_start = width + 10 + 3 * k\n x1_stop = x1_start + width\n y1_border = y_zero - listOfDecisions[elementNumber][0] * size\n\n if listOfDecisions[elementNumber][0] >= 0:\n canvas.create_rectangle(x1_start, y1_border, x1_stop, y_zero, fill='#4682B4', outline='#FFFFFF')\n else:\n canvas.create_rectangle(x1_start, y_zero, x1_stop, y1_border, fill='#4682B4', outline='#FFFFFF')\n\n x2_start = x1_stop + width\n x2_stop = x2_start + width\n y2_border = y_zero - listOfDecisions[elementNumber][1] * size\n\n if listOfDecisions[elementNumber][1] >= 0:\n canvas.create_rectangle(x2_start, y2_border, x2_stop, y_zero, fill='#4682B4', outline='#FFFFFF')\n else:\n canvas.create_rectangle(x2_start, y_zero, x2_stop, y2_border, fill='#4682B4', outline='#FFFFFF')\n\n x3_start = x2_stop + width\n x3_stop = x3_start + width\n y3_border = y_zero - listOfDecisions[elementNumber][2] * size\n\n if listOfDecisions[elementNumber][2] >= 0:\n canvas.create_rectangle(x3_start, y3_border, x3_stop, y_zero, fill='#4682B4', outline='#FFFFFF')\n else:\n canvas.create_rectangle(x3_start, y_zero, x3_stop, y3_border, fill='#4682B4', outline='#FFFFFF')\n\n x4_start = x3_stop + width\n x4_stop = x4_start + width\n y4_border = y_zero - listOfDecisions[elementNumber][3] * size\n\n if listOfDecisions[elementNumber][3] >= 0:\n canvas.create_rectangle(x4_start, y4_border, x4_stop, y_zero, fill='#4682B4', outline='#FFFFFF')\n else:\n canvas.create_rectangle(x4_start, y_zero, x4_stop, y4_border, fill='#4682B4', outline='#FFFFFF')\n\n x1 = Label(canvas, text='x1', fg='#FFFFFF', bg='#008080')\n x1.place(x=x1_start - k + 5, y=135)\n x2 = Label(canvas, text='x2', fg='#FFFFFF', bg='#008080')\n x2.place(x=x2_start - k + 5, y=135)\n x3 = Label(canvas, text='x3', fg='#FFFFFF', bg='#008080')\n x3.place(x=x3_start - k + 5, y=135)\n x4 = Label(canvas, text='x4', fg='#FFFFFF', bg='#008080')\n x4.place(x=x4_start - k + 5, y=135)\n\n\ndef putOnTheTable(result):\n global count\n count = count + 1\n text.insert(2.0, '\\nx1 = %f\\nx2 = %f\\nx3 = %f\\nx4 = %f\\n' % (result[0], result[1], result[2], result[3]))\n\n button = Button(text, text='График')\n button.bind('', lambda event, c=count: bildGraphOnDataFromTable(c - 1))\n text.window_create(2.6, window=button)\n\n text.insert(2.0, 'Рассчет №%d\\n' % count)\n text.insert(2.0, \"=================================\\n\")\n\n listOfDecisions.append(result)\n\n\ndef fillingArray():\n resultMatrix = numpy.array([[parser(str(entry_1a1.get())), parser(str(entry_1a2.get())),\n parser(str(entry_1a3.get())), parser(str(entry_1a4.get()))],\n [parser(str(entry_2a1.get())), parser(str(entry_2a2.get())),\n parser(str(entry_2a3.get())), parser(str(entry_2a4.get()))],\n [parser(str(entry_3a1.get())), parser(str(entry_3a2.get())),\n parser(str(entry_3a3.get())), parser(str(entry_3a4.get()))],\n [parser(str(entry_4a1.get())), parser(str(entry_4a2.get())),\n parser(str(entry_4a3.get())), parser(str(entry_4a4.get()))]])\n\n vector = numpy.array([parser(str(entry_b1.get())), parser(str(entry_b2.get())),\n parser(str(entry_b3.get())), parser(str(entry_b4.get()))])\n\n result = 
numpy.linalg.solve(resultMatrix, vector)\n\n print(result[0])\n print(result[1])\n print(result[2])\n print(result[3])\n\n putOnTheTable(result)\n\n\ndef outputInFile():\n print(listOfDecisions)\n\n if (len(listOfDecisions) == 0):\n print('Empty list')\n return\n\n file = open('dataFromTable.csv', 'w')\n\n for item in listOfDecisions:\n file.write(\"x1 = %s\\n\" % item[0])\n file.write(\"x2 = %s\\n\" % item[1])\n file.write(\"x3 = %s\\n\" % item[2])\n file.write(\"x4 = %s\\n\" % item[3])\n file.write(\"=================================\\n\")\n\n\ninputArea = Canvas(root, width=720, height=320, bd=0, highlightthickness=0)\ninputArea.pack()\n\nrect = Canvas(inputArea, width=325, height=190)\nrect.pack()\nrect.place(x=35, y=60)\n\n# Очень много кода, который показывает строки для заполнения\n# 1-я строка\nentry_1a1 = Entry(rect, justify=RIGHT)\nentry_1a1.place(x=10, y=10, width=40)\nlabel_1x1 = Label(rect, text='x1')\nlabel_1x1.place(x=50, y=13)\n\nentry_1a2 = Entry(rect, justify=RIGHT)\nentry_1a2.place(x=70, y=10, width=40)\nlabel_1x2 = Label(rect, text='x2')\nlabel_1x2.place(x=110, y=13)\n\nentry_1a3 = Entry(rect, justify=RIGHT)\nentry_1a3.place(x=130, y=10, width=40)\nlabel_1x3 = Label(rect, text='x3')\nlabel_1x3.place(x=170, y=13)\n\nentry_1a4 = Entry(rect, justify=RIGHT)\nentry_1a4.place(x=190, y=10, width=40)\nlabel_1x4 = Label(rect, text='x4')\nlabel_1x4.place(x=230, y=13)\n\nlabel_equals = Label(rect, text='=')\nlabel_equals.place(x=260, y=13)\n\nentry_b1 = Entry(rect, justify=RIGHT)\nentry_b1.place(x=280, y=10, width=40)\n\n\n# 2-я строка\nentry_2a1 = Entry(rect, justify=RIGHT)\nentry_2a1.place(x=10, y=40, width=40)\nlabel_2x1 = Label(rect, text='x1')\nlabel_2x1.place(x=50, y=43)\n\nentry_2a2 = Entry(rect, justify=RIGHT)\nentry_2a2.place(x=70, y=40, width=40)\nlabel_2x2 = Label(rect, text='x2')\nlabel_2x2.place(x=110, y=43)\n\nentry_2a3 = Entry(rect, justify=RIGHT)\nentry_2a3.place(x=130, y=40, width=40)\nlabel_2x3 = Label(rect, text='x3')\nlabel_2x3.place(x=170, y=43)\n\nentry_2a4 = Entry(rect, justify=RIGHT)\nentry_2a4.place(x=190, y=40, width=40)\nlabel_2x4 = Label(rect, text='x4')\nlabel_2x4.place(x=230, y=43)\n\nlabel_equals = Label(rect, text='=')\nlabel_equals.place(x=260, y=43)\n\nentry_b2 = Entry(rect, justify=RIGHT)\nentry_b2.place(x=280, y=40, width=40)\n\n\n# 3-я строка\nentry_3a1 = Entry(rect, justify=RIGHT)\nentry_3a1.place(x=10, y=70, width=40)\nlabel_3x1 = Label(rect, text='x1')\nlabel_3x1.place(x=50, y=73)\n\nentry_3a2 = Entry(rect, justify=RIGHT)\nentry_3a2.place(x=70, y=70, width=40)\nlabel_3x2 = Label(rect, text='x2')\nlabel_3x2.place(x=110, y=73)\n\nentry_3a3 = Entry(rect, justify=RIGHT)\nentry_3a3.place(x=130, y=70, width=40)\nlabel_3x3 = Label(rect, text='x3')\nlabel_3x3.place(x=170, y=73)\n\nentry_3a4 = Entry(rect, justify=RIGHT)\nentry_3a4.place(x=190, y=70, width=40)\nlabel_3x4 = Label(rect, text='x4')\nlabel_3x4.place(x=230, y=73)\n\nlabel_equals = Label(rect, text='=')\nlabel_equals.place(x=260, y=73)\n\nentry_b3 = Entry(rect, justify=RIGHT)\nentry_b3.place(x=280, y=70, width=40)\n\n\n# 4-я строка\nentry_4a1 = Entry(rect, justify=RIGHT)\nentry_4a1.place(x=10, y=100, width=40)\nlabel_4x1 = Label(rect, text='x1')\nlabel_4x1.place(x=50, y=103)\n\nentry_4a2 = Entry(rect, justify=RIGHT)\nentry_4a2.place(x=70, y=100, width=40)\nlabel_4x2 = Label(rect, text='x2')\nlabel_4x2.place(x=110, y=103)\n\nentry_4a3 = Entry(rect, justify=RIGHT)\nentry_4a3.place(x=130, y=100, width=40)\nlabel_4x3 = Label(rect, text='x3')\nlabel_4x3.place(x=170, y=103)\n\nentry_4a4 = 
Entry(rect, justify=RIGHT)\nentry_4a4.place(x=190, y=100, width=40)\nlabel_4x4 = Label(rect, text='x4')\nlabel_4x4.place(x=230, y=103)\n\nlabel_equals = Label(rect, text='=')\nlabel_equals.place(x=260, y=103)\n\nentry_b4 = Entry(rect, justify=RIGHT)\nentry_b4.place(x=280, y=100, width=40)\n\nshowAreaWithGraph()\n\nbtn_calc = Button(rect, text='Рассчитать')\nbtn_calc.bind('', lambda event: fillingArray())\nbtn_calc.place(x=40, y=150)\n\nbtn_clean = Button(rect, text='Очистить')\nbtn_clean.bind('', lambda event: clean())\nbtn_clean.place(x=180, y=150)\n\nbtn_clean = Button(root, text='Экспортировать в файл')\nbtn_clean.bind('', lambda event: outputInFile())\nbtn_clean.place(x=455, y=280)\n\ntext = Text(root)\ntext.config(font=('Arial', 15))\ntext.place(x=400, y=10, height=260, width=295)\ntext.insert(1.0, \"\\tТаблица результатов\\n=================================\\n\")\n\nroot.mainloop()\n","sub_path":"GaussMethodTkinterGUI.py","file_name":"GaussMethodTkinterGUI.py","file_ext":"py","file_size_in_byte":11771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"101428720","text":"\"\"\"\nhttps://www.hackerrank.com/challenges/array-left-rotation\n\"\"\"\n\ndef leftRotate(a, d):\n na = a[d:] + a[0:d]\n\n return na\n\nif __name__ == '__main__':\n nd = input().split()\n n = int(nd[0])\n d = int(nd[1])\n a = list(map(int, input().rstrip().split()))\n print(*leftRotate(a, d))\n\n\n","sub_path":"06-1_Arrays_LeftRotation.py","file_name":"06-1_Arrays_LeftRotation.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"34630397","text":"from numpy import *\nimport networkx as nx\nimport math\nimport random\nimport community\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import svm\nfrom random import randint\nimport matplotlib.pyplot as plt\nimport scipy.io\nimport numpy as np\nimport operator\nimport glob\nimport statistics\nimport numpy as np\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\n\n\n\n\n\n\ndef closure_coefficient(G, nodes=None):\n if nodes == None: nodes = G.nodes()\n output = 0\n for node in nodes:\n triangle = nx.triangles(G, node)\n degree = 0\n for neigh in G.neighbors(node):\n degree += G.degree(neigh)\n degree = degree - G.degree(node)\n output += float(triangle) / float(degree)\n\n return output / float(len(nodes))\n\n if nodes is None:\n nodes_nbrs = G.adj.items()\n else:\n nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes))\n\n for v, v_nbrs in nodes_nbrs:\n vs = set(v_nbrs) - {v}\n gen_degree = Counter([len(vs & (set(G[w]) - {w})) for w in vs])\n ntriangles = sum([k * val for k, val in gen_degree.items()])\n\n return ntriangles\n\ndef measure(g, partition, ll):\n nodes = g.nodes()\n par = [0] * ll\n result1 = 0\n result2 = 0\n for node in nodes:\n x = g.node[node]['labels']\n y = g.node[node]['groups']\n if x != -1:\n par[y] += 1\n result2 += 1\n if x == y:\n result1 += 1\n if result2 == 0: result2 += 1\n # par[0] = float(par[0])/len(nodes)\n # par[1] = float(par[1])/len(nodes)\n return par, float(result1) / float(result2), float(result1) / float(len(nodes)), 0, 0, 0, [0], 0, 0, 0, 0, 0\n\n\ndef measure2(g, partition, ll):\n fraction = 0\n nodes = g.nodes()\n par = [0] * ll\n parpar = [0] * ll\n result1 = 0\n result2 = 0\n for node in nodes:\n x = g.node[node]['labels']\n y = g.node[node]['groups']\n if x != -1:\n par[y] += 1\n result2 += 1\n if x == y:\n result1 += 1\n if result2 == 0: result2 += 
1\n var = np.var(par)\n for i in range(len(par)):\n parpar[i] = 10.0 * (float(par[i]) / len(nodes))\n if par[i] != 0:\n fraction += 1\n varvar = np.var(parpar)\n ###\n fraction2 = 0\n par2 = [0] * ll\n parpar2 = [0] * ll\n result12 = 0\n result22 = 0\n for node in nodes:\n x = g.node[node]['seen']\n y = g.node[node]['groups']\n if x != -1:\n par2[y] += 1\n result22 += 1\n if x == y:\n result12 += 1\n if result22 == 0: result22 += 1\n var2 = np.var(par2)\n for i in range(len(par2)):\n parpar2[i] = 10 * (float(par2[i]) / len(nodes))\n if par2[i] != 0:\n fraction2 += 1\n varvar2 = np.var(parpar2)\n\n return par, (float(result2) - float(result1)) / float(result2), float(result1) / float(\n len(nodes)), var, varvar, float(\n fraction) / ll, par2, (float(result22) - float(result12)) / float(result22), float(result12) / float(\n len(nodes)), var2, varvar2, float(fraction2) / ll\n","sub_path":"Code/CascadingMethods/Measure.py","file_name":"Measure.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"508625935","text":"#!/usr/bin/env python\n# coding=utf-8\nimport sys\nimport traceback\nimport pymongo\n\n\nclass MongodbManager(object):\n def __init__(self, **kwargs):\n try:\n self.conn = pymongo.MongoClient(kwargs['host'], kwargs['port'])\n self.db = self.conn[kwargs['database']] # connect db\n self.username = kwargs['username']\n self.password = kwargs['password']\n if self.username and self.password:\n self.connected = self.db.authenticate(self.username, self.password)\n else:\n self.connected = True\n except ConnectionError:\n print(traceback.format_exc())\n print('Connect Statics Database Fail.')\n sys.exit(1)\n\n def close(self):\n self.conn.close()\n\n def check_connected(self):\n # 检查是否连接成功\n if not self.connected:\n raise (NameError, 'stat:connected Error')\n\n def save(self, collection, value):\n # 一次操作一条记录,根据‘_id’是否存在,决定插入或更新记录\n try:\n self.check_connected()\n self.db[collection].save(value)\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n def insert(self, collection, value):\n # 可以使用insert直接一次性向mongoDB插入整个列表,也可以插入单条记录,但是'_id'重复会报错\n try:\n self.check_connected()\n self.db[collection].insert(value, continue_on_error=True)\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n def insert_many(self, data):\n \"\"\"\n 批量插入数据\n :param collection:\n :param data:\n :return:\n \"\"\"\n try:\n if self.get_state():\n result = self.db[self.collection].insert_many(data)\n return result.inserted_id\n else:\n return self.get_state()\n except Exception as e:\n print(\"Mongodb Error: %s\" % (e,))\n\n def update(self, collection, conditions, value, s_upsert=False, s_multi=False):\n try:\n self.check_connected()\n self.db[collection].update(conditions, {'$set': value}, upsert=s_upsert, multi=s_multi)\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n def upsert_many(self, collection, many_data):\n # 批量更新插入,根据‘_id’更新或插入多条记录。\n # 把'_id'值不存在的记录,插入数据库。'_id'值存在,则更新记录。\n # 如果更新的字段在mongo中不存在,则直接新增一个字段\n try:\n self.check_connected()\n bulk = self.db[collection].initialize_ordered_bulk_op()\n for data in many_data:\n _id = data['_id']\n bulk.find({'_id': _id}).upsert().update({'$set': data})\n bulk.execute()\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n def upsert_one(self, collection, data):\n # 更新插入,根据‘_id’更新一条记录,如果‘_id’的值不存在,则插入一条记录\n try:\n self.check_connected()\n query = {'_id': 
data.get('_id', '')}\n if not self.db[collection].find_one(query):\n self.db[collection].insert(data)\n else:\n data.pop('_id') # 删除'_id'键\n self.db[collection].update(query, {'$set': data})\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n def find_one(self, collection, value):\n # 根据条件进行查询,返回一条记录\n try:\n self.check_connected()\n return self.db[collection].find_one(value)\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n def find(self, collection, value):\n # 根据条件进行查询,返回所有记录\n try:\n self.check_connected()\n return self.db[collection].find(value)\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n def delete(self, collection, condition):\n \"\"\"\n 删除\n :param collection:\n :param condition:\n :return:\n \"\"\"\n try:\n self.check_connected()\n return self.db[collection].delete_many(filter=condition).deleted_count\n except Exception:\n print(traceback.format_exc())\n finally:\n self.close()\n\n","sub_path":"pytest_mongodb/mongodb_client.py","file_name":"mongodb_client.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"530950082","text":"''' \nWrite code to convert a given number into words. Only consider numbers up to 4 digits, i.e., numbers from 0 to 9999. \nI/P: \"1234\" \nO/P: “one thousand two hundred thirty four\". \n'''\nfrom collections import deque\n\nclass Number2Words:\n\n\tdef __init__(self, n):\n\t\tself.n = n\n\t\tself.units = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\n\t\tself.tens = ['ten', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']\n\t\tself.special_tens = ['eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n\n\tdef convert_to_words(self):\n\t\t'''\n\t\tThis method will generate words corresponding to the give no\n\t\t'''\n\t\tl = len(self.n)\n\t\tindex = l-1\n\t\t#Handle zero digit separately\n\t\tif l == 1 and self.n[0] == '0':\n\t\t\tprint('zero')\n\t\t\treturn\n\t\tresult = deque()\n\t\tresult.appendleft('')\n\t\twhile index >= 0:\n\t\t\tif self.n[index] != '0':\n\t\t\t\t# Position will tell us which place we are looking at (units, tens, hundred or thousand)\n\t\t\t\tposition = l - (index + 1)\n\t\t\t\tif position == 0:\n\t\t\t\t\tresult.appendleft(self.units[ord(self.n[index]) - 49])\n\t\t\t\telif position == 1:\n\t\t\t\t\t# Check if this belongs to the special ten case\n\t\t\t\t\tif self.n[index] == '1' and self.n[index+1] != '0':\n\t\t\t\t\t\tresult.popleft()\n\t\t\t\t\t\tresult.appendleft(self.special_tens[ord(self.n[index+1]) - 49])\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult.appendleft(' ')\n\t\t\t\t\t\tresult.appendleft(self.tens[ord(self.n[index]) - 49])\n\t\t\t\telif position == 2:\n\t\t\t\t\tresult.appendleft(' ')\n\t\t\t\t\tresult.appendleft('hundred')\n\t\t\t\t\tresult.appendleft(' ')\n\t\t\t\t\tresult.appendleft(self.units[ord(self.n[index]) - 49])\n\t\t\t\telse:\n\t\t\t\t\tresult.appendleft(' ')\n\t\t\t\t\tresult.appendleft('thousand')\n\t\t\t\t\tresult.appendleft(' ')\n\t\t\t\t\tresult.appendleft(self.units[ord(self.n[index]) - 49])\n\t\t\tindex -= 1\n\t\tprint(''.join(result))\n\nif __name__ == '__main__':\n\tnum_words = 
Number2Words('1105')\n\tnum_words.convert_to_words()","sub_path":"Arrays_Strings/num2word.py","file_name":"num2word.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"286242913","text":"import pickle\nimport numpy as np\nimport n_gram_config\nimport config\nfrom embedding import Embedding\n\nclass FeatureExtractor:\n\n def __init__(self):\n self.n = n_gram_config.N\n self.sentences = self.__load_processed_training_data__()\n\n if n_gram_config.PAD_SENTENCES:\n self.sentences = self.__pad_sentences__()\n pickle.dump(self.sentences, open('padded_training_sentences', 'wb'), True)\n def __load_processed_training_data__(self):\n return pickle.load(open(config.PROCESSED_TRAIN_DATA_PATH, 'rb'))\n def __pad_sentences__(self):\n return list(map(lambda x: ['' * (self.n - 1)] + x + ['' * (self.n - 1)], self.sentences))\n\n def __embed_sentences__(self):\n pass\n def __extract_n_grams__(self):\n self.n_grams = []\n average_sentence_length = 0\n for sentence in self.sentences:\n sentence_ngrams = []\n average_sentence_length += len(sentence)\n for i in range(len(sentence) - self.n):\n sentence_ngrams.append(np.array([sentence[i] for i in range(i + self.n)]))\n self.n_grams.append(np.array(sentence_ngrams))\n\n\n\nextractor = FeatureExtractor()\nprint(extractor.sentences[0])\n\n\n","sub_path":"sentence-completion/n-gram-variation/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"331763832","text":"\r\n\r\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\r\n\r\n\r\nclass HelloHandler(BaseHTTPRequestHandler):\r\n def do_GET(self):\r\n self.send_response(200)\r\n self.send_header(\"Content-Type\", \"text/html\")\r\n self.end_headers()\r\n self.wfile.write(\"

    Hello World!

    \\n\".encode(\"UTF-8\"))\r\n self.wfile.write(\" Rainy day today\".encode(\"UTF-8\"))\r\n\r\n\r\n\r\n\r\nPORT = 8080\r\n\r\nif __name__ == \"__main__\":\r\n server_address = (\"\", PORT)\r\n server = HTTPServer(server_address, HelloHandler)\r\n server.serve_forever()\r\n\r\n\r\n","sub_path":"In Class Code/2-16-19/in_class_190226/previous/hello_server.py","file_name":"hello_server.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"467799172","text":"class Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n res = []\n if not set:\n return res\n s = list(nums) # to char array\n self.helper(s, res, [], 0)\n return res\n\n def helper(self, s, res, path, idx):\n # base case\n if idx == len(s):\n res.append(list(path))\n return\n\n # not add cur idx\n self.helper(s, res, path, idx + 1)\n\n # add cur idx\n path.append(s[idx])\n self.helper(s, res, path, idx + 1)\n path.pop()","sub_path":"78. Subsets/ans.py","file_name":"ans.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"602800782","text":"from statistics import mean\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\n\n\"\"\"\nWe will be coding the best fit line from scratch\nThe equation for which is:\n\ny = mx + b\n\nBroken down:\n\nm is the best fit slope\nb is the y intercept\n\nm = (mean(x) * mean(y) - mean(x * y)) / (mean(x)^2 - mean(x^2))\nb = mean(y) - m * mean(x)\n\"\"\"\n\n# Define some simple data to work with\n\nxs = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)\nys = np.array([5, 4, 6, 5, 6, 7], dtype=np.float64)\n\n\ndef best_fit_slope(xs, ys):\n \"\"\"\n Calculate the best fit slope.\n m = (mean(x) * mean(y) - mean(x * y)) / (mean(x)^2 - mean(x^2))\n \"\"\"\n m = ((((mean(xs) * mean(ys))) - mean(xs * ys)) /\n ((mean(xs) ** 2) - mean(xs ** 2)))\n return m\n\n\ndef y_intercept(xs, ys):\n \"\"\"\n Calculate the y intercept.\n b = mean(y) - m * mean(x)\n \"\"\"\n b = mean(ys) - (best_fit_slope(xs, ys) * mean(xs))\n return b\n\n\ndef find_mb(xs, ys):\n \"\"\"\n One function for both!\n \"\"\"\n m = ((((mean(xs) * mean(ys))) - mean(xs * ys)) /\n ((mean(xs) ** 2) - mean(xs ** 2)))\n b = mean(ys) - (m * mean(xs))\n return m, b\n\n# m = best_fit_slope(xs, ys)\n# b = y_intercept(xs, ys)\n\nm, b = find_mb(xs, ys)\n\n# To make a prediction at, let's say, x = 8:\npredict_x = 8\npredict_y = (m * predict_x) + b\nprint(predict_x, predict_y)\n\nregression_line = [(m * x) + b for x in xs]\nplt.scatter(xs, ys)\nplt.plot(xs, regression_line)\n# plt.show()\n\n# R^2 AKA Coefficient of Determination\n\"\"\"\nDetermines how accurate our best fit line is\n\nE = distance from data point to y-hat (regression line)\nSE = Squared Error (E^2)\nR^2 = 1 - ((SE of y-hat) / (SE of mean(y)))\n\nWe want the R^2 value to be high. 
How high is kind of up to us\n\"\"\"\n\n\ndef squared_error(ys_orig, ys_line):\n \"\"\"\n ys_orig = original y points\n ys_line = y values of the regression line\n returns the squared error\n \"\"\"\n return sum((ys_line - ys_orig) ** 2)\n\n\ndef coefficient_of_determination(ys_orig, ys_line):\n y_mean_line = [mean(ys_orig) for y in ys_orig] # Would be a straight line\n squared_error_regr = squared_error(ys_orig, ys_line)\n squared_error_y_mean = squared_error(ys_orig, y_mean_line)\n return 1 - (squared_error_regr / squared_error_y_mean)\n\nr_squared = coefficient_of_determination(ys, regression_line)\nprint(r_squared)\n","sub_path":"intro_to_machine_learning/modules/regression/160501_regression_algo_fromscratch_p7.py","file_name":"160501_regression_algo_fromscratch_p7.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"143659674","text":"import googlemaps\r\n\r\ngmaps = googlemaps.Client(key='AIzaSyBnADqF1dM-C6aJFPait1LRKdCWKHdgHME')\r\n\r\nresult_file = \"Y:/LSAM/PotentialChallenges/TransTime/donor_to_airport_v4_special_cities.tsv\"\r\n\r\ncities = [\"Los Angeles, CA\", \"San Francisco, CA\", \"New York, NY\", \"Seattle, WA\", \"San Jose, CA\", \"Honolulu, HI\", \"Miami, FL\", \"Washington, DC\",\r\n \"Nashville, TN\", \"Columbia, SC\", \"Charlotte, NC\", \"Scottsdale, AZ\", \"Corpus Christi, TX\", \"Tampa, FL\", \"Tucson, AZ\", \"Las Vegas, NV\"]\r\n\r\ndonor2airs = []\r\nwith open(\"Y:/LSAM/PotentialChallenges/TransTime/donor_to_airport_v1.tsv\", encoding=\"latin1\", mode='r') as f:\r\n donor2airs = f.readlines()\r\n\r\nzonemap = {}\r\nwith open(result_file, 'w') as sw:\r\n sw.write(donor2airs[0].rstrip() + \"\\tdonor_city\\tdonor_city_timezone\\n\")\r\n for city in cities:\r\n for line in donor2airs:\r\n parts = line.split('\\t')\r\n if parts[6].find(city) != -1:\r\n if city in zonemap:\r\n zonevalue = zonemap[city]\r\n else:\r\n timezone = gmaps.timezone((float(parts[2]), float(parts[3])))\r\n if timezone[\"status\"] == 'OK':\r\n zonevalue = timezone[\"rawOffset\"] / 3600\r\n else:\r\n zonevalue = 0\r\n zonemap[city] = zonevalue\r\n sw.write( \"%s\\t%s\\t%d\\n\" % (line.rstrip(), city, int(zonevalue)))\r\n","sub_path":"shyr/20161020_lsam/step04_get_donor_air_cities.py","file_name":"step04_get_donor_air_cities.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"528950062","text":"# Written by Philip M. 
White \n# Copyright 2009.\n# Licensed under the BSD license.\n\nfrom CreditCard import *\n\nclass CreditCardCitiDividend(CreditCard):\n\tdef __init__(self, annual_special=None):\n\t\tself.name = \"Citi Dividend Platinum Select\"\n\t\tself.url = \"https://www.citicards.com/cards/wv/cardDetail.do?screenID=909&origincontentId=CC_CASH_BACK&CONTENT_TYPE=card_category_detail\"\n\t\tself.annual_fee = Money(0)\n\t\tself.reward_types = set(['cash'])\n\n\t\tif annual_special is not None:\n\t\t\tself.annual_special = annual_special\n\n\tdef getAnnualRewardsEarned(self, s):\n\t\tannual_therest = s - self.annual_special\n\t\treturn self.annual_special*0.02 + annual_therest*0.01\n","sub_path":"cards/CreditCardCitiDividend.py","file_name":"CreditCardCitiDividend.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"255364951","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Post\nfrom django.db.models import Q\n\ndef index(request):\n allPosts = Post.objects.order_by('-datetimeOfCreation')\n\n # for now its the same\n topThreePosts = Post.objects.order_by('-datetimeOfCreation')\n\n query = request.GET.get(\"q\")\n if query is not None:\n allPosts = allPosts.filter(\n Q(title__icontains=query) |\n Q(text__icontains=query) |\n Q(author__icontains=query) |\n Q(slug__icontains=query) |\n Q(categories__icontains=query)\n )\n\n\n allCat=[]\n allPostsCat = Post.objects.order_by('-datetimeOfCreation')\n for ap in allPostsCat:\n if ap.categories not in allCat:\n allCat.append(ap.categories)\n return render(request, 'index.html', {'allPosts': allPosts, 'topThreePosts': topThreePosts,'allCat':allCat})\n\n\ndef detail(request, slug):\n post = get_object_or_404(Post, slug=slug)\n otherPost = Post.objects.filter(author=post.author).exclude(slug=post.slug).order_by('-datetimeOfCreation')\n\n # for now its the same\n topThreePosts = Post.objects.order_by('-datetimeOfCreation')\n\n query = request.GET.get(\"q\")\n if query is not None:\n allPosts = Post.objects.order_by('-datetimeOfCreation')\n allPosts = allPosts.filter(\n Q(title__icontains=query) |\n Q(text__icontains=query) |\n Q(author__icontains=query) |\n Q(slug__icontains=query) |\n Q(categories__icontains=query)\n )\n for apf in allPosts:\n apf.text = apf.text[:300] + ' ...'\n return render(request, 'index.html', {'allPosts': allPosts, 'topThreePosts': topThreePosts})\n\n return render(request, 'detail.html', {'post': post, 'topThreePosts': topThreePosts})\n\n\ndef about_author(request, author):\n pass\n\ndef archives(request):\n allPosts = Post.objects.order_by('-datetimeOfCreation')\n topThreePosts = Post.objects.order_by('-datetimeOfCreation')\n\n return render(request,'archives.html',{'allPosts':allPosts,'topThreePosts': topThreePosts})","sub_path":"BeHealthy/Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"180466221","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\n#코레일 열차표 예매\n\nbrowser = webdriver.Chrome(r\"E:\\dev\\python_workspace\\chromedriver.exe\")\nbrowser.get(\"http://www.letskorail.com\")\n\nprint(browser.window_handles)\n\nbrowser.switch_to.window(browser.window_handles[1])\nbrowser.close()\nbrowser.switch_to.window(browser.window_handles[1])\nbrowser.close()\nbrowser.switch_to.window(browser.window_handles[0])\n\nelement = 
browser.find_element_by_xpath('//*[@id=\"txtGoEnd\"]')\nelement.click()\nelement.send_keys(Keys.BACK_SPACE)\nelement.send_keys(Keys.BACK_SPACE)\nelement.send_keys(\"포항\")\nelement.send_keys(Keys.ENTER)\nelement = browser.find_element_by_xpath('//*[@id=\"res_cont_tab01\"]/form/div/fieldset/ul[2]/li[1]/a/img')\nelement.click()\nbrowser.switch_to.window(browser.window_handles[1])\nbrowser.find_element_by_id('d20200822').click()\nbrowser.switch_to.window(browser.window_handles[0])\nbrowser.find_element_by_css_selector(\"#time > option:nth-child(13)\").click()\nbrowser.find_element_by_css_selector('#res_cont_tab01 > form > div > fieldset > p > a > img').click()\n\n\ntime.sleep(5)","sub_path":"W6D5/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"187591774","text":"import numpy as np\nimport cv2; cv = cv2\nimport random\nimport math\nfrom PIL import Image, ImageFont, ImageDraw\n\ndef make_char(ch):\n\n\tfont = ImageFont.truetype(\"../tmp/SourceHanSerifTC-SemiBold.otf\",300)\n\tim = Image.new(\"L\",(512,512))\n\tdr = ImageDraw.Draw(im)\n\tdr.text((110,30),ch,200,font=font)\n\n\tim = np.array(im)\n\tim00 = im.copy()\n\tcv.imshow('',im);cv2.waitKey(0)\n\n\tim = im.astype(np.float32)/255;\n\n\tfor i in range(512):\n\t\tr = i*0.1*(0.95+0.05*math.sin(i*0.1))*0.7\n\t\t# r = random.random()*20\n\t\tim[52:460,i]=im[int(52+r):int(460+r),i]\n\tcv.imshow('',im);cv2.waitKey(0)\n\n\tim0 = im.copy()\n\n\tim -= np.random.random((512,512))*0.1\n\tm2 = cv2.GaussianBlur(np.random.random((64,64)),(11,11),0)\n\tcv.imshow('',m2);cv2.waitKey(0)\n\tim += cv2.GaussianBlur(cv2.resize(m2*1.1,(512,512),interpolation=cv2.INTER_AREA),(11,11),0)\n\n\n\tim = np.clip(im,0,1)\n\tim0 = cv2.dilate(im0,np.array([[0,1,0],[1,1,1],[0,1,0]],np.uint8),iterations=4)\n\tcv.imshow('',im);cv2.waitKey(0)\n\n\t_,im = cv2.threshold(im,0.5,1,cv2.THRESH_BINARY)\n\tim = cv2.erode(im,np.array([[0,1,0],[1,1,1],[0,1,0]],np.uint8))\n\n\tim*=im0\n\n\tim = cv2.GaussianBlur(im,(21,21),0)\n\t_,im = cv2.threshold(im,0.5,1,cv2.THRESH_BINARY)\n\tim = cv2.dilate(im,np.array([[0,1,0],[0,1,0],[0,1,0]],np.uint8),iterations=1)\n\tim = cv2.erode(im,np.array([[0,0,0],[1,1,1],[0,0,0]],np.uint8),iterations=1)\n\n\tm3 = cv2.GaussianBlur(cv2.resize(cv2.GaussianBlur(np.random.random((64,64)),(11,11),0),(512,512),interpolation=cv2.INTER_AREA),(51,51),0)\n\tm4 = cv2.GaussianBlur(cv2.resize(np.random.random((256,256)),(512,512),interpolation=cv2.INTER_AREA),(11,11),0)\n\t_,m5 = cv2.threshold(m3*m4,0.18,1,cv2.THRESH_BINARY)\n\tim = cv.morphologyEx(im, cv.MORPH_CLOSE, np.array([[0,1,0],[1,1,1],[0,1,0]],np.uint8))\n\n\tcv.imshow('',m5);cv2.waitKey(0)\n\t\n\n\t# im+=m3\n\tim *= m5\n\n\tim = (np.clip(1-im,0,1)*255).astype(np.uint8)\n\n\n\tcv.imshow('',255-im);cv2.waitKey(0)\n\n\tcv.imshow('',np.hstack((255-im00,im)));cv2.waitKey(0)\n\treturn im\nmake_char(\"姜\")\n\n\n# CH0 = 0x4e00\n# CH1 = 0x9feb\n\n# for i in range(CH0,CH1):\n# \tc = chr(i)\n# \tprint(c)\n# \tim = make_char(c)\n# \tcv2.imwrite(\"../output/fallback/\"+c+\".bmp\",im)\n\n\n","sub_path":"fallback/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"369143","text":"from datetime import timedelta\n\nfrom django.http import HttpResponse, HttpRequest, Http404, JsonResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.utils import 
timezone\n\nfrom vsdk.polls.exceptions import NoCallerIDError\nfrom vsdk.polls.models import (VoteOption, Vote, Poll, PollResultsPresentation,\n AskPollDurationConfirmation, CreatePoll, ConfirmPollCreation,\n EndPoll,\n AskPollDuration)\nfrom vsdk.polls.models.custom_elements import PollDurationPresentation\nfrom vsdk.service_development.models import CallSession, VoiceService, Language\nfrom vsdk.service_development.views import choice_generate_context\n\n\ndef poll_duration_presentation(request: HttpRequest,\n element_id: int,\n session_id: int\n ) -> HttpResponse:\n \"\"\"\n Take the current active vote for the current user, and communicate its duration.\n In case there's no active vote, communicate this fact.\n \"\"\"\n element = get_object_or_404(PollDurationPresentation, pk=element_id)\n session = get_object_or_404(CallSession, pk=session_id)\n session.record_step(element)\n\n language: Language = session.language\n\n poll = getattr(element.service, 'poll', None)\n redirect_url = None\n\n # There's an active poll\n if poll and poll.active:\n prefix_url = element.get_voice_fragment_url(language)\n number_urls = language.generate_number(poll.remaining_days)\n days_url = element.days_label.get_voice_fragment_url(language)\n\n audio_urls = [prefix_url] + number_urls + [days_url]\n\n if not element.final_element and element.redirect:\n redirect_url = element.redirect.get_absolute_url(session)\n\n # Anything else (e.g. no active poll, no user attached to this session)\n else:\n audio_urls = [element.no_active_poll_label.get_voice_fragment_url(language)]\n\n if not element.final_element and element.no_active_poll_redirect:\n redirect_url = element.no_active_poll_redirect.get_absolute_url(session)\n\n context = {\n 'audio_urls': audio_urls,\n 'redirect_url': redirect_url\n }\n\n return render(request, 'multi_audio_message.xml', context, content_type='text/xml')\n\n\ndef handle_bip(request: HttpRequest, voice_service_id: int) -> HttpResponse:\n \"\"\"\n Handle an incoming bip.\n\n As we have access to only one number (3 May), bips are assigned to vote\n options randomly. It can be misleading, but simulates the real situation\n well enough.\n\n This view requires a `callerid` in GET parameters. If it's not the case,\n NoCallerIDError (a subclass of Http404) is thrown.\n\n It also requires the voice service to be active. 
If it's not active,\n Http404 is thrown.\n\n TODO: Stop assigning vote options randomly\n \"\"\"\n caller_id = request.GET.get('callerid', None)\n\n if not caller_id:\n raise NoCallerIDError()\n else:\n caller_id = caller_id.strip()\n\n voice_service = get_object_or_404(VoiceService, pk=voice_service_id)\n\n if not voice_service.active:\n raise Http404()\n\n # We're taking a random vote option, because we don't have access to multiple numbers yet\n vote_option = VoteOption.objects.filter(poll__voice_service=voice_service).order_by('?').first()\n\n Vote.objects.create(caller_id=caller_id, vote_option=vote_option)\n\n return HttpResponse(status=204)\n\n\ndef poll_results(request: HttpRequest, element_id: int, session_id: int) -> HttpResponse:\n \"\"\"\n Take the current active poll for the current voice service, and present its results.\n In case there's no active poll, communicate this fact.\n \"\"\"\n element = get_object_or_404(PollResultsPresentation, pk=element_id)\n session = get_object_or_404(CallSession, pk=session_id)\n session.record_step(element)\n\n language: Language = session.language\n\n poll: Poll = getattr(element.service, 'poll', None)\n\n if not poll or not poll.active:\n poll = Poll.objects.order_by('-start_date').first()\n\n redirect_url = None\n\n if poll:\n audio_urls = []\n\n if not poll.active:\n audio_urls.append(element.in_previous_vote_label.get_voice_fragment_url(language))\n\n for vote_result in poll.count_votes():\n count_urls = language.generate_number(vote_result.vote_count)\n audio_urls.extend(count_urls)\n\n voted_for_url = element.get_voice_fragment_url(language)\n audio_urls.append(voted_for_url)\n\n value_urls = language.generate_number(vote_result.vote_value)\n audio_urls.extend(value_urls)\n\n if poll.active and not element.final_element and element.redirect:\n redirect_url = element.redirect.get_absolute_url(session)\n elif not poll.active and not element.final_element and element.no_active_poll_redirect:\n redirect_url = element.no_active_poll_redirect.get_absolute_url(session)\n\n # If there's no previous poll, we don't say anything\n else:\n audio_urls = []\n\n if not element.final_element and element.no_active_poll_redirect:\n redirect_url = element.no_active_poll_redirect.get_absolute_url(session)\n\n context = {\n 'audio_urls': audio_urls,\n 'redirect_url': redirect_url\n }\n\n return render(request, 'multi_audio_message.xml', context, content_type='text/xml')\n\n\ndef ask_poll_duration(request: HttpRequest, element_id: int, session_id: int) -> HttpResponse:\n \"\"\"\n Ask for the duration of a poll, and redirect to the confirmation element.\n \"\"\"\n element = get_object_or_404(AskPollDuration, pk=element_id)\n session = get_object_or_404(CallSession, pk=session_id)\n session.record_step(element)\n\n if not element.final_element and element.redirect:\n redirect_url = element.redirect.get_absolute_url(session)\n else:\n redirect_url = None\n\n context = {\n 'label_url': element.get_voice_fragment_url(session.language),\n 'redirect_url': redirect_url\n }\n return render(request, 'ask_poll_duration.xml', context, content_type='text/xml')\n\n\ndef ask_poll_duration_confirmation(request: HttpRequest,\n element_id: int,\n session_id: int\n ) -> HttpResponse:\n \"\"\"\n Confirm the duration of a poll.\n\n To both choice options the duration is sent as a GET parameter.\n \"\"\"\n element = get_object_or_404(AskPollDurationConfirmation, pk=element_id)\n session = get_object_or_404(CallSession, pk=session_id)\n session.record_step(element)\n\n language: 
Language = session.language\n\n duration = int(request.GET['duration']) # in days\n\n context = choice_generate_context(element, session)\n context.update({\n 'duration': duration,\n 'duration_audio_urls': language.generate_number(duration),\n 'days_url': element.days_label.get_voice_fragment_url(language),\n 'duration_correct_url': element.duration_correct_label.get_voice_fragment_url(language)\n })\n\n return render(request, 'confirm_poll_duration.xml', context, content_type='text/xml')\n\n\ndef create_poll(request: HttpRequest, element_id: int, session_id: int) -> HttpResponse:\n \"\"\"\n Create a new poll, and attach it to the current voice service.\n\n Then, redirect to the next element (possibly confirmation).\n \"\"\"\n element = get_object_or_404(CreatePoll, pk=element_id)\n session = get_object_or_404(CallSession, pk=session_id)\n session.record_step(element)\n\n Poll.objects.filter(voice_service=session.service).update(voice_service=None)\n\n duration = int(request.GET['duration']) # in days\n poll = Poll.objects.create(voice_service=session.service, start_date=timezone.now(),\n duration=timedelta(days=duration))\n VoteOption.objects.create(poll=poll, value=1)\n VoteOption.objects.create(poll=poll, value=2)\n\n if not element.final_element and element.redirect:\n redirect_url = element.redirect.get_absolute_url(session)\n else:\n redirect_url = None\n\n context = {\n 'audio_urls': [],\n 'redirect_url': redirect_url\n }\n return render(request, 'multi_audio_message.xml', context, content_type='text/xml')\n\n\ndef confirm_poll_created(request: HttpRequest, element_id: int, session_id: int) -> HttpResponse:\n \"\"\"\n Confirm the duration of a freshly created poll.\n \"\"\"\n element = get_object_or_404(ConfirmPollCreation, pk=element_id)\n session = get_object_or_404(CallSession, pk=session_id)\n session.record_step(element)\n\n poll: Poll = session.service.poll\n language: Language = session.language\n\n audio_urls = [element.get_voice_fragment_url(language)]\n audio_urls.extend(language.generate_number(poll.duration.days))\n audio_urls.append(element.days_label.get_voice_fragment_url(language))\n\n if not element.final_element and element.redirect:\n redirect_url = element.redirect.get_absolute_url(session)\n else:\n redirect_url = None\n\n context = {\n 'audio_urls': audio_urls,\n 'redirect_url': redirect_url\n }\n return render(request, 'multi_audio_message.xml', context, content_type='text/xml')\n\n\ndef end_poll(request: HttpRequest, element_id: int, session_id: int) -> HttpResponse:\n \"\"\"\n End the current poll.\n \"\"\"\n element = get_object_or_404(EndPoll, pk=element_id)\n session = get_object_or_404(CallSession, pk=session_id)\n session.record_step(element)\n\n Poll.objects.filter(voice_service=session.service).update(voice_service=None)\n\n if not element.final_element and element.redirect:\n redirect_url = element.redirect.get_absolute_url(session)\n else:\n redirect_url = None\n\n context = {\n 'audio_urls': [],\n 'redirect_url': redirect_url\n }\n return render(request, 'multi_audio_message.xml', context, content_type='text/xml')\n\n\ndef votes_json(request: HttpRequest, poll_id: int) -> HttpResponse:\n \"\"\"\n Get the votes for a poll in a JSON format.\n\n Example:\n {\n \"options\": [1, 2, 3],\n \"votes\": [\n {\"id\": 1, \"nr\": \"+31611490678\", \"option\": 1, \"time\": 1526034800000},\n {\"id\": 2, \"nr\": \"+31611490678\", \"option\": 2, \"time\": 1526034800001},\n {\"id\": 3, \"nr\": \"+31611490679\", \"option\": 2, \"time\": 1526034800002},\n ]\n }\n\n 
`options` contains all possible options vor this poll, and `time` is a Unix timestamp.\n \"\"\"\n poll = get_object_or_404(Poll, pk=poll_id)\n votes = Vote.objects.filter(vote_option__poll=poll).prefetch_related('vote_option')\n\n results = {\n 'options': [vo.value for vo in poll.vote_options.all()],\n 'votes': [\n {\n 'id': vote.id,\n 'nr': vote.caller_id,\n 'option': vote.vote_option.value,\n 'time': int(vote.created.timestamp())\n }\n for vote in votes\n ]\n }\n\n return JsonResponse(results)\n","sub_path":"vsdk/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"605409445","text":"\"\"\"\nThis is my attempt at learning how to use the Chudnovsky Algorithm to estimate the value of PI\nThanks to the author Pradipta (geekpradd) for her code\n\"\"\"\n\nimport math\nfrom decimal import *\nimport sys\n\nsys.setrecursionlimit(100000)\ngetcontext().rounding = ROUND_FLOOR\n\n\ndef factorial(n):\n if n == 0:\n return 1\n return n * factorial(n - 1)\n\n\ndef get_iter(k):\n k += 1\n getcontext().prec = k\n sum_iter = 0\n for k in range(k):\n up = factorial(6*k) * (545140134 * k + 13591409)\n down = factorial(3*k) * (factorial(k))**3 * (-262537412640768000) ** k\n sum_iter += (up / down)\n return Decimal(sum_iter)\n\n\ndef pi_value(k):\n up = 426880 * math.sqrt(10005)\n iter = get_iter(k)\n pi = Decimal(up) / iter\n return pi\n\n\ndef shell():\n \"\"\"\n function to create an interactive shell\n This only runs when the script is being run directly\n Otherwise this script can be imported and its function can be useful to calculate the value\n :return: None\n \"\"\"\n print(\"Welcome to the PI calculator. Enter the number of decimal places to estimate the value of pi or enter quit to exit\")\n while True:\n print(\">>>\", end=\"\")\n entry = input()\n if entry == \"quit\":\n print(\"Thanks...\")\n break\n elif not entry.isdigit():\n print(\"You did not enter a number, try again...\")\n else:\n print(pi_value(int(entry)))\n\n\nif __name__ == '__main__':\n shell()\n","sub_path":"PythonPI_Vin.py","file_name":"PythonPI_Vin.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"36328016","text":"def count(lst):\n res = 0\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n if lst[i][j]:\n res += 1\n return res // 2\n\n\ncount_in = open('count.in', 'r')\ncount_out = open('count.out', 'w')\n\nn = int(count_in.readline())\nlst = [[]] * n\nfor i in range(n):\n lst[i] = list(map(int, count_in.readline().split()))\nprint(count(lst), file=count_out)\ncount_in.close()\ncount_out.close()\n","sub_path":"lKSH/day06/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"559363781","text":"from flask import Flask, render_template, request, url_for, redirect # Imports the flask framework.\nfrom flask_bootstrap import Bootstrap\nfrom . 
forms import TicketForm\n\napp = Flask(__name__)\napp.secret_key = \"SECRET\"\nBootstrap(app)\n\n@app.route('/', methods=(\"GET\", \"POST\"))\n@app.route('/home', methods=(\"GET\", \"POST\"))\ndef homepage():\n    ticketform = TicketForm()\n    if request.method == \"POST\":\n        departure_location = request.form['current_city']\n        destination = request.form['destination_location']\n        departure_date = request.form['leave']\n        return_date = request.form['returning']\n\n        return redirect(url_for('pricespage'))\n    return render_template('webpage.html', form=ticketform)\n\n\n@app.route('/prices', methods=(\"GET\", \"POST\"))\ndef pricespage():\n    return render_template('pricespage.html')\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"flaskmain.py","file_name":"flaskmain.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"}
{"seq_id":"379793306","text":"#Imports for MariaDB to MongoDB\r\nimport findspark\r\nfindspark.init('C:/spark')\r\nimport pymongo as mongo\r\nimport time\r\nfindspark.find()\r\nfrom pyspark import SparkContext, SparkConf\r\nfrom pyspark.sql import *\r\nfrom pyspark.sql.types import *\r\nfrom pyspark.sql.functions import *\r\nimport requests, os\r\nfrom kafka import KafkaProducer\r\nfrom pyspark.sql import SparkSession, Row\r\nconf_new = SparkConf().setAppName('appName').setMaster('local[*]')\r\nos.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.1 pyspark-shell'\r\n\r\n#---------------------------------------------------------------------------\r\n#MARIA TRANSFER-------------------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef mariaTransfer():\r\n    #####For loop that grabs data from mariaDB###################################\r\n    spark = SparkSession.builder.getOrCreate()\r\n    print('---Loading Data from MariaDB---')\r\n    def importFromMaria():\r\n        tables = [\"CDW_SAPP_BRANCH\", \"CDW_SAPP_CREDITCARD\", \"CDW_SAPP_CUSTOMER\"]\r\n        totalData = []\r\n        for k in tables:\r\n            df = spark.read.format(\"jdbc\").options(\r\n            url = \"jdbc:mysql://localhost:3306/cdw_sapp\",\r\n            driver = \"com.mysql.cj.jdbc.Driver\",\r\n            dbtable = k,\r\n            user = \"root\",\r\n            password = \"root\"\r\n            ).load()\r\n            totalData.append(df)\r\n        return totalData\r\n    print('---Data Loaded into DataFrames---')\r\n    ############################here we'll transform the dataframes###############\r\n\r\n    #Branch Transformation--------------------------------------------------------\r\n    #Giving the import a better name in case of later use\r\n    branch_DF = importFromMaria()[0]\r\n    print('Branch of CDW_SAPP transformation started')\r\n\r\n    #Transform functions\r\n    phoneNum = udf(lambda x: \"(\" + str(x)[0:3] + \")\" + str(x)[3:6] + \"-\" + str(x)[6:10])\r\n    zipCheck = udf(lambda x: '9999' if x == 'Null' else x)\r\n\r\n    #Place and Replace columns -- Phone Number Fix\r\n    branch_cleanPhone = branch_DF.withColumn('BRANCH_PHONE_fix',phoneNum('BRANCH_PHONE'))\r\n    branch_cleanedPhone = branch_cleanPhone.drop('BRANCH_PHONE').withColumnRenamed('BRANCH_PHONE_fix', 'BRANCH_PHONE')\r\n\r\n    #Place and Replace columns -- Zip Code Check\r\n    branch_cleanZip = branch_cleanedPhone.withColumn('BRANCH_ZIP_check',zipCheck('BRANCH_ZIP'))\r\n    branch_final = branch_cleanZip.drop('BRANCH_ZIP').withColumnRenamed('BRANCH_ZIP_check','BRANCH_ZIP')\r\n\r\n    print('Here is a single row sample of the Branch transfer')\r\n    
branch_final.show(1)\r\n\r\n\r\n #CreditCard Transformation====================================================\r\n #Giving the import a better name in case of later use\r\n creditCard_DF = importFromMaria()[1]\r\n print('Credit Card of CDW_SAPP transformation started')\r\n\r\n #Transform functions\r\n zerosInsert = udf(lambda x: '0'+ str(x) if len(str(x)) == 1 else str(x))\r\n\r\n #Place and Replace columns -- merging timeID like frankenstein\r\n timeID = creditCard_DF.select(concat(creditCard_DF.YEAR, lit('-'), zerosInsert(creditCard_DF.MONTH), lit('-'), zerosInsert(creditCard_DF.DAY)).alias(\"TIMEID\"))\r\n timeID = timeID.withColumn(\"id\", monotonically_increasing_id())\r\n creditCard_DF = creditCard_DF.withColumn('id', monotonically_increasing_id())\r\n merged = creditCard_DF.join(timeID, \"id\", \"outer\")\r\n creditCard_final = merged.drop('YEAR','MONTH','DAY','id')\r\n\r\n print('Here is a single row sample of the Credit Card transfer')\r\n creditCard_final.show(1)\r\n\r\n\r\n #Customer Transformation++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n #Giving the import a better name in case of later use\r\n customer_DF = importFromMaria()[2]\r\n print('Customer of CDW_SAPP transformation started')\r\n\r\n #Transform functions\r\n titleCase = udf(lambda x: x.title())\r\n lowerCase = udf(lambda x: x.lower())\r\n phoneNumSmall = udf(lambda x: str(x)[0:3] + \"-\" + str(x)[3:7])\r\n\r\n #Place and Replace columns -- first name to Title Case\r\n customer_cleanFirst = customer_DF.withColumn('FIRST_NAME_fix',titleCase('FIRST_NAME'))\r\n customer_cleanedFirst = customer_cleanFirst.drop('FIRST_NAME').withColumnRenamed('FIRST_NAME_fix', 'FIRST_NAME')\r\n\r\n #Place and Replace columns -- middle name to lower case\r\n customer_cleanMiddle = customer_cleanedFirst.withColumn('MIDDLE_NAME_fix',lowerCase('MIDDLE_NAME'))\r\n customer_cleanedMiddle = customer_cleanMiddle.drop('MIDDLE_NAME').withColumnRenamed('MIDDLE_NAME_fix', 'MIDDLE_NAME')\r\n\r\n #Place and Replace columns -- last name to Title Case\r\n customer_cleanLast = customer_cleanedMiddle.withColumn('LAST_NAME_fix',titleCase('LAST_NAME'))\r\n customer_cleanedLast = customer_cleanLast.drop('LAST_NAME').withColumnRenamed('LAST_NAME_fix', 'LAST_NAME')\r\n\r\n #Place and Replace columns -- convert Zip to int\r\n customer_intZip = customer_cleanedLast.withColumn(\"CUST_ZIP_fix\", customer_cleanedLast.CUST_ZIP.cast(IntegerType()))\r\n customer_zip = customer_intZip.drop(\"CUST_ZIP\").withColumnRenamed(\"CUST_ZIP_fix\", \"CUST_ZIP\")\r\n\r\n #Place and Replace columns -- transform small Phone Number\r\n customer_smallPhone = customer_zip.withColumn('CUST_PHONE_fix',phoneNumSmall('CUST_PHONE'))\r\n customer_cleanedSmallPhone = customer_smallPhone.drop('CUST_PHONE').withColumnRenamed('CUST_PHONE_fix', 'CUST_PHONE')\r\n\r\n #Unholy abomination that makes the joins possible\r\n customer_hold = customer_cleanedSmallPhone.select(concat(customer_cleanedSmallPhone.APT_NO, lit(', '), customer_cleanedSmallPhone.STREET_NAME).alias('CUST_STREET'))\r\n customer_hold = customer_hold.withColumn(\"id\", monotonically_increasing_id())\r\n customer_cold = customer_cleanedSmallPhone.withColumn(\"id\", monotonically_increasing_id())\r\n merged = customer_cold.join(customer_hold, \"id\", \"outer\")\r\n customer_final = merged.drop('APT_NO','STREET_NAME', 'id')\r\n\r\n print('Here is a single row sample of the Customer transfer')\r\n customer_final.show(1)\r\n\r\n #Loading here\r\n #For loop that puts the data frames into MongoDB\r\n print('Now Loading 
transformed data into MongoDB')\r\n collections = ['BRANCH', 'CUSTOMER','CREDITCARD']\r\n dataFrames = [branch_final, customer_final,creditCard_final]\r\n for k in range(len(collections)):\r\n uri = \"mongodb://127.0.0.1/CD_SAPP.dbs\"\r\n spark_mongodb = SparkSession.builder.config(\"spark.mongodb.input.uri\", uri).config(\"spark.mongodb.output.uri\",uri).getOrCreate()\r\n dataFrames[k].write.format(\"com.mongodb.spark.sql.DefaultSource\").mode('append').option('database','CDW_SAPP').option('collection', collections[k]).save()\r\n print('---Transformed Data Loaded---')\r\n\r\n#---------------------------------------------------------------------------\r\n#PLAN ATTRIBUTES PULL------------------------------------------------------- # This fucntion has the thought process comments\r\n#--------------------------------------------------------------------------- # Since it is very similar to the other 7 data tansfer functions I'll save time not writing on all of them\r\ndef planAttributesPull():\r\n\r\n #As this is a repeating type of code through out the program here I will put the comment explainations here\r\n\r\n #Function to make kafka planAttributes topic\r\n def kafka_prod_planAttributes():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')#set producers conneciton\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/PlanAttributes.csv\")#pull data from github\r\n data_list = [data for data in response.text.splitlines()[1:]]#take away header and read each line and put into list\r\n\r\n for data in data_list:\r\n producer.send('PlanAttributes', data.encode('utf-8'))#for each item in the list(which is a string line) submit as message in kafka\r\n producer.flush()#close kafka sending\r\n\r\n #Function to connect spark to kafka and make data frames\r\n def spark_kafka_planAttribues():\r\n spark = SparkSession.builder.getOrCreate()#make spark session\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'PlanAttributes').option(\"startingOffsets\", \"earliest\").load()\r\n # ^ subscribe to the topic of the github pull and input everything from the earliest message and put into a one column dataframe\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")#Make sure everything is put into a string\r\n output_query = kafka_value_df.writeStream.queryName(\"PlanAttributes\").format(\"memory\").start()#make into query-able dataframe\r\n\r\n output_query.awaitTermination(10)\r\n\r\n value_df = spark.sql(\"select * from PlanAttributes\")#take all from other earlier dataframe\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\"\\t\"))#split things from value_df dataframe and make more columns\r\n value_row_rdd = value_rdd.map(lambda i: Row(AttributesID=int(i[0]),\r\n BeginPrimaryCareCostSharingAfterNumberOfVisits=int(i[1]),\r\n BeginPrimaryCareDeductibleCoinsuranceAfterNumberOfCopays=int(i[2]),\r\n BenefitPackageId=int(i[3]),\r\n BusinessYear=int(i[4]),\r\n ChildOnlyOffering=i[5],\r\n CompositeRatingOffered=i[6],\r\n CSRVariationType=i[7],\r\n DentalOnlyPlan=i[8],\r\n DiseaseManagementProgramsOffered=i[9],\r\n FirstTierUtilization=i[10],\r\n HSAOrHRAEmployerContribution=i[11],\r\n HSAOrHRAEmployerContributionAmount=i[12],\r\n InpatientCopaymentMaximumDays=int(i[13]),\r\n IsGuaranteedRate=i[14],\r\n IsHSAEligible=i[15],\r\n IsNewPlan=i[16],\r\n IsNoticeRequiredForPregnancy=i[17],\r\n IsReferralRequiredForSpecialist=i[18],\r\n 
IssuerId=int(i[19]),\r\n MarketCoverage=i[20],\r\n MedicalDrugDeductiblesIntegrated=i[21],\r\n MedicalDrugMaximumOutofPocketIntegrated=i[22],\r\n MetalLevel=i[23],\r\n MultipleInNetworkTiers=i[24],\r\n NationalNetwork=i[25],\r\n NetworkId=i[26],\r\n OutOfCountryCoverage=i[27],\r\n OutOfServiceAreaCoverage=i[28],\r\n PlanEffectiveDate=i[29],\r\n PlanExpirationDate=i[30],\r\n PlanId=i[31],\r\n PlanLevelExclusions=i[32],\r\n PlanMarketingName=i[33],\r\n PlanType=i[34],\r\n QHPNonQHPTypeId=i[35],\r\n SecondTierUtilization=i[36],\r\n ServiceAreaId=i[37],\r\n sourcename=i[38],\r\n SpecialtyDrugMaximumCoinsurance=i[39],\r\n StandardComponentId=i[40],\r\n StateCode=i[41],\r\n WellnessProgramOffered=i[42])) #take each row item and apply column name like key to the value\r\n\r\n df = spark.createDataFrame(value_row_rdd)#make final dataframe from rdd above\r\n\r\n print(\"Here is a small sample of Plan Attributes Data\")\r\n df.show(2)#show the dataframe for user to see that it was made cleanly\r\n\r\n\r\n df.printSchema()#show schema of the dataframe\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") \\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'PlanAttributes') \\\r\n .option('uri', \"mongodb://127.0.0.1/HealthInsuranceMarketplace.dbs\") \\\r\n .save()#write the data frame onto MongoDB\r\n\r\n kafka_prod_planAttributes()\r\n spark_kafka_planAttribues()\r\n\r\n#---------------------------------------------------------------------------\r\n#NETWORKING PULL -----------------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef networkingPull():\r\n def kafka_prod_networkingPull():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/Network.csv\")\r\n data_list = [data for data in response.text.splitlines()[1:]]\r\n\r\n for data in data_list:\r\n producer.send('Network', data.encode('utf-8'))\r\n\r\n producer.flush()\r\n\r\n def spark_kafka_networkingPull():\r\n\r\n spark = SparkSession.builder.getOrCreate()\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'Network').option(\"startingOffsets\", \"earliest\").load()\r\n\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")\r\n output_query = kafka_value_df.writeStream.queryName(\"Network\").format(\"memory\").start()\r\n\r\n output_query.awaitTermination(10)\r\n\r\n value_df = spark.sql(\"select * from Network\")\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\",\"))\r\n value_row_rdd = value_rdd.map(lambda i: Row(BusinessYear=int(i[0]),\r\n StateCode=i[1],\r\n IssuerId=int(i[2]),\r\n SourceName=i[3],\r\n VersionNum=int(i[4]),\r\n ImportDate=i[5],\r\n IssuerId2=int(i[6]),\r\n StateCode2=i[7],\r\n NetworkName=i[8],\r\n NetworkId=i[9],\r\n NetworkURL=i[10],\r\n RowNumber=i[11],\r\n MarketCoverage=i[12],\r\n DentalOnlyPlan=i[13]))\r\n\r\n df = spark.createDataFrame(value_row_rdd)\r\n print(\"Here is a small sample of Network Data\")\r\n df.show(2)\r\n df.printSchema()\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") \\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'Network') \\\r\n .option('uri', \"mongodb://127.0.0.1/HealthInsuranceMarketplace.dbs\") \\\r\n .save()\r\n\r\n kafka_prod_networkingPull()\r\n 
spark_kafka_networkingPull()\r\n\r\n#---------------------------------------------------------------------------\r\n#SERVICE AREA PULL ---------------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef serviceAreaPull():\r\n def kafka_prod_serviceAreaPull():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/ServiceArea.csv\")\r\n data_list = [data for data in response.text.splitlines()[1:]]\r\n #print(data_list)\r\n for data in data_list:\r\n #print(data)\r\n producer.send('ServiceArea', data.encode('utf-8'))\r\n producer.flush()\r\n\r\n def spark_kafka_serviceAreaPull():\r\n\r\n spark = SparkSession.builder.getOrCreate()\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'ServiceArea').option(\"startingOffsets\", \"earliest\").load()\r\n\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")\r\n output_query = kafka_value_df.writeStream.queryName(\"ServiceArea\").format(\"memory\").start()\r\n\r\n output_query.awaitTermination(10)\r\n\r\n value_df = spark.sql(\"select * from ServiceArea\")\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\",\"))\r\n value_row_rdd = value_rdd.map(lambda i: Row(BusinessYear=int(i[0]),\r\n StateCode=i[1],\r\n IssuerId=int(i[2]),\r\n SourceName=i[3],\r\n VersionNum=i[4],\r\n ImportDate=i[5],\r\n IssuerId2=i[6],\r\n StateCode2=i[7],\r\n ServiceAreaId=i[8],\r\n ServiceAreaName=i[9],\r\n CoverEntireState=i[10],\r\n County=i[11],\r\n PartialCounty=i[12],\r\n ZipCodes=i[13],\r\n PartialCountyJustification=i[14],\r\n RowNumber=i[15],\r\n MarketCoverage=i[16],\r\n DentalOnlyPlan=i[17]))\r\n\r\n\r\n df = spark.createDataFrame(value_row_rdd)\r\n print(\"Here is a small sample of Service Area Data\")\r\n df.show(2)\r\n df.printSchema()\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") \\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'ServiceArea') \\\r\n .option('uri', \"mongodb://127.0.0.1/HealthInsuranceMarketplace.dbs\") \\\r\n .save()\r\n\r\n kafka_prod_serviceAreaPull()\r\n spark_kafka_serviceAreaPull()\r\n\r\n#---------------------------------------------------------------------------\r\n#INSURANCE PULL ------------------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef insurancePull():\r\n def kafka_prod_insurancePull():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/insurance.txt\")\r\n data_list = [data for data in response.text.splitlines()[1:]]\r\n #print(data_list)\r\n for data in data_list:\r\n #print(data)\r\n producer.send('insurance', data.encode('utf-8'))\r\n producer.flush()\r\n\r\n def spark_kafka_insurancePull():\r\n\r\n spark = SparkSession.builder.getOrCreate()\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'insurance').option(\"startingOffsets\", \"earliest\").load()\r\n\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")\r\n output_query = kafka_value_df.writeStream.queryName(\"insurance\").format(\"memory\").start()\r\n\r\n output_query.awaitTermination(10)\r\n\r\n 
value_df = spark.sql(\"select * from insurance\")\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\"\\t\"))\r\n value_row_rdd = value_rdd.map(lambda i: Row(age=int(i[0]),\r\n sex=i[1],\r\n bmi=float(i[2]),\r\n children=int(i[3]),\r\n smoker=i[4],\r\n region=i[5],\r\n charges=float(i[6])))\r\n\r\n\r\n df = spark.createDataFrame(value_row_rdd)\r\n print(\"Here is a small sample of Insurance Data\")\r\n df.show(2)\r\n df.printSchema()\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") \\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'Insurance') \\\r\n .option('uri', \"mongodb://127.0.0.1/HealthInsuranceMarketplace.dbs\") \\\r\n .save()\r\n\r\n kafka_prod_insurancePull()\r\n spark_kafka_insurancePull()\r\n\r\n#---------------------------------------------------------------------------\r\n#BENEFITS COST SHARING Pt 1 ------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef benefitsCostSharing_partOnePull():\r\n def kafka_prod_benefitsCostSharing_partOnePull():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/BenefitsCostSharing_partOne.txt\")\r\n data_list = [data for data in response.text.splitlines()[1:]]\r\n #print(data_list)\r\n for data in data_list:\r\n #print(data)\r\n producer.send('BenefitsCostSharing_partOne', data.encode('utf-8'))\r\n producer.flush()\r\n\r\n def spark_kafka_benefitsCostSharing_partOnePull():\r\n\r\n spark = SparkSession.builder.getOrCreate()\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'BenefitsCostSharing_partOne').option(\"startingOffsets\", \"earliest\").load()\r\n\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")\r\n output_query = kafka_value_df.writeStream.queryName(\"BenefitsCostSharing_partOne\").format(\"memory\").start()\r\n\r\n output_query.awaitTermination(10)\r\n\r\n value_df = spark.sql(\"select * from BenefitsCostSharing_partOne\")\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\"\\t\"))\r\n value_row_rdd = value_rdd.map(lambda i: Row(BenefitName=i[0],\r\n BusinessYear=i[1],\r\n EHBVarReason=i[2],\r\n IsCovered=i[3],\r\n IssuerId=i[4],\r\n LimitQty=i[5],\r\n LimitUnit=i[6],\r\n MinimumStay=i[7],\r\n PlanId=i[8],\r\n SourceName=i[9],\r\n StateCode=i[10]))\r\n df = spark.createDataFrame(value_row_rdd)\r\n print(\"--- Importing Part One ---\")\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") \\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'BenefitsCostSharing') \\\r\n .option('uri', \"mongodb://127.0.0.1/test_db.dbs\") \\\r\n .save()\r\n\r\n kafka_prod_benefitsCostSharing_partOnePull()\r\n spark_kafka_benefitsCostSharing_partOnePull()\r\n\r\n#---------------------------------------------------------------------------\r\n#BENEFITS COST SHARING Pt 2 ------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef benefitsCostSharing_partTwoPull():\r\n def kafka_prod_benefitsCostSharing_partTwoPull():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/BenefitsCostSharing_partTwo.txt\")\r\n data_list 
= [data for data in response.text.splitlines()[1:]]\r\n #print(data_list)\r\n for data in data_list:\r\n #print(data)\r\n producer.send('BenefitsCostSharing_partTwo', data.encode('utf-8'))\r\n producer.flush()\r\n\r\n def spark_kafka_benefitsCostSharing_partTwoPull():\r\n\r\n spark = SparkSession.builder.getOrCreate()\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'BenefitsCostSharing_partTwo').option(\"startingOffsets\", \"earliest\").load()\r\n\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")\r\n output_query = kafka_value_df.writeStream.queryName(\"BenefitsCostSharing_partTwo\").format(\"memory\").start()\r\n\r\n output_query.awaitTermination(10)\r\n\r\n value_df = spark.sql(\"select * from BenefitsCostSharing_partTwo\")\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\"\\t\"))\r\n value_row_rdd = value_rdd.map(lambda i: Row(BenefitName=i[0],\r\n BusinessYear=i[1],\r\n EHBVarReason=i[2],\r\n IsCovered=i[3],\r\n IssuerId=i[4],\r\n LimitQty=i[5],\r\n LimitUnit=i[6],\r\n MinimumStay=i[7],\r\n PlanId=i[8],\r\n SourceName=i[9],\r\n StateCode=i[10]))\r\n df = spark.createDataFrame(value_row_rdd)\r\n print(\"--- Importing Part Two ---\")\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") \\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'BenefitsCostSharing') \\\r\n .option('uri', \"mongodb://127.0.0.1/test_db.dbs\") \\\r\n .save()\r\n\r\n kafka_prod_benefitsCostSharing_partTwoPull()\r\n spark_kafka_benefitsCostSharing_partTwoPull()\r\n\r\n#---------------------------------------------------------------------------\r\n#BENEFITS COST SHARING Pt 3 ------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef benefitsCostSharing_partThreePull():\r\n def kafka_prod_benefitsCostSharing_partThreePull():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/BenefitsCostSharing_partThree.txt\")\r\n data_list = [data for data in response.text.splitlines()[1:]]\r\n #print(data_list)\r\n for data in data_list:\r\n #print(data)\r\n producer.send('BenefitsCostSharing_partThree', data.encode('utf-8'))\r\n producer.flush()\r\n\r\n def spark_kafka_benefitsCostSharing_partThreePull():\r\n\r\n spark = SparkSession.builder.getOrCreate()\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'BenefitsCostSharing_partThree').option(\"startingOffsets\", \"earliest\").load()\r\n\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")\r\n output_query = kafka_value_df.writeStream.queryName(\"BenefitsCostSharing_partThree\").format(\"memory\").start()\r\n\r\n output_query.awaitTermination(10)\r\n\r\n value_df = spark.sql(\"select * from BenefitsCostSharing_partThree\")\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\"\\t\"))\r\n value_row_rdd = value_rdd.map(lambda i: Row(BenefitName=i[0],\r\n BusinessYear=i[1],\r\n EHBVarReason=i[2],\r\n IsCovered=i[3],\r\n IssuerId=i[4],\r\n LimitQty=i[5],\r\n LimitUnit=i[6],\r\n MinimumStay=i[7],\r\n PlanId=i[8],\r\n SourceName=i[9],\r\n StateCode=i[10]))\r\n df = spark.createDataFrame(value_row_rdd)\r\n print(\"--- Importing Part Three ---\")\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") 
\\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'BenefitsCostSharing') \\\r\n .option('uri', \"mongodb://127.0.0.1/test_db.dbs\") \\\r\n .save()\r\n\r\n kafka_prod_benefitsCostSharing_partThreePull()\r\n spark_kafka_benefitsCostSharing_partThreePull()\r\n\r\n#---------------------------------------------------------------------------\r\n#BENEFITS COST SHARING Pt 4 ------------------------------------------------\r\n#---------------------------------------------------------------------------\r\ndef benefitsCostSharing_partFourPull():\r\n def kafka_prod_benefitsCostSharing_partFourPull():\r\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\r\n response = requests.get(\"https://raw.githubusercontent.com/platformps/Healthcare-Insurance-Data/master/BenefitsCostSharing_partFour.txt\")\r\n data_list = [data for data in response.text.splitlines()[1:]]\r\n #print(data_list)\r\n for data in data_list:\r\n #print(data)\r\n producer.send('BenefitsCostSharing_partFour', data.encode('utf-8'))\r\n producer.flush()\r\n\r\n def spark_kafka_benefitsCostSharing_partFourPull():\r\n\r\n spark = SparkSession.builder.getOrCreate()\r\n raw_kafka_df = spark.readStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", \"localhost:9092\").option(\"subscribe\", 'BenefitsCostSharing_partFour').option(\"startingOffsets\", \"earliest\").load()\r\n\r\n kafka_value_df = raw_kafka_df.selectExpr(\"CAST(value AS STRING)\")\r\n output_query = kafka_value_df.writeStream.queryName(\"BenefitsCostSharing_partFour\").format(\"memory\").start()\r\n\r\n output_query.awaitTermination(10)\r\n\r\n value_df = spark.sql(\"select * from BenefitsCostSharing_partFour\")\r\n value_rdd = value_df.rdd.map(lambda i: i['value'].split(\"\\t\"))\r\n value_row_rdd = value_rdd.map(lambda i: Row(BenefitName=i[0],\r\n BusinessYear=i[1],\r\n EHBVarReason=i[2],\r\n IsCovered=i[3],\r\n IssuerId=i[4],\r\n LimitQty=i[5],\r\n LimitUnit=i[6],\r\n MinimumStay=i[7],\r\n PlanId=i[8],\r\n SourceName=i[9],\r\n StateCode=i[10]))\r\n df = spark.createDataFrame(value_row_rdd)\r\n print(\"--- Importing Part Four ---\")\r\n print(\"Here is a small sample of Benefits Cost Sharing Data\")\r\n df.show(2)\r\n df.printSchema()\r\n df.write.format(\"com.mongodb.spark.sql.DefaultSource\") \\\r\n .mode('append') \\\r\n .option('database','HealthInsuranceMarketplace') \\\r\n .option('collection', 'BenefitsCostSharing') \\\r\n .option('uri', \"mongodb://127.0.0.1/test_db.dbs\") \\\r\n .save()\r\n\r\n kafka_prod_benefitsCostSharing_partFourPull()\r\n spark_kafka_benefitsCostSharing_partFourPull()\r\n\r\ndef main():\r\n\r\n #---------------------------------------------------------------------------------------------------------------------------\r\n # So this is a pretty simple nested while/if loop selection screen: whatever the user chooses it runs that specific function\r\n #---------------------------------------------------------------------------------------------------------------------------\r\n\r\n print(\"Hello and welcome to CDW_SAPP Data and HealthInsuranceMarketplace Data Transfer selection\")\r\n while True:\r\n print(\"Would you like to impot CDW_SAPP Data or HealthInsuranceMarketplace Data into MongoDB?\")\r\n print(\"1) CDW_SAPP\")\r\n print(\"2) HealthInsuranceMarketplace Data\")\r\n print(\"3) Quit\")\r\n\r\n startSelection = int(input(\"Selection: \"))\r\n\r\n if startSelection == 1:\r\n print(\"Starting full transfer from Maria DB to MongoDB.\")\r\n mariaTransfer()\r\n 
print(\"MongoDB now has the Transformed MariaDB Data.\")\r\n elif startSelection == 2:\r\n while True:\r\n print(\"Witch part of the HealthInsuranceMarketplace Data do you want to import into MongoDB?\")\r\n print(\"1) Plan Attributes Table\")\r\n print(\"2) Networking Table\")\r\n print(\"3) Service Area Table\")\r\n print(\"4) Insurance Table\")\r\n print(\"5) Benefits Cost Sharing Table\")\r\n print(\"6) Back to earlier menu\")\r\n innerSelection = int(input(\"Selection: \"))\r\n\r\n if innerSelection == 1:\r\n print(\"Getting Plan Attributes Table into MongoDB\")\r\n planAttributesPull()\r\n print(\"--- Plan Attributes now in MongoDB ---\")\r\n elif innerSelection == 2:\r\n print(\"Getting Networking Table into MongoDB\")\r\n networkingPull()\r\n print(\"--- Networking Table in MonogDB ---\")\r\n elif innerSelection == 3:\r\n print(\"Getting Service Area Table into MongoDB\")\r\n serviceAreaPull()\r\n print(\"--- Service Area Table in MongoDB ---\")\r\n elif innerSelection == 4:\r\n print(\"Getting Insurance Table into MongoDB\")\r\n insurancePull()\r\n print(\"--- Insurance Table in MongoDB ---\")\r\n elif innerSelection == 5:\r\n print(\"===Benefits Cost Sharing table is large so it must be done in for parts, please wait for input when needed===\")\r\n for i in range(4):\r\n if i == 0:\r\n print(\"---Benefits Cost Sharing Table Part One---\")\r\n benefitsCostSharing_partOnePull()\r\n print(\"---Benefits Cost Sharing Table Part One in MongoDB---\")\r\n time.sleep(25)\r\n elif i == 1:\r\n print(\"---Benefits Cost Sharing Table Part Two---\")\r\n benefitsCostSharing_partTwoPull()\r\n print(\"---Benefits Cost Sharing Table Part Two in MongoDB---\")\r\n time.sleep(25)\r\n elif i == 2:\r\n print(\"---Benefits Cost Sharing Table Part Three---\")\r\n benefitsCostSharing_partThreePull()\r\n print(\"---Benefits Cost Sharing Table Part Three in MongoDB---\")\r\n time.sleep(25)\r\n elif i == 3:\r\n print(\"---Benefits Cost Sharing Table Part Four---\")\r\n benefitsCostSharing_partFourPull()\r\n print(\"---Benefits Cost Sharing Table Part Four in MongoDB---\")\r\n else:\r\n break\r\n elif innerSelection == 6:\r\n print(\"---Ok backing out---\")\r\n break\r\n else:\r\n print(\"That is not a valid option try again.\")\r\n\r\n\r\n elif startSelection == 3:\r\n print(\"Thank you for your time.\")\r\n break\r\n else:\r\n print('That is not a valid option try again.')\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"DataTransfer/TackettJesse_MainTransfer.py","file_name":"TackettJesse_MainTransfer.py","file_ext":"py","file_size_in_byte":37190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"604711056","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nmyfont = fm.FontProperties(fname=u'C:\\Windows\\Fonts\\simsun.ttc',size=10)\n\nfrom sklearn.model_selection import train_test_split,cross_val_score\nfrom sklearn import datasets as ds\nfrom sklearn.svm import LinearSVC,SVC\nfrom sklearn.metrics import classification_report\n#%% 加载数据\ndef load_classification_data(databaseName):\n target_names = []\n feature_names = []\n if databaseName == 'iris': \n raw_data = ds.load_iris()\n target_names = raw_data.target_names\n feature_names = raw_data.feature_names\n \n elif databaseName == 'digits':\n raw_data = ds.load_digits()\n target_names = np.array(raw_data.target_names).astype('str') \n feature_names = None\n \n elif databaseName == 'breast_cance':\n raw_data = 
ds.load_breast_cancer()\n target_names = raw_data.target_names\n feature_names = raw_data.feature_names\n \n elif databaseName == 'wine':\n raw_data = ds.load_wine()\n target_names = raw_data.target_names\n feature_names = raw_data.feature_names\n pass \n \n X = raw_data.data\n y = raw_data.target\n \n x_train,x_test,y_train,y_test = train_test_split(X,y,\n random_state=0,\n test_size=0.3,\n shuffle = True, # 打乱样本\n stratify=y # 保持类别比例\n )\n return x_train,x_test,y_train,y_test,target_names,feature_names \n\n#%% 测试linearSVC在不同的数据集上的表现\ndef test_linearSVC(dataBaseNames): \n dataBaseNames = dataBaseNames\n scores = []\n for dataBaseName in dataBaseNames:\n print('数据集:',dataBaseName)\n clf = LinearSVC()\n x_train,x_test,y_train,y_test,target_names,feature_names = load_classification_data(dataBaseName)\n clf.fit(x_train,y_train)\n scores.append( cross_val_score(clf,x_test,y_test,cv=10) )\n print(classification_report(y_test,\n clf.predict(x_test),\n target_names=target_names))\n pass\n return np.array(scores)\n#%% 测试SVC在wine数据集上的表现\ndef test_SVC(datasetName):\n # 加载训练集与测试集等信息\n x_train,x_test,y_train,y_test,target_names,feature_names = load_classification_data(datasetName)\n y_train = y_train + 1\n y_test = y_test + 1\n # 构建分类器,并训练\n clf = SVC() \n clf.fit(x_train,y_train)\n # 输出分类性能报告\n print(classification_report(y_test,\n clf.predict(x_test),\n target_names=target_names))\n # 进行10折交叉验证\n scores = cross_val_score(clf,x_test,y_test,cv=10)\n print(\"%15s : \\tAccuracy= %0.2f, \\tstd= %0.2f, \\tmin=%.2f, \\tmax=%.2f\" \n % (datasetName,scores.mean(), scores.std(),scores.min(),scores.max() ) )\n return \n#%%\nif __name__ == '__main__':\n# dataBaseNames = ['iris','digits','breast_cance','wine']\n# scores =test_linearSVC(dataBaseNames)\n# for score,name in zip(scores,dataBaseNames):\n# print(\"%15s : \\tAccuracy= %0.2f, \\tstd= %0.2f, \\tmin=%.2f, \\tmax=%.2f\" \n# % (name,score.mean(), score.std(),score.min(),score.max() ) )\n# pass\n test_SVC('iris')\n \n ","sub_path":"书稿/SVC_线性_demo.py","file_name":"SVC_线性_demo.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"402284721","text":"#!/usr/bin/python\n\n## @file\n# Contains classes SceneObject and InteractiveObject.\n\n# import avango-guacamole libraries\nimport avango\nimport avango.gua\nimport avango.script\nfrom avango.script import field_has_changed\n\n# import framework libraries\nfrom Visualization import *\nfrom InteractiveObject import *\n\n## Abstract base class to represent a scene which is a collection of interactive objects.\n# Not to be instantiated.\nclass SceneObject:\n\n ## Default constructor.\n # @param NAME Name to be given to the scene to be created.\n # @param SCENE_MANAGER Reference to the SceneManager instance which is used.\n # @param SCENEGRAPH Reference to the scenegraph in which the scene is existing.\n # @param NET_TRANS_NODE Reference to the nettrans node to append the scene to.\n def __init__(self, NAME, SCENE_MANAGER, SCENEGRAPH, NET_TRANS_NODE):\n\n # references\n ## @var SCENE_MANAGER\n # Reference to the SceneManager instance which is used.\n self.SCENE_MANAGER = SCENE_MANAGER\n\n ## @var SCENEGRAPH\n # Reference to the scenegraph in which the scene is existing.\n self.SCENEGRAPH = SCENEGRAPH\n\n ## @var NET_TRANS_NODE\n # Reference to the nettrans node to append the scene to.\n self.NET_TRANS_NODE = NET_TRANS_NODE\n\n # variables\n ## @var objects\n # List of InteractiveObject instances that belong to this scene.\n 
self.objects = []\n\n ## @var name\n # Name to be given to the scene.\n self.name = NAME\n\n self.SCENE_MANAGER.scenes.append(self)\n\n # nodes\n ## @var scene_root\n # Root node of this scene.\n self.scene_root = avango.gua.nodes.TransformNode(Name = self.name)\n NET_TRANS_NODE.Children.value.append(self.scene_root)\n\n\n ## Creates and initializes a geometry node in the scene.\n # @param NAME The name of the new node.\n # @param FILENAME Path to the object file to be loaded.\n # @param MATRIX The transformation matrix of the new node.\n # @param MATERIAL Material string to be used for the geometry.\n # @param GROUNDFOLLOWING_PICK_FLAG Boolean indicating if the new geometry should be pickable for GroundFollowing purposes.\n # @param MANIPULATION_PICK_FLAG Boolean indicating if the new geometry should be pickable for manipulation purposes.\n # @param PARENT_NODE Scenegraph node to append the geometry to.\n def init_geometry(self, NAME, FILENAME, MATRIX, MATERIAL, GROUNDFOLLOWING_PICK_FLAG, MANIPULATION_PICK_FLAG, PARENT_NODE):\n\n _loader = avango.gua.nodes.GeometryLoader()\n\n _loader_flags = \"avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.OPTIMIZE_GEOMETRY\"\n\n if MATERIAL == None: # no material defined --> get materials from file description\n _loader_flags += \" | avango.gua.LoaderFlags.LOAD_MATERIALS\"\n MATERIAL = \"data/materials/White.gmd\" # default material\n\n if GROUNDFOLLOWING_PICK_FLAG == True or MANIPULATION_PICK_FLAG == True:\n _loader_flags += \" | avango.gua.LoaderFlags.MAKE_PICKABLE\"\n\n _node = _loader.create_geometry_from_file(NAME, FILENAME, MATERIAL, eval(_loader_flags))\n _node.Transform.value = MATRIX\n \n self.init_objects(_node, PARENT_NODE, GROUNDFOLLOWING_PICK_FLAG, MANIPULATION_PICK_FLAG)\n \n ## Creates and initializes a light node in the scene.\n # @param TYPE Type of the new light. 
0 sun light, 1 point light, 2 spot light\n # @param NAME The name of the new node.\n # @param COLOR The color to be assigned to the light.\n # @param MATRIX The transformation matrix of the new node.\n # @param PARENT_NODE Scenegraph node to append the geometry to.\n def init_light(self, TYPE, NAME, COLOR, MATRIX, PARENT_NODE):\n\n # sun light node\n if TYPE == 0:\n _node = avango.gua.nodes.SunLightNode()\n _node.EnableShadows.value = True\n _node.ShadowMapSize.value = 2048\n _node.ShadowOffset.value = 0.001\n\n # point light node\n elif TYPE == 1:\n _node = avango.gua.nodes.PointLightNode()\n _node.Falloff.value = 1.0 # exponent\n\n # spot light node\n elif TYPE == 2:\n _node = avango.gua.nodes.SpotLightNode()\n _node.EnableShadows.value = True\n _node.ShadowMapSize.value = 2048\n _node.ShadowOffset.value = 0.001\n _node.Softness.value = 1.0 # exponent\n _node.Falloff.value = 1.0 # exponent\n\n \n _node.Name.value = NAME\n _node.Color.value = COLOR\n _node.Transform.value = MATRIX\n _node.EnableDiffuseShading.value = True\n _node.EnableSpecularShading.value = True\n _node.EnableGodrays.value = True\n\n self.init_objects(_node, PARENT_NODE, False, True)\n\n ## Initializes InteractiveObject instances assigned to scenegraph nodes.\n # @param NODE Scenegraph node for which an interactive object is to be created.\n # @param PARENT_OBJECT Parent object of NODE.\n # @param GROUNDFOLLOWING_PICK_FLAG Boolean indicating if the new geometry should be pickable for GroundFollowing purposes.\n # @param MANIPULATION_PICK_FLAG Boolean indicating if the new geometry should be pickable for manipulation purposes.\n def init_objects(self, NODE, PARENT_OBJECT, GROUNDFOLLOWING_PICK_FLAG, MANIPULATION_PICK_FLAG):\n\n if NODE.get_type() == 'av::gua::TransformNode' and len(NODE.Children.value) > 0: # group node \n\n _object = InteractiveObject()\n _object.my_constructor(self.SCENE_MANAGER, NODE, PARENT_OBJECT, self.SCENEGRAPH, self.NET_TRANS_NODE, GROUNDFOLLOWING_PICK_FLAG, MANIPULATION_PICK_FLAG)\n\n self.objects.append(_object)\n\n for _child in NODE.Children.value:\n self.init_objects(_child, _object, GROUNDFOLLOWING_PICK_FLAG, MANIPULATION_PICK_FLAG)\n \n elif NODE.get_type() == 'av::gua::GeometryNode' or NODE.get_type() == 'av::gua::SunLightNode' or NODE.get_type() == 'av::gua::PointLightNode' or NODE.get_type() == 'av::gua::SpotLightNode':\n\n _object = InteractiveObject()\n _object.my_constructor(self.SCENE_MANAGER, NODE, PARENT_OBJECT, self.SCENEGRAPH, self.NET_TRANS_NODE, GROUNDFOLLOWING_PICK_FLAG, MANIPULATION_PICK_FLAG)\n\n self.objects.append(_object)\n\n ## Enables all objects in the scene.\n # @param FLAG Boolean indicating if all objects should be reset first.\n def enable_scene(self, FLAG):\n \n if FLAG == True:\n self.reset()\n \n for _object in self.objects:\n _object.enable_object(FLAG)\n \n ## Resets all objects in the scene.\n def reset(self):\n \n for _object in self.objects:\n _object.reset()\n\n\n\n \n","sub_path":"lib-server/SceneObject.py","file_name":"SceneObject.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"39929934","text":"#!/usr/bin/python3\nimport argparse\nimport os\nimport sys\nimport GwentUtils\n\nfrom datetime import datetime\nimport CardData\nimport KeywordData\nimport CategoryData\n\nparser = argparse.ArgumentParser(description=\"Transform the Gwent card data contained in xml files into a \"\n \"standardised JSON format. 
See README for more info.\",\n formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\"inputFolder\", help=\"unzipped data_definitions.zip. Folder containing the xml files.\")\nparser.add_argument(\"-l\", \"--language\", help=\"Includes just the translations for the selected language. Results in much smaller json files. Choose from: en-US, de-DE, es-ES, es-MX, fr-FR, it-IT, ja-JP, ko-KR, pl-PL, pt-BR, ru-RU, zh-CN, zh-TW\")\nargs = parser.parse_args()\nrawFolder = args.inputFolder\nlocale = args.language\nif locale:\n GwentUtils.LOCALES = [locale]\n\n# Add a backslash on the end if it doesn't exist.\nif rawFolder[-1] != \"/\":\n rawFolder = rawFolder + \"/\"\n\nif not os.path.isdir(rawFolder):\n print(rawFolder + \" is not a valid directory\")\n exit()\n\ngwentDataHelper = GwentUtils.GwentDataHelper(rawFolder)\n\nBASE_FILENAME = datetime.utcnow().strftime(\"%Y-%m-%d\") + \".json\"\n\nprint(\"Creating keyword JSON...\")\nkeywordsJson = KeywordData.create_keyword_json(gwentDataHelper)\nfilename = \"keywords_\" + BASE_FILENAME\nfilepath = os.path.join(rawFolder + \"../\" + filename)\nGwentUtils.save_json(filepath, keywordsJson)\n\nprint(\"Creating categories JSON...\")\ncategoriesJson = CategoryData.create_category_json(gwentDataHelper)\nfilename = \"categories_\" + BASE_FILENAME\nfilepath = os.path.join(rawFolder + \"../\" + filename)\nGwentUtils.save_json(filepath, categoriesJson)\n\nprint(\"Creating card data JSON...\")\ncardsJson = CardData.create_card_json(gwentDataHelper)\nfilename = \"cards_\" + BASE_FILENAME\nfilepath = os.path.join(rawFolder + \"../\" + filename)\nprint(\"Found %s cards.\" % (len(cardsJson)))\nGwentUtils.save_json(filepath, cardsJson)\n","sub_path":"gwent.py","file_name":"gwent.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"103573639","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 20 11:58:00 2019\n\n@author: kewilliams\n\"\"\"\n\nfrom collections import defaultdict\n\ndataFile = \"/home/kewilliams/Documents/GitHub/CSC-450/Data_Sets/extracted_pubmed19n0001.txt\"\ntermFile = \"/home/kewilliams/Documents/GitHub/CSC-450/Term_Files/disease2pubtator_processed\"\noutFile = \"/home/kewilliams/Documents/GitHub/CSC-450/Data_Sets/pubmed19n0001_data_terms.txt\"\n\ndef splitLine (line):\n return line.strip('\\n').split('\\t')\n\npmidTermDict = defaultdict(list)\n\nwith open(dataFile) as inFile:\n for line in inFile:\n pmidTermDict[eval(splitLine(line)[0])] = []\n\n\nwith open(termFile) as inFile:\n inFile.readline() #ignore first line (explainatory text)\n for line in inFile:\n data = splitLine(line)\n if data[0] in pmidTermDict:\n pmidTermDict[data[0]].append(data[1])\n\n\n#count = 0\n\nwith open(outFile, 'w') as writeFile:\n with open(dataFile) as inFile:\n for line in inFile:\n data = splitLine(line)\n termList = pmidTermDict[eval(data[0])]\n terms = ('\\t').join(termList)\n writeFile.write(eval(data[0]) + '\\t' + eval(data[1]) + '\\t' + eval(data[5]) + '\\t' + terms + '\\n')\n# count += 1\n# if count >= 100:\n# break\n ","sub_path":"Preprocessing/data_terms_to_file.py","file_name":"data_terms_to_file.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"648122742","text":"## Script (Python) \"metadata\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind 
subpath=traverse_subpath\n##parameters=self\n##title=\n##\n\n# -*- coding: utf-8 -*-\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFDefault.utils import toUnicode\n\nrequest = container.REQUEST\nresponse = request.RESPONSE\n\nfedora = getToolByName(self, \"fedora\")\nbibtool = getToolByName(self, \"bibtool\")\ntranslate = context.translate\n\nPID = self.PID\n\npublisher = self.portal_properties.metadata_properties.publisher\npdf = self.getFulltextPdf().get('url',None)\n\ndef fallback(target_format):\n context.plone_utils.addPortalMessage(translate('no_supported_metadata_format', default=\"'${fmt}' is kein unterstütztes Format.\", mapping={u'fmt':target_format}, domain='dipp'))\n response.redirect('%s/citation' % context.absolute_url())\n\ndef set_headers(id, mime, ext):\n mt = \"%s;charset=utf-8\" % mime\n cd = 'attachment; filename=%s.%s' % (id, ext)\n response.setHeader(\"Content-type\", mt)\n response.setHeader(\"Content-Transfer-Encoding\", \"8bit\")\n response.setHeader('Content-Disposition', cd)\n\ntry:\n issn = self.issn\nexcept:\n issn = self.portal_properties.metadata_properties.issn\n\ncontext.plone_log(issn)\n\nsupported = dict((type, (name, mime, extension)) for (name, type, mime, extension) in bibtool.formats())\ncontext.plone_log(traverse_subpath)\n\ntry:\n target_format = request.traverse_subpath[0]\nexcept IndexError:\n target_format = None\n fallback(target_format)\n\nqdc = fedora.getQualifiedDCMetadata(PID)\nbc = qdc['bibliographicCitation'][0]\nyear = DateTime(bc[\"journalIssueDate\"]).strftime('%Y')\nid = qdc['creatorPerson'][0][\"lastName\"].lower() + str(year)\n\nif target_format in supported.keys():\n citation = bibtool.convert(qdc, PID, target_format)\n ext = supported[target_format][2]\n mime = supported[target_format][1]\n \n set_headers(id, mime, ext)\n return citation\n\nelif target_format == \"datacite\" and self.DOI:\n doi = self.DOI\n citation = bibtool.datacite_xml(PID,issn=issn,publisher=publisher,pdf=pdf)\n set_headers(doi.replace('/','_'), 'text/xml', 'xml')\n return citation\n\nelif target_format == \"xepicur\":\n urn = self.URN\n citation = bibtool.xepicur_xml(PID,url=self.absolute_url(),issn=issn,publisher=publisher,pdf=pdf)\n set_headers(id, 'text/xml', 'xml')\n return citation\n\nelif target_format == \"doaj\":\n urn = self.URN\n citation = bibtool.doaj_xml(PID,issn=issn,publisher=publisher,pdf=pdf)\n set_headers(urn.replace('/','_'), 'text/xml', 'xml')\n return citation\n\nelse:\n fallback(target_format)\n\n","sub_path":"Products/DiPP/skins/dipp_scripts/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"428505817","text":"import sys\nimport os\nimport numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nimport keras\nfrom agents.helper import mv_file_to_dir_with_date\n\nclass Runner():\n def __init__(self,\n task,\n agent):\n self.task = task\n self.agent = agent\n self.labels = ['time', 'x', 'y', 'z', 'phi', 'theta', 'psi', 'x_velocity',\n 'y_velocity', 'z_velocity', 'phi_velocity', 'theta_velocity',\n 'psi_velocity', 'rotor_speed1', 'rotor_speed2', 'rotor_speed3', 'rotor_speed4', 'reward', 'episode']\n #self.labels = ['time', 'x', 'y', 'reward', 'episode'] # mountaincar\n self.labels_per_episode = ['episode', 'mean_reward']\n\n def run(self,\n runtime=100,\n display_graph=True,\n display_freq=5,\n should_write_results_to_file=False,\n experiences_to_mimic=None,\n 
results_file_output='data',\n episodic_results_file_output='episodic_data',\n outputs_folder='data_outputs',\n num_episode=10,\n weights_directory='weights_backup',\n load_weights_from_file=False,\n save_weights=False):\n\n self._setup_figures_for_dynamic_plots()\n\n results = {x : [] for x in self.labels}\n episode_results = {x: [] for x in self.labels_per_episode}\n\n max_time_steps = int(runtime)\n\n if experiences_to_mimic is not None and hasattr(self.agent, \"mimic\"):\n self.agent.mimic(experiences_to_mimic)\n\n done = False\n\n if load_weights_from_file:\n self.agent.load_weights(location=weights_directory)\n\n self._mv_file_to_dir_with_date(results_file_output, outputs_folder)\n self._mv_file_to_dir_with_date(episodic_results_file_output, outputs_folder)\n\n with open(results_file_output, 'w') as csvfile, open(episodic_results_file_output, 'w') as episodic_csvfile:\n writer = csv.writer(csvfile)\n episode_writer = csv.writer(episodic_csvfile)\n writer.writerow(self.labels)\n episode_writer.writerow(self.labels_per_episode)\n for i_episode in range(1, num_episode + 1):\n state = self.agent.reset_episode()\n self.task.reset()\n episode_rewards = []\n results_per_episode = {x : [] for x in self.labels}\n for i, t in enumerate(range(max_time_steps)):\n rotor_speeds = self.agent.act(state)\n next_state, reward, done = self.task.step(rotor_speeds)\n self.agent.step(rotor_speeds, reward, next_state, done)\n\n step_results = [self.task.sim.time] + list(self.task.sim.pose) + list(self.task.sim.v) + list(self.task.sim.angular_v) + list(rotor_speeds)\n #step_results = [t] + list(next_state[-len(next_state)//self.task.action_repeat:]) # with mountaincar\n step_results.append(reward)\n step_results.append(i_episode)\n for ii in range(len(self.labels)):\n results[self.labels[ii]].append(step_results[ii])\n results_per_episode[self.labels[ii]].append(step_results[ii])\n self._write(step_results, writer, should_write_results_to_file)\n\n episode_rewards.append(reward)\n\n state = next_state\n\n if done or i == max_time_steps-1:\n episode_step_result = [i_episode, np.mean(episode_rewards)]\n for ii in range(len(self.labels_per_episode)):\n episode_results[self.labels_per_episode[ii]].append(episode_step_result[ii])\n self._write(episode_step_result, episode_writer, should_write_results_to_file)\n if display_graph:\n self._plt_dynamic_reward(results)\n self._plt_dynamic_reward_means(episode_results)\n self._plt_dynamic_x_y_z(results_per_episode)\n self._plt_dynamic_rotors(results_per_episode) # comment out for mountaincar\n break\n else:\n if t % display_freq == 0 and display_graph:\n self._plt_dynamic_reward(results)\n self._mv_file_to_dir_with_date(results_file_output, outputs_folder)\n self._mv_file_to_dir_with_date(episodic_results_file_output, outputs_folder)\n\n if save_weights:\n self.agent.save_weights(location=weights_directory)\n\n def _mv_file_to_dir_with_date(self, filename, outputs_folder):\n cwd = os.getcwd()\n origin_file_path = os.path.join(cwd, filename)\n\n destination_path = os.path.join(cwd, outputs_folder)\n\n mv_file_to_dir_with_date(origin_file_path, destination_path)\n\n def _setup_figures_for_dynamic_plots(self):\n fig1, (ax11, ax12, ax_x, ax_rotors) = plt.subplots(4, 1)\n\n ax11.set_title(\"Rewards\")\n ax12.set_title(\"Average rewards\")\n ax_x.set_title(\"x, y, z\")\n ax_rotors.set_title(\"rotor speeds\")\n\n fig1.tight_layout(pad=4.0, w_pad=1.0, h_pad=0.1)\n fig1.set_size_inches(4, 6)\n fig1.show()\n\n self.fig1 = fig1\n self.ax_rotors = ax_rotors\n self.ax_x = ax_x\n 
self.ax11 = ax11\n self.ax12 = ax12\n\n def _plt_dynamic_reward(self, results):\n self.ax11.plot(results['reward'])\n self.fig1.canvas.draw()\n\n def _plt_dynamic_reward_means(self, episode_results):\n self.ax12.plot(episode_results['mean_reward'])\n self.fig1.canvas.draw()\n\n def _plt_dynamic_x_y_z(self, results_per_episode):\n self.ax_x.clear()\n self.ax_x.plot(results_per_episode['time'], results_per_episode['x'], label='x', color='green')\n self.ax_x.plot(results_per_episode['time'], results_per_episode['y'], label='y', color='red')\n if 'z' in results_per_episode:\n self.ax_x.plot(results_per_episode['time'], results_per_episode['z'], label='z', color='blue')\n self.fig1.canvas.draw()\n\n def _plt_dynamic_rotors(self, results_per_episode):\n self.ax_rotors.clear()\n self.ax_rotors.plot(results_per_episode['time'], results_per_episode['rotor_speed1'], label='1', color='green')\n self.ax_rotors.plot(results_per_episode['time'], results_per_episode['rotor_speed2'], label='2', color='red')\n self.ax_rotors.plot(results_per_episode['time'], results_per_episode['rotor_speed3'], label='3', color='blue')\n self.ax_rotors.plot(results_per_episode['time'], results_per_episode['rotor_speed4'], label='4', color='magenta')\n self.fig1.canvas.draw()\n\n def _write(self, step_results, writer, should_write_results_to_file=False):\n if should_write_results_to_file:\n writer.writerow(step_results)\n\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":6933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"219359352","text":"class Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n if not preorder:\n return\n self.index = 0\n\n def helper(start=0, end=len(inorder) - 1):\n if start > end:\n return\n root_val = preorder[self.index]\n inorder_index = find_index(root_val, start, end)\n if inorder_index == -1:\n return\n else:\n self.index += 1\n node = TreeNode(root_val)\n node.left = helper(start, inorder_index - 1)\n node.right = helper(inorder_index + 1, end)\n return node\n\n def find_index(root_val, start, end):\n for i in range(start, end + 1):\n if inorder[i] == root_val:\n return i\n return -1\n return helper()\n","sub_path":"105/105.construct-binary-tree-from-preorder-and-inorder-traversal.747473007.Accepted.leetcode.python3.py","file_name":"105.construct-binary-tree-from-preorder-and-inorder-traversal.747473007.Accepted.leetcode.python3.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"623716267","text":"import requests\nimport json\n\ntoken = os.environ['SHINOBI_TOKEN']\nheaders = {\n 'Authorization': f'Bearer {token}',\n 'content-type': 'application/json'\n}\nbase_url = 'http://home.lan'\napi_url = f'{base_url}/api'\nservice_url = f'{api_url}/services/homeassistant'\nstate_url = f'{api_url}/states'\n\ndef GetState(entity_id):\n res = requests.get(f'{state_url}/{entity_id}', headers=headers)\n if res.status_code != 200:\n print(res.content)\n assert res.status_code == 200\n return res.json()\n\ndef CallService(service, payload):\n res = requests.post(f'{service_url}/{service}', headers=headers, data=json.dumps(payload))\n if res.status_code != 200:\n print(res.content)\n assert res.status_code == 200\n return 
res.json()\n","sub_path":"PyTasks/src/HomeAssistant.py","file_name":"HomeAssistant.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"513469289","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport json\nfrom optparse import OptionParser\nfrom mglib import *\n\nprehelp = \"\"\"\nNAME\n mg-select-significance\n\nVERSION\n %s\n\nSYNOPSIS\n mg-select-significance [ --help, --input , --order , --direction , --cols , --rows ]\n\nDESCRIPTION\n Tool to order and subselect grouped metagenomic abundace profiles with significance statistics.\n\"\"\"\n\nposthelp = \"\"\"\nInput\n Tab-delimited table of abundance profiles with significance statistics\n\nOutput\n Altered tab-delimited table based on input and options.\n\nEXAMPLES\n mg-compare-taxa --ids 'mgm4441679.3,mgm4441680.3,mgm4441681.3,mgm4441682.3' --level class --source RefSeq --format text | mg-group-significance --input - --format text --groups '{\"group1\":[\"mgm4441679.3\",\"mgm4441680.3\"],\"group2\":[\"mgm4441681.3\",\"mgm4441682.3\"]}' --stat_test Kruskal-Wallis | mg-select-significance --input - --cols 4 --rows 10\n\nSEE ALSO\n -\n\nAUTHORS\n %s\n\"\"\"\n\ndef opt2int(opt, x):\n try:\n i = int(x)\n except:\n sys.stderr.write(\"ERROR: --%s must be an integer\\n\"%opt)\n sys.exit(1)\n return i\n\ndef main(args):\n OptionParser.format_description = lambda self, formatter: self.description\n OptionParser.format_epilog = lambda self, formatter: self.epilog\n parser = OptionParser(usage='', description=prehelp%VERSION, epilog=posthelp%AUTH_LIST)\n parser.add_option(\"\", \"--input\", dest=\"input\", default='-', help=\"input: filename or stdin (-), default is stdin\")\n parser.add_option(\"\", \"--order\", dest=\"order\", default=None, help=\"column number to order output by (0 for last column), default is no ordering\")\n parser.add_option(\"\", \"--direction\", dest=\"direction\", default=\"desc\", help=\"direction of order. 
'asc' for ascending order, 'desc' for descending order, default is desc\")\n parser.add_option(\"\", \"--cols\", dest=\"cols\", default=None, help=\"number of columns from the left to return from input table, default is all\")\n parser.add_option(\"\", \"--rows\", dest=\"rows\", default=None, help=\"number of rows from the top to return from input table, default is all\")\n \n # get inputs\n (opts, args) = parser.parse_args()\n if (opts.input != '-') and (not os.path.isfile(opts.input)):\n sys.stderr.write(\"ERROR: input data missing\\n\")\n return 1\n if opts.direction not in ['asc', 'desc']:\n sys.stderr.write(\"ERROR: invalid order direction\\n\")\n return 1\n \n # parse inputs\n try:\n indata = sys.stdin.read() if opts.input == '-' else open(opts.input, 'r').read()\n rows, cols, datatemp = tab_to_matrix(indata)\n data = []\n for r in datatemp:\n data.append( map(lambda x: int(x) if x.isdigit() else float(x), r) )\n except:\n sys.stderr.write(\"ERROR: unable to load input data\\n\")\n return 1\n \n # first we sort\n if opts.order is not None:\n rev_order = True if opts.direction == 'desc' else False\n order_col = opt2int('order', opts.order)\n if order_col > len(cols):\n sys.stderr.write(\"ERROR: --order value is greater than number of columns in table\\n\")\n order_col = order_col - 1\n rd_merged = zip(rows, data)\n rd_sorted = sorted(rd_merged, key=lambda x: x[1][order_col], reverse=rev_order)\n rows, data = zip(*rd_sorted)\n \n # subselect rows\n if opts.rows is not None:\n subrow = opt2int('rows', opts.rows)\n rows = rows[:subrow]\n data = data[:subrow]\n if opts.cols is not None:\n subcol = opt2int('cols', opts.cols)\n cols = cols[:subcol]\n data = sub_matrix(data, subcol)\n \n # output data\n safe_print( \"\\t%s\\n\" %\"\\t\".join(cols) )\n for i, d in enumerate(data):\n safe_print( \"%s\\t%s\\n\" %(rows[i], \"\\t\".join(map(str, d))) )\n \n return 0\n \n\nif __name__ == \"__main__\":\n sys.exit( main(sys.argv) )\n","sub_path":"tools/bin/mg-select-significance.py","file_name":"mg-select-significance.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"281304694","text":"import requests\r\nimport lxml.html\r\nimport asyncio\r\nimport threading\r\nimport traceback\r\nimport re\r\nimport pymongo\r\n\r\nhost = 'http://www.imdb.cn'\r\nurl_1 = 'http://www.imdb.cn/IMDB250/'\r\nurl_2 = 'http://www.imdb.cn/imdb250/{}'\r\nheader = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}\r\nthread_list = []\r\n\r\n\r\ndef main_parse():\r\n error_num = 0\r\n for i in range(100, 252):\r\n try:\r\n if i == 1:\r\n url = url_1\r\n else:\r\n url = url_2.format(i)\r\n print(url)\r\n temp = requests.get(url, headers=header).content.decode()\r\n selector = lxml.html.fromstring(temp)\r\n url_list = selector.xpath('//div[@class=\"ss-3 clear\"]/a/@href')\r\n for u in url_list:\r\n print(u)\r\n thread = threading.Thread(target=detail(host + u))\r\n thread_list.append(thread)\r\n thread.setDaemon(daemonic=True)\r\n thread.start()\r\n\r\n except:\r\n error_num += 1\r\n if error_num >= 20:\r\n return\r\n traceback.print_exc()\r\n\r\n\r\ndef process_info(li: list) -> str:\r\n l = '' # concentrate with ;\r\n l = ';'.join(li)\r\n return l\r\n\r\n\r\ndef detail(url):\r\n temp = requests.get(url).content.decode()\r\n sel = lxml.html.fromstring(temp)\r\n\r\n de_list = sel.xpath('//div[@class=\"bdd clear\"]/ul')[0].xpath('li')\r\n\r\n stars = 
sel.xpath('//div[@class=\"hdd\"]/span/i/text()')[0]\r\n\r\n movie_name = de_list[0].xpath('a/text()')[0]\r\n el_name = de_list[1].xpath('a/text()')[0]\r\n\r\n director = process_info(de_list[2].xpath('a/text()'))\r\n main_actors = process_info(de_list[3].xpath('a/text()'))\r\n\r\n time_len = de_list[4].xpath('text()')[0].replace(' ', '')\r\n\r\n language = process_info(de_list[4].xpath('a/text()'))\r\n\r\n t = de_list[5].text_content().replace(' ', '').replace('\\t', '').replace('\\xa0', '')\r\n\r\n try:\r\n first_show = re.search('上映时间:(\\d{4})', t).group(1)\r\n except:\r\n first_show=''\r\n try:\r\n category = re.search('类型:(.*) 分级', t).group(1)\r\n except:\r\n category=''\r\n try:\r\n levels = re.search('分级:(.*)颜色', t).group(1)\r\n\r\n except:\r\n levels=''\r\n try:\r\n color = re.search('颜色:(.*)', t).group(1)\r\n\r\n except:\r\n color=''\r\n try:\r\n b = de_list[6].text_content().replace('\\r\\n', '').replace('\\t', '').replace('\\xa0', '')\r\n country = re.search('国家:(.*?)声音', b).group(1)\r\n sound = re.search('声音:(.*)', b).group(1)\r\n except:\r\n country=''\r\n sound=''\r\n\r\n item = {\r\n 'stars': stars,\r\n 'movie_name': movie_name,\r\n 'el_name': el_name,\r\n 'director': director,\r\n 'main_act': main_actors,\r\n 'time_len': time_len,\r\n 'language': language,\r\n 'first_show': first_show,\r\n 'category': category,\r\n 'levels': levels,\r\n 'color': color,\r\n 'country': country,\r\n 'sound': sound\r\n }\r\n print(item)\r\n handler.insert_one(item)\r\n\r\n\r\nclient = pymongo.MongoClient()\r\nhandler = client.spider.imdb\r\nmain_parse()\r\nfor t in thread_list:\r\n if t.is_alive():\r\n print('not end')\r\nclient.close()\r\n\r\nimport csv\r\nfp=open('imdb250.csv','w',encoding='utf-8')\r\nwri=csv.DictWriter(fp,fieldnames=[\r\n 'stars',\r\n 'movie_name',\r\n 'el_name',\r\n 'director',\r\n 'main_act',\r\n 'time_len',\r\n 'language',\r\n 'first_show',\r\n 'category',\r\n 'levels',\r\n 'color',\r\n 'country',\r\n 'sound'\r\n ])","sub_path":"PyspiderSingle/下载imdb250.py","file_name":"下载imdb250.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"362495802","text":"print('''东汉建安五年,\n曹操与袁绍军相持官渡。\n曹操:\n袁绍兵多将广,该如何取胜之?甚是烦恼啊。。。。\n那么该进攻哪里呢?1.延津 2.乌巢''')\na=input(\"\")\ninput('曹操:谁愿前往?')\nif a==\"1\":\n input('关羽:关羽愿战以报曹公厚德')\nelif a==\"2\":\n input('徐晃:末将,徐晃愿往。')\n\nelse:\n print('尔等依计袭敌粮道,断其后援')\n#===========================================\ninput('''却说袁绍兴兵,望官渡进发。夏侯□发书告急。\n曹操起军七万,前往迎敌,留荀□守许都。''')\ninput('绍兵临发,田丰从狱中上书谏曰:')\nprint('“今且宜静守以待天时,不可妄兴大兵,恐有不利。')\nif a==\"1\":\n input('关羽:关羽愿战以报曹公厚德')\nelif a==\"2\":\n input('逢纪谮曰:')\n input('主公兴仁义之师,田丰何得出此不祥之语!')\n\nelse:\n print('尔等依计袭敌粮道,断其后援')\n#===========================================\ninput('''绍因怒,欲斩田丰。众官告免。''')\ninput('绍恨曰:')\ninput('待吾破了曹操,明正其罪!')\ninput('遂催军进发,旌旗遍野,刀剑如林。行至阳武,下定寨栅。')\nif a==\"1\":\n input('关羽:关羽愿战以报曹公厚德')\nelif a==\"2\":\n input('沮授曰:')\n input('''我军虽众,而勇猛不及彼军;彼军虽精,而粮草不如我军。\n彼军无粮,利在急战;我军有粮,宜且缓守。若能旷以日月,则彼军不战自败矣。''')\n\nelse:\n print('尔等依计袭敌粮道,断其后援')\n","sub_path":"学习/sanguo.py","file_name":"sanguo.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"61795264","text":"\"\"\"Forms for checkout\"\"\"\nfrom django import forms\nfrom .models import Order\n\nclass MakePaymentForm(forms.Form):\n \"\"\"Form for making payments\"\"\"\n\n MONTH_CHOICES = [(i, i) for i in range(1, 13)]\n YEAR_CHOICES = [(i, i) for i in range(2018, 2037)]\n\n # 
Required == False --> for security - plain text is not transmitted through the browser\n credit_card_number = forms.CharField(label='Credit card number', required=False)\n cvc = forms.CharField(label='Security code (CVV)', required=False)\n expiry_month = forms.ChoiceField(label='Month', choices=MONTH_CHOICES, required=False)\n expiry_year = forms.ChoiceField(label='Month', choices=YEAR_CHOICES, required=False)\n stripe_id = forms.CharField(widget=forms.HiddenInput)\n\nclass OrderForm(forms.ModelForm):\n \"\"\"Form for order\"\"\"\n class Meta:\n model = Order\n fields = ('full_name', 'phone_number', 'country', 'postcode', 'town_or_city', 'street_address1', 'street_address2', 'county')\n","sub_path":"checkout/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"378197100","text":"from sys import stdin\r\ndata=stdin.readlines()\r\nT=len(data)//2\r\nfor _ in range(T):\r\n n=int(data.pop(0).strip('\\r\\n'))\r\n scores=data.pop(0).strip('\\r\\n').split()\r\n for i in range(n):scores[i]=int(scores[i])\r\n\r\n lowest='worst case'\r\n highest='best case'\r\n scorestr=''\r\n switch=True\r\n\r\n scores.sort()\r\n\r\n for i in scores:\r\n if i<60:highest=i\r\n elif i>=60 and switch:\r\n lowest=i\r\n switch=False\r\n scorestr+=str(i)+' '\r\n print(scorestr[:-1])\r\n print(highest)\r\n print(lowest)\r\n\r\n","sub_path":"三月第一題.py","file_name":"三月第一題.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"510879770","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport time\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nimport multiprocessing as mp\n\nimport mlfinlab as ml\n\nnp.random.seed(42)\n\n\n# module to substitute in 'mlfinlab' package\ndef new_batch_run(self, verbose=True, to_csv=False, output_path=None):\n \"\"\"\n Reads a csv file in batches and then constructs the financial data structure in the form of a DataFrame.\n The csv file must have only 3 columns: date_time, price, & volume.\n :param verbose: (Boolean) Flag whether to print message on each processed batch or not\n :param to_csv: (Boolean) Flag for writing the results of bars generation to local csv file, or to in-memory DataFrame\n :param output_path: (Boolean) Path to results file, if to_csv = True\n\n :return: (DataFrame or None) Financial data structure\n \"\"\"\n\n # for parquet\n if \".gzip\" in self.file_path:\n parquet = pd.read_parquet(self.file_path, engine='fastparquet')\n n_batches = len(parquet) // self.batch_size\n iterations = np.array_split(parquet, n_batches)\n else:\n # Read in the first row & assert format\n first_row = pd.read_csv(self.file_path, nrows=1)\n self._assert_csv(first_row)\n iterations = pd.read_csv(self.file_path, chunksize=self.batch_size)\n\n if to_csv is True:\n header = True # if to_csv is True, header should written on the first batch only\n open(output_path, 'w').close() # clean output csv file\n\n if verbose: # pragma: no cover\n print('Reading data in batches:')\n\n # Read csv in batches\n count = 0\n final_bars = []\n cols = ['date_time', 'open', 'high', 'low', 'close', 'volume']\n for batch in iterations:\n if verbose: # pragma: no cover\n print('Batch number:', count)\n\n list_bars = self._extract_bars(data=batch)\n\n if to_csv is True:\n pd.DataFrame(list_bars, columns=cols).to_csv(output_path, header=header, index=False, 
mode='a')\n header = False\n else:\n # Append to bars list\n final_bars += list_bars\n count += 1\n\n # Set flag to True: notify function to use cache\n self.flag = True\n\n if verbose: # pragma: no cover\n print('Returning bars \\n')\n\n # Return a DataFrame\n if final_bars:\n bars_df = pd.DataFrame(final_bars, columns=cols)\n return bars_df\n\n # Processed DataFrame is stored in .csv file, return None\n return None\n\n\n# update imported package to deal with advanced data structure and adjust it to reas 'parquet'\nml.data_structures.base_bars.BaseBars.batch_run = new_batch_run\n\n\nclass TrippleBarrier(object):\n \"\"\"This class is to create indicators (features) to feed ML trading algorithm.\n The content of this class was sourced from 'mlfinlab' package.\n Objectification was made in order to run this class within 'QuantConnect' platform.\n \"\"\"\n\n def __init__(self):\n # tbd\n return\n\n def get_daily_vol(self, close, lookback=100):\n \"\"\"\n Snippet 3.1, page 44, Daily Volatility Estimates\n Computes the daily volatility at intraday estimation points.\n In practice we want to set profit taking and stop-loss limits that are a function of the risks involved\n in a bet. Otherwise, sometimes we will be aiming too high (tao ≫ sigma_t_i,0), and sometimes too low\n (tao ≪ sigma_t_i,0 ), considering the prevailing volatility. Snippet 3.1 computes the daily volatility\n at intraday estimation points, applying a span of lookback days to an exponentially weighted moving\n standard deviation.\n See the pandas documentation for details on the pandas.Series.ewm function.\n Note: This function is used to compute dynamic thresholds for profit taking and stop loss limits.\n :param close: Closing prices\n :param lookback: lookback period to compute volatility\n :return: series of daily volatility value\n \"\"\"\n # daily vol re-indexed to close\n df0 = close.index.searchsorted(close.index - pd.Timedelta(days=1))\n df0 = df0[df0 > 0]\n df0 = (pd.Series(close.index[df0 - 1], index=close.index[close.shape[0] - df0.shape[0]:]))\n\n df0 = close.loc[df0.index] / close.loc[df0.values].values - 1 # daily returns\n df0 = df0.ewm(span=lookback).std()\n return df0\n\n # Snippet 2.4, page 39, The Symmetric CUSUM Filter.\n def cusum_filter(self, raw_time_series, threshold, time_stamps=True):\n \"\"\"\n Snippet 2.4, page 39, The Symmetric Dynamic/Fixed CUSUM Filter.\n The CUSUM filter is a quality-control method, designed to detect a shift in the\n mean value of a measured quantity away from a target value. The filter is set up to\n identify a sequence of upside or downside divergences from any reset level zero.\n We sample a bar t if and only if S_t >= threshold, at which point S_t is reset to 0.\n One practical aspect that makes CUSUM filters appealing is that multiple events are not\n triggered by raw_time_series hovering around a threshold level, which is a flaw suffered by popular\n market signals such as Bollinger Bands. It will require a full run of length threshold for\n raw_time_series to trigger an event.\n Once we have obtained this subset of event-driven bars, we will let the ML algorithm determine\n whether the occurrence of such events constitutes actionable intelligence.\n Below is an implementation of the Symmetric CUSUM filter.\n Note: As per the book this filter is applied to closing prices but we extended it to also work on other\n time series such as volatility.\n :param raw_time_series: (series) of close prices (or other time series, e.g. 
volatility).\n :param threshold: (float or pd.Series) when the abs(change) is larger than the threshold, the function captures\n it as an event, can be dynamic if threshold is pd.Series\n :param time_stamps: (bool) default is to return a DateTimeIndex, change to false to have it return a list.\n :return: (datetime index vector) vector of datetimes when the events occurred. This is used later to sample.\n \"\"\"\n\n t_events = []\n s_pos = 0\n s_neg = 0\n\n # log returns\n raw_time_series = pd.DataFrame(raw_time_series) # Convert to DataFrame\n raw_time_series.columns = ['price']\n raw_time_series['log_ret'] = raw_time_series.price.apply(np.log).diff()\n if isinstance(threshold, (float, int)):\n raw_time_series['threshold'] = threshold\n elif isinstance(threshold, pd.Series):\n raw_time_series.loc[threshold.index, 'threshold'] = threshold\n else:\n raise ValueError('threshold is neither float nor pd.Series!')\n\n raw_time_series = raw_time_series.iloc[1:] # Drop first na values\n\n # Get event time stamps for the entire series\n for tup in raw_time_series.itertuples():\n thresh = tup.threshold\n pos = float(s_pos + tup.log_ret)\n neg = float(s_neg + tup.log_ret)\n s_pos = max(0.0, pos)\n s_neg = min(0.0, neg)\n\n if s_neg < -thresh:\n s_neg = 0\n t_events.append(tup.Index)\n\n elif s_pos > thresh:\n s_pos = 0\n t_events.append(tup.Index)\n\n # Return DatetimeIndex or list\n if time_stamps:\n event_timestamps = pd.DatetimeIndex(t_events)\n return event_timestamps\n\n return t_events\n\n # Snippet 3.4 page 49, Adding a Vertical Barrier\n def add_vertical_barrier(self, t_events, close, num_days=0, num_hours=0, num_minutes=0, num_seconds=0):\n \"\"\"\n Snippet 3.4 page 49, Adding a Vertical Barrier\n For each index in t_events, it finds the timestamp of the next price bar at or immediately after\n a number of days num_days. This vertical barrier can be passed as an optional argument t1 in get_events.\n This function creates a series that has all the timestamps of when the vertical barrier would be reached.\n :param t_events: (series) series of events (symmetric CUSUM filter)\n :param close: (series) close prices\n :param num_days: (int) number of days to add for vertical barrier\n :param num_hours: (int) number of hours to add for vertical barrier\n :param num_minutes: (int) number of minutes to add for vertical barrier\n :param num_seconds: (int) number of seconds to add for vertical barrier\n :return: (series) timestamps of vertical barriers\n \"\"\"\n timedelta = pd.Timedelta(\n '{} days, {} hours, {} minutes, {} seconds'.format(num_days, num_hours, num_minutes, num_seconds))\n # Find index to closest to vertical barrier\n nearest_index = close.index.searchsorted(t_events + timedelta)\n\n # Exclude indexes which are outside the range of close price index\n nearest_index = nearest_index[nearest_index < close.shape[0]]\n\n # Find price index closest to vertical barrier time stamp\n nearest_timestamp = close.index[nearest_index]\n filtered_events = t_events[:nearest_index.shape[0]]\n\n vertical_barriers = pd.Series(data=nearest_timestamp, index=filtered_events)\n return vertical_barriers\n\n # Snippet 20.5 (page 306), the lin_parts function\n def lin_parts(self, num_atoms, num_threads):\n \"\"\"\n Snippet 20.5 (page 306), the lin_parts function\n The simplest way to form molecules is to partition a list of atoms in subsets of equal size,\n where the number of subsets is the minimum between the number of processors and the number\n of atoms. 
For N subsets we need to find the N+1 indices that enclose the partitions.\n This logic is demonstrated in Snippet 20.5.\n This function partitions a list of atoms in subsets (molecules) of equal size.\n An atom is a set of indivisible set of tasks.\n \"\"\"\n # Partition of atoms with a single loop\n parts = np.linspace(0, num_atoms, min(num_threads, num_atoms) + 1)\n parts = np.ceil(parts).astype(int)\n return parts\n\n # Snippet 3.2, page 45, Triple Barrier Labeling Method\n def apply_pt_sl_on_t1(self, close, events, pt_sl, molecule): # pragma: no cover\n \"\"\"\n Snippet 3.2, page 45, Triple Barrier Labeling Method\n This function applies the triple-barrier labeling method. It works on a set of\n datetime index values (molecule). This allows the program to parallelize the processing.\n Mainly it returns a DataFrame of timestamps regarding the time when the first barriers were reached.\n :param close: (series) close prices\n :param events: (series) of indices that signify \"events\" (see cusum_filter function\n for more details)\n :param pt_sl: (array) element 0, indicates the profit taking level; element 1 is stop loss level\n :param molecule: (an array) a set of datetime index values for processing\n :return: DataFrame of timestamps of when first barrier was touched\n \"\"\"\n # Apply stop loss/profit taking, if it takes place before t1 (end of event)\n events_ = events.loc[molecule]\n out = events_[['t1']].copy(deep=True)\n\n profit_taking_multiple = pt_sl[0]\n stop_loss_multiple = pt_sl[1]\n\n # Profit taking active\n if profit_taking_multiple > 0:\n profit_taking = profit_taking_multiple * events_['trgt']\n else:\n profit_taking = pd.Series(index=events.index) # NaNs\n\n # Stop loss active\n if stop_loss_multiple > 0:\n stop_loss = -stop_loss_multiple * events_['trgt']\n else:\n stop_loss = pd.Series(index=events.index) # NaNs\n\n # Get events\n for loc, vertical_barrier in events_['t1'].fillna(close.index[-1]).iteritems():\n closing_prices = close[loc: vertical_barrier] # Path prices for a given trade\n cum_returns = (closing_prices / close[loc] - 1) * events_.at[loc, 'side'] # Path returns\n out.loc[loc, 'sl'] = cum_returns[cum_returns < stop_loss[loc]].index.min() # Earliest stop loss date\n out.loc[loc, 'pt'] = cum_returns[\n cum_returns > profit_taking[loc]].index.min() # Earliest profit taking date\n\n return out\n\n # Snippet 20.7 (page 310), The mpPandasObj, used at various points in the book\n def mp_pandas_obj(self, func, pd_obj, num_threads=24, mp_batches=1, lin_mols=True, **kargs):\n \"\"\"\n Snippet 20.7 (page 310), The mpPandasObj, used at various points in the book\n Parallelize jobs, return a dataframe or series.\n Example: df1=mp_pandas_obj(func,('molecule',df0.index),24,**kwds)\n First, atoms are grouped into molecules, using linParts (equal number of atoms per molecule)\n or nestedParts (atoms distributed in a lower-triangular structure). When mpBatches is greater\n than 1, there will be more molecules than cores. Suppose that we divide a task into 10 molecules,\n where molecule 1 takes twice as long as the rest. If we run this process in 10 cores, 9 of the\n cores will be idle half of the runtime, waiting for the first core to process molecule 1.\n Alternatively, we could set mpBatches=10 so as to divide that task in 100 molecules. In doing so,\n every core will receive equal workload, even though the first 10 molecules take as much time as the\n next 20 molecules. 
In this example, the run with mpBatches=10 will take half of the time consumed by\n mpBatches=1.\n Second, we form a list of jobs. A job is a dictionary containing all the information needed to process\n a molecule, that is, the callback function, its keyword arguments, and the subset of atoms that form\n the molecule.\n Third, we will process the jobs sequentially if numThreads==1 (see Snippet 20.8), and in parallel\n otherwise (see Section 20.5.2). The reason that we want the option to run jobs sequentially is for\n debugging purposes. It is not easy to catch a bug when programs are run in multiple processors.\n Once the code is debugged, we will want to use numThreads>1.\n Fourth, we stitch together the output from every molecule into a single list, series, or dataframe.\n :param func: A callback function, which will be executed in parallel\n :param pd_obj: (tuple) Element 0: The name of the argument used to pass molecules to the callback function\n Element 1: A list of indivisible tasks (atoms), which will be grouped into molecules\n :param num_threads: (int) The number of threads that will be used in parallel (one processor per thread)\n :param mp_batches: (int) Number of parallel batches (jobs per core)\n :param lin_mols: (bool) Tells if the method should use linear or nested partitioning\n :param kargs: (var args) Keyword arguments needed by func\n :return: (data frame) of results\n \"\"\"\n\n if lin_mols:\n parts = self.lin_parts(len(pd_obj[1]), num_threads * mp_batches)\n else:\n print(\"nested parts... to fix\")\n # parts = nested_parts(len(pd_obj[1]), num_threads * mp_batches)\n\n jobs = []\n for i in range(1, len(parts)):\n job = {pd_obj[0]: pd_obj[1][parts[i - 1]:parts[i]], 'func': func}\n job.update(kargs)\n jobs.append(job)\n\n if num_threads == 1:\n out = self.process_jobs_(jobs)\n else:\n out = self.process_jobs(jobs, num_threads=num_threads)\n\n if isinstance(out[0], pd.DataFrame):\n df0 = pd.DataFrame()\n elif isinstance(out[0], pd.Series):\n df0 = pd.Series()\n else:\n return out\n\n for i in out:\n df0 = df0.append(i)\n\n df0 = df0.sort_index()\n return df0\n\n # Snippet 20.8, pg 311, Single thread execution, for debugging\n def process_jobs_(self, jobs):\n \"\"\"\n # Snippet 20.8, pg 311, Single thread execution, for debugging\n Run jobs sequentially, for debugging\n \"\"\"\n out = []\n for job in jobs:\n out_ = self.expand_call(job)\n out.append(out_)\n\n return out\n\n # Snippet 20.9.2, pg 312, Example of Asynchronous call to pythons multiprocessing library\n def process_jobs(self, jobs, task=None, num_threads=24):\n \"\"\"\n Snippet 20.9.2, pg 312, Example of Asynchronous call to pythons multiprocessing library\n Run in parallel. 
jobs must contain a 'func' callback, for expand_call\n \"\"\"\n\n if task is None:\n task = jobs[0]['func'].__name__\n\n pool = mp.Pool(processes=num_threads)\n outputs = pool.imap_unordered(self.expand_call, jobs)\n out = []\n time0 = time.time()\n\n # Process asynchronous output, report progress\n for i, out_ in enumerate(outputs, 1):\n out.append(out_)\n self.report_progress(i, len(jobs), time0, task)\n\n pool.close()\n pool.join() # This is needed to prevent memory leaks\n return out\n\n # Snippet 20.10 Passing the job (molecule) to the callback function\n def expand_call(self, kargs):\n \"\"\"\n Snippet 20.10 Passing the job (molecule) to the callback function\n Expand the arguments of a callback function, kargs['func']\n \"\"\"\n func = kargs['func']\n del kargs['func']\n out = func(**kargs)\n return out\n\n # Snippet 20.9.1, pg 312, Example of Asynchronous call to pythons multiprocessing library\n def report_progress(self, job_num, num_jobs, time0, task):\n \"\"\"\n Snippet 20.9.1, pg 312, Example of Asynchronous call to pythons multiprocessing library\n \"\"\"\n # Report progress as asynch jobs are completed\n msg = [float(job_num) / num_jobs, (time.time() - time0) / 60.0]\n msg.append(msg[1] * (1 / msg[0] - 1))\n time_stamp = str(dt.datetime.fromtimestamp(time.time()))\n\n msg = time_stamp + ' ' + str(round(msg[0] * 100, 2)) + '% ' + task + ' done after ' + str(\n round(msg[1], 2)) + ' minutes. Remaining ' + str(round(msg[2], 2)) + ' minutes.'\n\n if job_num < num_jobs:\n sys.stderr.write(msg + '\\r')\n else:\n sys.stderr.write(msg + '\\n')\n\n # Snippet 3.3 -> 3.6 page 50, Getting the Time of the First Touch, with Meta Labels\n def get_events(self, close, t_events, pt_sl, target, min_ret, num_threads, vertical_barrier_times=False,\n side_prediction=None):\n \"\"\"\n Snippet 3.6 page 50, Getting the Time of the First Touch, with Meta Labels\n This function is orchestrator to meta-label the data, in conjunction with the Triple Barrier Method.\n :param close: (series) Close prices\n :param t_events: (series) of t_events. These are timestamps that will seed every triple barrier.\n These are the timestamps selected by the sampling procedures discussed in Chapter 2, Section 2.5.\n Eg: CUSUM Filter\n :param pt_sl: (2 element array) element 0, indicates the profit taking level; element 1 is stop loss level.\n A non-negative float that sets the width of the two barriers. A 0 value means that the respective\n horizontal barrier (profit taking and/or stop loss) will be disabled.\n :param target: (series) of values that are used (in conjunction with pt_sl) to determine the width\n of the barrier. 
In this program this is daily volatility series.\n :param min_ret: (float) The minimum target return required for running a triple barrier search.\n :param num_threads: (int) The number of threads concurrently used by the function.\n :param vertical_barrier_times: (series) A pandas series with the timestamps of the vertical barriers.\n We pass a False when we want to disable vertical barriers.\n :param side_prediction: (series) Side of the bet (long/short) as decided by the primary model\n :return: (data frame) of events\n -events.index is event's starttime\n -events['t1'] is event's endtime\n -events['trgt'] is event's target\n -events['side'] (optional) implies the algo's position side\n -events['pt'] Profit taking multiple\n -events['sl'] Stop loss multiple\n \"\"\"\n\n # 1) Get target\n target = target.loc[t_events]\n target = target[target > min_ret] # min_ret\n\n # 2) Get vertical barrier (max holding period)\n if vertical_barrier_times is False:\n vertical_barrier_times = pd.Series(pd.NaT, index=t_events)\n\n # 3) Form events object, apply stop loss on vertical barrier\n if side_prediction is None:\n side_ = pd.Series(1.0, index=target.index)\n pt_sl_ = [pt_sl[0], pt_sl[0]]\n else:\n side_ = side_prediction.loc[target.index] # Subset side_prediction on target index.\n pt_sl_ = pt_sl[:2]\n\n # Create a new df with [v_barrier, target, side] and drop rows that are NA in target\n events = pd.concat({'t1': vertical_barrier_times, 'trgt': target, 'side': side_}, axis=1)\n events = events.dropna(subset=['trgt'])\n\n # Apply Triple Barrier\n first_touch_dates = self.mp_pandas_obj(func=self.apply_pt_sl_on_t1,\n pd_obj=('molecule', events.index),\n num_threads=num_threads,\n close=close,\n events=events,\n pt_sl=pt_sl_)\n\n events['t1'] = first_touch_dates.dropna(how='all').min(axis=1) # pd.min ignores nan\n\n if side_prediction is None:\n events = events.drop('side', axis=1)\n\n # Add profit taking and stop loss multiples for vertical barrier calculations\n events['pt'] = pt_sl[0]\n events['sl'] = pt_sl[1]\n\n return events\n\n # Snippet 3.9, pg 55, Question 3.3\n def barrier_touched(self, out_df, events):\n \"\"\"\n Snippet 3.9, pg 55, Question 3.3\n Adjust the getBins function (Snippet 3.7) to return a 0 whenever the vertical barrier is the one touched first.\n Top horizontal barrier: 1\n Bottom horizontal barrier: -1\n Vertical barrier: 0\n :param out_df: (DataFrame) containing the returns and target\n :param events: (DataFrame) The original events data frame. 
Contains the pt sl multiples needed here.\n :return: (DataFrame) containing returns, target, and labels\n \"\"\"\n store = []\n for date_time, values in out_df.iterrows():\n ret = values['ret']\n target = values['trgt']\n\n pt_level_reached = ret > target * events.loc[date_time, 'pt']\n sl_level_reached = ret < -target * events.loc[date_time, 'sl']\n\n if ret > 0.0 and pt_level_reached:\n # Top barrier reached\n store.append(1)\n elif ret < 0.0 and sl_level_reached:\n # Bottom barrier reached\n store.append(-1)\n else:\n # Vertical barrier reached\n store.append(0)\n\n # Save to 'bin' column and return\n out_df['bin'] = store\n return out_df\n\n # Snippet 3.4 -> 3.7, page 51, Labeling for Side & Size with Meta Labels\n def get_bins(self, triple_barrier_events, close):\n \"\"\"\n Snippet 3.7, page 51, Labeling for Side & Size with Meta Labels\n Compute event's outcome (including side information, if provided).\n events is a DataFrame where:\n Now the possible values for labels in out['bin'] are {0,1}, as opposed to whether to take the bet or pass,\n a purely binary prediction. When the predicted label the previous feasible values {−1,0,1}.\n The ML algorithm will be trained to decide is 1, we can use the probability of this secondary prediction\n to derive the size of the bet, where the side (sign) of the position has been set by the primary model.\n :param triple_barrier_events: (data frame)\n -events.index is event's starttime\n -events['t1'] is event's endtime\n -events['trgt'] is event's target\n -events['side'] (optional) implies the algo's position side\n Case 1: ('side' not in events): bin in (-1,1) <-label by price action\n Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)\n :param close: (series) close prices\n :return: (data frame) of meta-labeled events\n \"\"\"\n\n # 1) Align prices with their respective events\n events_ = triple_barrier_events.dropna(subset=['t1'])\n all_dates = events_.index.union(other=events_['t1'].values).drop_duplicates()\n prices = close.reindex(all_dates, method='bfill')\n\n # 2) Create out DataFrame\n out_df = pd.DataFrame(index=events_.index)\n # Need to take the log returns, else your results will be skewed for short positions\n out_df['ret'] = np.log(prices.loc[events_['t1'].values].values) - np.log(prices.loc[events_.index])\n out_df['trgt'] = events_['trgt']\n\n # Meta labeling: Events that were correct will have pos returns\n if 'side' in events_:\n out_df['ret'] = out_df['ret'] * events_['side'] # meta-labeling\n\n # Added code: label 0 when vertical barrier reached\n out_df = self.barrier_touched(out_df, triple_barrier_events)\n\n # Meta labeling: label incorrect events with a 0\n if 'side' in events_:\n out_df.loc[out_df['ret'] <= 0, 'bin'] = 0\n\n # Transform the log returns back to normal returns.\n out_df['ret'] = np.exp(out_df['ret']) - 1\n\n # Add the side to the output. 
This is useful for when a meta label model must be fit\n tb_cols = triple_barrier_events.columns\n if 'side' in tb_cols:\n out_df['side'] = triple_barrier_events['side']\n\n return out_df\n\n\ndef get_side(data):\n fast_window = fast\n slow_window = slow\n\n data['fast_mavg'] = data['close'].rolling(window=fast_window, min_periods=fast_window, center=False).mean()\n data['slow_mavg'] = data['close'].rolling(window=slow_window, min_periods=slow_window, center=False).mean()\n data.head()\n\n # Compute sides\n data['side'] = np.nan\n\n long_signals = data['fast_mavg'] >= data['slow_mavg']\n short_signals = data['fast_mavg'] < data['slow_mavg']\n data.loc[long_signals, 'side'] = 1\n data.loc[short_signals, 'side'] = -1\n\n # Remove Look ahead biase by lagging the signal\n data['side'] = data['side'].shift(1)\n\n return data\n\n\ndef get_indicators(raw_data):\n # Log Returns\n raw_data['log_ret'] = np.log(raw_data['close']).diff()\n\n # Momentum\n raw_data['mom1'] = raw_data['close'].pct_change(periods=1)\n raw_data['mom2'] = raw_data['close'].pct_change(periods=2)\n raw_data['mom3'] = raw_data['close'].pct_change(periods=3)\n raw_data['mom4'] = raw_data['close'].pct_change(periods=4)\n raw_data['mom5'] = raw_data['close'].pct_change(periods=5)\n\n # Volatility\n raw_data['volatility_50'] = raw_data['log_ret'].rolling(window=50, min_periods=50, center=False).std()\n raw_data['volatility_31'] = raw_data['log_ret'].rolling(window=31, min_periods=31, center=False).std()\n raw_data['volatility_15'] = raw_data['log_ret'].rolling(window=15, min_periods=15, center=False).std()\n\n # Serial Correlation (Takes about 4 minutes)\n # GBM data is lack of serial correlation, thus disabled\n\n window_autocorr = 50\n\n raw_data['autocorr_1'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr,\n center=False).apply(lambda x: x.autocorr(lag=1), raw=False)\n raw_data['autocorr_2'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr,\n center=False).apply(lambda x: x.autocorr(lag=2), raw=False)\n raw_data['autocorr_3'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr,\n center=False).apply(lambda x: x.autocorr(lag=3), raw=False)\n raw_data['autocorr_4'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr,\n center=False).apply(lambda x: x.autocorr(lag=4), raw=False)\n raw_data['autocorr_5'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr,\n center=False).apply(lambda x: x.autocorr(lag=5), raw=False)\n\n # Get the various log -t returns\n raw_data['log_t1'] = raw_data['log_ret'].shift(1)\n raw_data['log_t2'] = raw_data['log_ret'].shift(2)\n raw_data['log_t3'] = raw_data['log_ret'].shift(3)\n raw_data['log_t4'] = raw_data['log_ret'].shift(4)\n raw_data['log_t5'] = raw_data['log_ret'].shift(5)\n\n return raw_data\n\n\n# source folders\nmy_dir = os.getcwd()\nticks_folder = os.path.join(my_dir, \"data/5_AdjTicks\")\n\n# destination folder / path to files with dollar bars\ndollar_bars_folder = os.path.join(my_dir, \"data/6_DollarBars\")\nif os.path.basename(dollar_bars_folder) not in os.listdir(os.path.dirname(dollar_bars_folder)):\n os.mkdir(dollar_bars_folder)\n\n# destination folder / path to files with indicators\nindicators_folder = os.path.join(my_dir, \"data/7_Indicators\")\nif os.path.basename(indicators_folder) not in os.listdir(os.path.dirname(indicators_folder)):\n os.mkdir(indicators_folder)\n\nkeys = [key[:4] for key in os.listdir(ticks_folder) if not 
key.startswith(\".\")]\nprint(keys)\n\n# Input parameters\n\nest_ticks = 10 # per day\n\n# vertical_barrier_days = 5 # days\n\n# the following parameters need to be adjusted for particular case\npt_sl = [1, 2]\n# min_ret = 1 / 100 # triple_barrier_boundary\n\n# sma\nfast = 20\nslow = 50\n\nget_dollar_bars_file_name = lambda key, est_ticks: f\"{key}_{str(est_ticks)}_dollar_bars.csv\"\n\nfor key in keys:\n\n ticks_file = [f for f in os.listdir(ticks_folder) if key in f][0]\n ticks_file_path = os.path.join(ticks_folder, ticks_file)\n\n dollar_bars_path = os.path.join(dollar_bars_folder, get_dollar_bars_file_name(key, est_ticks))\n\n if os.path.basename(dollar_bars_path) not in os.listdir(os.path.dirname(dollar_bars_path)):\n # indicators_path = os.path.join(indicators_folder, (key+ '_indicators.csv'))\n\n # Select DollarBar size\n ticks = pd.read_parquet(ticks_file_path)\n\n # In[6]:\n\n # overall traded volume\n N = ticks[['price', 'volume']].prod(axis=1).sum()\n\n # number of days traded\n D = np.unique(ticks.date_time.values.astype('M8[D]')).shape[0]\n\n # estimated threshold wrt estimated dayly amount of ticks\n threshold = np.round((N / D) / est_ticks)\n print('Creating Dollar Bars for ', key)\n print(\"N of ticks: \", N, \"trading days: \", D, \"dollars of trade in dollar bar: \", threshold)\n\n # ## Create dollar bars\n dollar = ml.data_structures.get_dollar_bars(ticks_file_path,\n threshold=threshold, batch_size=5000000,\n verbose=True, to_csv=True,\n output_path=dollar_bars_path)\n\n# get_indicators_name = lambda key, vbd, minret, fast, slow: f\"{key}_{str(vbd)}_{str(minret * 1000)}_{str(fast)}-{str(slow)}_ind.csv\"\n\n\nfor vertical_barrier_days in [1, 3, 5, 7, 10, 15, 20, 25, 35, 50, 70]:\n for min_ret in [.1, .5, 1, 1.5, 2, 3]:\n min_ret *= .01\n for fast, slow in [(20, 50)]: # , (50, 200)\n\n for key in keys:\n dollar_bars_path = os.path.join(dollar_bars_folder, get_dollar_bars_file_name(key, est_ticks))\n indicators_path = os.path.join(indicators_folder,\n f\"{key}_{str(vertical_barrier_days)}_{str(min_ret * 1000)}_{str(fast)}-{str(slow)}_ind.csv\")\n\n if os.path.basename(indicators_path) not in os.listdir(os.path.dirname(indicators_path)):\n data = pd.read_csv(dollar_bars_path, index_col=0, parse_dates=True)\n print(\"data shape for \", key, \" - \", data.shape)\n\n # data heads: ['open', 'high', 'low', 'close'] ?? 
cum_vol cum_dollar cum_ticks\n ############ get indicators:###########################################\n data = get_side(data)\n\n ################## build bins ###################################\n # Save the raw data\n raw_data = data.copy()\n\n # Drop the NaN values from our data set\n data.dropna(axis=0, how='any', inplace=True)\n\n trplbr = TrippleBarrier()\n\n # Compute daily volatility\n daily_vol = trplbr.get_daily_vol(close=data['close'], lookback=50)\n\n # Apply Symmetric CUSUM Filter and get timestamps for events\n # Note: Only the CUSUM filter needs a point estimate for volatility\n cusum_events = trplbr.cusum_filter(data['close'], threshold=daily_vol.mean() * 0.5)\n\n # Compute vertical barrier\n vertical_barriers = trplbr.add_vertical_barrier(t_events=cusum_events, close=data['close'],\n num_days=vertical_barrier_days)\n\n # the following parameters need to be adjusted for particular case\n # pt_sl = [1, 2]\n # min_ret = 0.0005\n triple_barrier_events = trplbr.get_events(close=data['close'],\n t_events=cusum_events,\n pt_sl=pt_sl,\n target=daily_vol,\n min_ret=min_ret,\n num_threads=3,\n vertical_barrier_times=vertical_barriers,\n side_prediction=data['side'])\n\n # labels = ml.labeling.get_bins(triple_barrier_events, data['close'])\n labels = trplbr.get_bins(triple_barrier_events, data['close'])\n\n print(\"shape of labels :\", labels.shape)\n\n ###################### get other indicators ####################################\n if 'volatility_15' not in raw_data.index:\n raw_data = get_indicators(raw_data)\n\n #### Now get the data at the specified events\n\n df = pd.concat([raw_data, labels], axis=1, sort=False)\n\n df[~df.slow_mavg.isna()].to_csv(indicators_path)\n","sub_path":"src/tick_data_strategies/5_dollar_bars_triple_barrier_indicators_multi.py","file_name":"5_dollar_bars_triple_barrier_indicators_multi.py","file_ext":"py","file_size_in_byte":35430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"410513124","text":"from google.appengine.ext import webapp\nimport controllers\nimport re\nimport logging\n\nclass RestHandler(webapp.RequestHandler):\n def get(self):\n self.dispatch_request()\n \n def put(self):\n self.dispatch_request()\n \n def post(self):\n self.dispatch_request()\n \n def delete(self):\n self.dispatch_request()\n\n resource_map = {\n 'sets': controllers.SetsController,\n 'cards': controllers.CardsController\n }\n\n def dispatch_request(self):\n http_method = self.request.method\n if http_method == 'POST' and self.request.get('_method'):\n http_method = self.request.get('_method')\n\n match = re.match(r'^/api/(?P\\w+)/(?P\\w+)?', self.request.path)\n resource_name = match.group('resource') \n resource_key = match.group('key')\n controller = self.resource_map[resource_name](self.request)\n\n if http_method == 'GET':\n if resource_key:\n meth = lambda : controller.show(resource_key)\n else:\n meth = controller.index\n\n if http_method == 'POST':\n meth = controller.create\n \n if http_method == 'PUT':\n meth = lambda : controller.update(resource_key)\n \n if http_method == 'DELETE':\n meth = lambda : controller.delete(resource_key)\n\n resp = meth()\n self.response.out.write(resp)\n\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"426062971","text":"from main.decorators import render_to\nfrom portfolio.models import Project\nfrom django.utils.translation 
import ugettext_lazy as _\nfrom django.conf import settings\n\ntdir = 'portfolio/project'\n\n\n@render_to('list.html', tdir)\ndef list(request, **params):\n from portfolio.services import ProjectListService\n\n service = ProjectListService(request, params)\n projects, project_paginator = service.getData()\n return {\n 'projects': projects,\n 'project_paginator': project_paginator,\n 'page_title': settings.SITE_NAME + ' | Portfolio',\n 'metakeywords': '',\n 'metadescription': ''\n }\n\n\n@render_to('_items.html', tdir)\ndef items(request):\n from portfolio.services import ProjectListService\n\n service = ProjectListService(request, request.GET)\n projects, project_paginator = service.getData()\n\n return {\n 'projects': projects,\n 'project_paginator': project_paginator\n }\n\n\n@render_to('view.html', tdir)\ndef view(request, id):\n project = Project.objects.get(pk=id)\n return {\n 'project': project,\n 'page_title': settings.SITE_NAME + ' | ' + project.title,\n 'metakeywords': '',\n 'metadescription': ''\n }","sub_path":"apps/portfolio/views/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"248321270","text":"\"\"\"\nExamples corresponding to sandbox.stats.multicomp\n\"\"\"\nimport numpy as np\nfrom scipy import stats\n\nfrom statsmodels.compat.python import lzip\nfrom statsmodels.iolib.table import SimpleTable\nfrom statsmodels.stats.multitest import multipletests, _ecdf as ecdf\n\nfrom statsmodels.sandbox.stats.multicomp import (\n maxzero, maxzerodown, fdrcorrection_bak\n)\n\n\ndef example_fdr_bonferroni():\n x1 = [1, 1, 1, 0, -1, -1, -1, 0, 1, 1, -1, 1]\n print(lzip(np.arange(len(x1)), x1))\n print(maxzero(x1))\n # Expected output from these last two prints:\n # [(0, 1), (1, 1), (2, 1), (3, 0), (4, -1), (5, -1), (6, -1), (7, 0), \\\n # (8, 1), (9, 1), (10, -1), (11, 1)]\n # (11, array([ 3, 7, 11]))\n\n print(maxzerodown(-np.array(x1)))\n\n locs = np.linspace(0, 1, 10)\n locs = np.array([0.]*6 + [0.75]*4)\n rvs = locs + stats.norm.rvs(size=(20, 10))\n tt, tpval = stats.ttest_1samp(rvs, 0)\n tpval_sortind = np.argsort(tpval)\n tpval_sorted = tpval[tpval_sortind]\n\n reject = tpval_sorted < ecdf(tpval_sorted)*0.05\n reject2 = max(np.nonzero(reject))\n print(reject)\n print(reject2)\n\n res = np.array(lzip(np.round(rvs.mean(0), 4),\n np.round(tpval, 4),\n reject[tpval_sortind.argsort()]),\n dtype=[('mean', float),\n ('pval', float),\n ('reject', np.bool8)])\n print(SimpleTable(res, headers=res.dtype.names))\n print(fdrcorrection_bak(tpval, alpha=0.05))\n print(reject)\n\n print('\\nrandom example')\n print('bonf', multipletests(tpval, alpha=0.05, method='bonf'))\n print('sidak', multipletests(tpval, alpha=0.05, method='sidak'))\n print('hs', multipletests(tpval, alpha=0.05, method='hs'))\n print('sh', multipletests(tpval, alpha=0.05, method='sh'))\n pvals = np.array([0.002, 0.0045, 0.006, 0.008, 0.0085,\n 0.009, 0.0175, 0.025, 0.1055, 0.535])\n print('\\nexample from lecture notes')\n for meth in ['bonf', 'sidak', 'hs', 'sh']:\n print(meth)\n print(multipletests(pvals, alpha=0.05, method=meth))\n","sub_path":"site-packages/statsmodels/sandbox/stats/ex_multicomp.py","file_name":"ex_multicomp.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"187731034","text":"x = int(input(\"Adj meg egy számot: \"))\nresult = 0\nfor result in range(0, abs(x)+1):\n if result**3 
== abs(x):\n break\nif result**3 != abs(x):\n print(\"A számnak nincs egész köbgyöke.\")\nelse:\n if x < 0:\n result = - result\n print(x,\" köbgyöke \",result)\n","sub_path":"week-5/thursday/pld9.py","file_name":"pld9.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"270906586","text":"\n\nfrom xai.brain.wordbase.verbs._emote import _EMOTE\n\n#calss header\nclass _EMOTING(_EMOTE, ):\n\tdef __init__(self,): \n\t\t_EMOTE.__init__(self)\n\t\tself.name = \"EMOTING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"emote\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_emoting.py","file_name":"_emoting.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"281055166","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 19 13:18:54 2021\n\n@author: shubham.sharma1\n\"\"\"\n\n\nfrom flask import Flask, request\napp = Flask(__name__)\n\n\n\n@app.route('/enc/',methods=['GET'])\ndef he(s):\n a = ''\n n = int(request.args.get('n'))\n for i in s:\n x = chr(ord(i) + n)\n a = a + x\n print(a)\n return(a)\n\n\n\nif __name__ == '__main__':\n app.run(debug=False, host='0.0.0.0')","sub_path":"basics/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"91885576","text":"#!/usr/bin/python\nimport sys\nword=sys.argv[1]\ndef vowelcounter(word):\n\tword=word.lower()\n\tvowels=['a','e','i','o','u']\n\tfor i in vowels:\n\t\tif i in word:\n\t\t\tcount=word.count(i)\n\t\t\tprint (i,count)\nprint(\"Now printing number of vowels\")\nvowelcounter(word)\n","sub_path":"vowelcounting.py","file_name":"vowelcounting.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"525298534","text":"\n# -*- coding:utf-8 -*-\n\n# производная неявной функции\n\n# вычислить производную функции x**2 + 2xy + 2y**2 = 1\n\nfrom sympy import*\n\nx = symbols('x')\ny = Function('y')(x)\nz = symbols('z')\n\neq = x**2 + 2*x*y + 2*y**2 - 1\n\n# Make a differential equation\nd_eq = diff(eq,x)\npprint (d_eq)\n\n","sub_path":"2017/Sympy/производная_неявной_функции/proizvonaya_neyavnoy_1.py","file_name":"proizvonaya_neyavnoy_1.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"178883318","text":"import numpy as np\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import DataStructs\nfrom mol2vec.features import mol2alt_sentence, MolSentence, DfVec, sentences2vec, mol2sentence\nfrom mol2vec.helpers import depict_identifier, mol_to_svg, IdentifierTable, plot_2D_vectors\n\n#from rdkit.Chem import AllChem as Chem\nfrom rdkit.Chem.Draw import IPythonConsole\nfrom rdkit.Chem import PandasTools\nfrom rdkit.Chem import Draw\nfrom rdkit.Chem import rdDepictor\nfrom rdkit.Chem.Draw import rdMolDraw2D\nfrom IPython.core.display import HTML\nfrom IPython.display import SVG\nIPythonConsole.ipython_useSVG=True\nimport re\n\n\n\n\ndef get_ordered_fingerprint_string_new(SMILE, radius = 2):\n\n try:\n fingerprints=mol2alt_sentence(Chem.MolFromSmiles(SMILE), radius)\n return ' '.join([str(i) for i in list(fingerprints)])\n \n except Exception as e:\n \n print(e)\n print(SMILE)\n return '' \n \ndef get_lenfingerprint(SMILE, 
radius = 2):\n\n try:\n return len(set(mol2alt_sentence(Chem.MolFromSmiles(SMILE), radius)))\n \n except Exception as e:\n \n print(e)\n print(SMILE)\n return ''\n\ndef get_lenfingerprint2(SMILE, radius = 2):\n\n try:\n return len(AllChem.GetMorganFingerprint(Chem.MolFromSmiles(SMILE),radius,bitInfo=info).GetNonzeroElements())\n except Exception as e:\n \n print(e)\n print(SMILE)\n return ''\n \n \n# explicit representation\ndef get_binary_representation(SMILE, radius = 2, nBits=2048):\n try:\n array = np.zeros((0, ))\n DataStructs.ConvertToNumpyArray(AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(SMILE),radius, nBits), array)\n return array\n \n except Exception as e:\n \n print(e)\n print(SMILE)\n return ''\n \ndef get_count_representation(SMILE, radius = 2, nBits=2048):\n try:\n array = np.zeros((0, ))\n DataStructs.ConvertToNumpyArray(AllChem.GetHashedMorganFingerprint(Chem.MolFromSmiles(SMILE),radius, nBits), array)\n return array\n \n except Exception as e:\n \n print(e)\n print(SMILE)\n return ''\n \n \ndef get_bit_info(SMILE, radius = 2, nBits=2048): \n try:\n bi = {}\n fp = AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(SMILE), radius, nBits,bitInfo=bi)\n return bi\n\n except Exception as e:\n print(e)\n print(SMILE)\n return '' \n \n\n \n \ndef get_smile_fragment(SMILE, atomid, radius): \n mol=Chem.MolFromSmiles(SMILE)\n \n \n if radius >0:\n env = Chem.FindAtomEnvironmentOfRadiusN(mol,radius,atomid)\n atoms=set()\n for bidx in env:\n atoms.add(mol.GetBondWithIdx(bidx).GetBeginAtomIdx())\n atoms.add(mol.GetBondWithIdx(bidx).GetEndAtomIdx())\n \n return Chem.MolFragmentToSmiles(mol,atomsToUse=list(atoms),bondsToUse=env,rootedAtAtom=atomid)\n else:\n return '['+mol.GetAtomWithIdx(atomid).GetSymbol()+']'\n \n \n\n# from rdkit blog\n\n\ndef includeRingMembership(s, n):\n r=';R]'\n d=\"]\"\n return r.join([d.join(s.split(d)[:n]),d.join(s.split(d)[n:])])\n \ndef includeDegree(s, n, d):\n r=';D'+str(d)+']'\n d=\"]\"\n return r.join([d.join(s.split(d)[:n]),d.join(s.split(d)[n:])])\n \ndef writePropsToSmiles(mol,smi,order):\n #finalsmi = copy.deepcopy(smi)\n finalsmi = smi\n for i,a in enumerate(order):\n atom = mol.GetAtomWithIdx(a)\n if atom.IsInRing():\n finalsmi = includeRingMembership(finalsmi, i+1)\n finalsmi = includeDegree(finalsmi, i+1, atom.GetDegree())\n return finalsmi\n\n \ndef getSubstructSmi(SMILE,atomID,radius):\n mol=Chem.MolFromSmiles(SMILE)\n \n if radius>0:\n env = Chem.FindAtomEnvironmentOfRadiusN(mol,radius,atomID)\n atomsToUse=[]\n for b in env:\n atomsToUse.append(mol.GetBondWithIdx(b).GetBeginAtomIdx())\n atomsToUse.append(mol.GetBondWithIdx(b).GetEndAtomIdx())\n atomsToUse = list(set(atomsToUse))\n else:\n atomsToUse = [atomID]\n env=None\n smi = Chem.MolFragmentToSmiles(mol,atomsToUse,bondsToUse=env,allHsExplicit=True, allBondsExplicit=True, rootedAtAtom=atomID)\n order = eval(mol.GetProp(\"_smilesAtomOutputOrder\"))\n smi2 = writePropsToSmiles(mol,smi,order)\n return smi,smi2\n\n\ndef _prepareMol(mol,kekulize):\n mc = Chem.Mol(mol.ToBinary())\n if kekulize:\n try:\n Chem.Kekulize(mc)\n except:\n mc = Chem.Mol(mol.ToBinary())\n if not mc.GetNumConformers():\n rdDepictor.Compute2DCoords(mc)\n return mc\ndef moltosvg(mol,molSize=(450,200),kekulize=True,drawer=None,**kwargs):\n mc = _prepareMol(mol,kekulize)\n if drawer is None:\n drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0],molSize[1])\n drawer.DrawMolecule(mc,**kwargs)\n drawer.FinishDrawing()\n svg = drawer.GetDrawingText()\n # It seems that the svg renderer used doesn't quite hit the spec.\n 
# Here are some fixes to make it work in the notebook, although I think\n # the underlying issue needs to be resolved at the generation step\n return SVG(svg.replace('svg:',''))\n\n\n# do a depiction where the atom environment is highlighted normally and the central atom\n# is highlighted in blue\ndef getSubstructDepiction(mol,atomID,radius,molSize=(450,200)):\n if radius>0:\n env = Chem.FindAtomEnvironmentOfRadiusN(mol,radius,atomID)\n atomsToUse=[]\n for b in env:\n atomsToUse.append(mol.GetBondWithIdx(b).GetBeginAtomIdx())\n atomsToUse.append(mol.GetBondWithIdx(b).GetEndAtomIdx())\n atomsToUse = list(set(atomsToUse)) \n else:\n atomsToUse = [atomID]\n env=None\n return moltosvg(mol,molSize=molSize,highlightAtoms=atomsToUse,highlightAtomColors={atomID:(0.3,0.3,1)})\n\ndef depictBit(bitId,SMILE,molSize=(450,200),radius = 2, nBits=2048):\n mol=Chem.MolFromSmiles(SMILE)\n info={}\n fp = AllChem.GetMorganFingerprintAsBitVect(mol,radius, nBits,bitInfo=info)\n aid,rad = info[bitId][0]\n return getSubstructDepiction(mol,aid,rad,molSize=molSize)\n\ndef depictBit_index(bitId,SMILE,molSize=(450,200),radius = 2, nBits=2048, index=0):\n mol=Chem.MolFromSmiles(SMILE)\n info={}\n fp = AllChem.GetMorganFingerprintAsBitVect(mol,radius, nBits,bitInfo=info)\n aid,rad = info[bitId][index]\n return getSubstructDepiction(mol,aid,rad,molSize=molSize)\n\n\ndef NumberAtomsInFragment(SMILE):\n return len(re.sub('[hH]','', \"\".join(re.findall(\"[a-zA-Z]+\", SMILE))))","sub_path":"code/functions_structural_similarity.py","file_name":"functions_structural_similarity.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"364191090","text":"# Libraries\r\nimport pandas as pd\r\nfrom collections import OrderedDict\r\nfrom Recommendation import AnimeRecommendation\r\nfrom flask import make_response, jsonify\r\nfrom model import Ratings\r\n\r\n# Load the Dataset\r\ndataframe = pd.read_csv('data/cleaned_anime_data.csv')\r\ndataframe = dataframe.reset_index()\r\ndataframe = dataframe.drop('index', axis = 1)\r\n\r\n# Initialize the Class\r\nanime = AnimeRecommendation(dataframe)\r\n\r\n# Get the Mappings\r\nindices = anime.getMapping()\r\n\r\n# Get Similarity Matrix\r\nsimMatrix = anime.getSimilartiyMatrix()\r\n\r\n# Create a Ratings\r\nratings = Ratings()\r\n\r\ndef homePage():\r\n\t'''\r\n\t\t:return:\r\n\r\n\t'''\r\n\r\n\thomepage_animes = OrderedDict([('animes', list())])\r\n\r\n\ttry:\r\n\t\tanime_idxs = anime.getAnimeSample()\r\n\r\n\t\tfor idx in anime_idxs:\r\n\t\t\thomepage_animes['animes'].append(anime.build_AnimeDict(idx))\r\n\r\n\t\treturn homepage_animes\r\n\texcept Exception as e:\r\n\t\treturn make_response(jsonify({'Success': False}), 404)\r\n\r\n\r\ndef returnRecommended(anime_name):\r\n\t'''\r\n\t\tParameters:\r\n\t\t\tanime_name: Name of the Anime to get Recommendation.\r\n\r\n\t\t:return:\r\n\t\t\tJSON Formatted response of recommended anime.\r\n\t'''\r\n\r\n\trecommended_animes = OrderedDict()\r\n\r\n\trecommended_animes['input'] = list()\r\n\trecommended_animes['output'] = OrderedDict()\r\n\trecommended_animes['output']['animes'] = list()\r\n\r\n\tanime_name = anime_name.lower()\r\n\r\n\tanime_Idx = anime.getID(anime_name)#dataframe[dataframe.Title.str.lower().str.contains(anime_name)]['Anime_ID'].values\r\n\r\n\ttry:\r\n\t\tif len(anime_Idx) > 1:\r\n\t\t\tfor idx in anime_Idx:\r\n\t\t\t\trecommended_animes['output']['animes'].append(anime.build_AnimeDict(idx))\r\n\r\n\t\telse:\r\n\t\t\t# Get Anime 
ID\r\n\t\t\tanime_id = anime.getID(anime_name)[0]\r\n\t\t\trecommended_animes['input'].append(anime.build_AnimeDict(anime_id))\r\n\r\n\t\t\tg = anime.getRecommendation(anime_id, simMatrix, indices)\r\n\r\n\t\t\tfor idx in g:\r\n\t\t\t\trecommended_animes['output']['animes'].append(anime.build_AnimeDict(idx))\r\n\r\n\t\treturn recommended_animes\r\n\texcept Exception as e:\r\n\t\treturn make_response(jsonify({'Success': False}), 404)\r\n\r\ndef readGenre(genre):\r\n\t'''\r\n\t\tParameters:\r\n\t\t\tgenre: One of the Genres.\r\n\r\n\t\t:return:\r\n\t\t\tJSON Formatted reponse with animes.\r\n\t'''\r\n\tanimes_by_genre = OrderedDict([\r\n\t\t('output', OrderedDict([\r\n\t\t\t('animes', list())\r\n\t\t]))\r\n\t])\r\n\tgenre = genre.lower()\r\n\r\n\ttry:\r\n\t\tanime_idxs = anime.getAnime_byGenre(genre)\r\n\r\n\t\tfor idx in anime_idxs:\r\n\t\t\tanimes_by_genre['output']['animes'].append(anime.build_AnimeDict(idx))\r\n\r\n\t\treturn animes_by_genre\r\n\texcept Exception as e:\r\n\t\treturn make_response(jsonify({'Success': False}), 404)\r\n\r\ndef createRatings(anime_ratings):\r\n\t'''\r\n\t\tCreate Ratings for the given Anime and recommended Anime.\r\n\t\t\t- 1: Both are Similar\r\n\t\t\t- 0: Not Similar\r\n\r\n\t\tParameters:\r\n\t\t\tanime_ratings: JSON Formatted Data with\r\n\t\t\t\t\t\t\t- Name of the given Anime\r\n\t\t\t\t\t\t\t- Name of the recommended Anime\r\n\t\t\t\t\t\t\t- Rating (1 or 0).\r\n\r\n\t\t:return:\r\n\t\t\t201 Succes or 400 Error\r\n\t'''\r\n\r\n\tmain_anime = anime_ratings.get('main_anime_name', None)\r\n\trecomm_anime = anime_ratings.get('recomm_anime_name', None)\r\n\trating = anime_ratings.get('rating', None)\r\n\r\n\ttry:\r\n\t\t# Get the ID\r\n\t\tidx_1 = anime.getID(main_anime.lower())[0]\r\n\t\tidx_2 = anime.getID(recomm_anime.lower())[0]\r\n\r\n\t\tcounts = ratings.addRating(rating_data = (idx_1, idx_2, rating))\r\n\r\n\t\tif counts == 10:\r\n\t\t\tratings.saveRating()\r\n\t\t\tratings.__reset__()\r\n\r\n\t\treturn 201\r\n\texcept Exception as e:\r\n\t\treturn make_response(jsonify({'Success': False}), 400)\r\n","sub_path":"Anime.py","file_name":"Anime.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"17466777","text":"import sys\n\n\ndef main():\n for arg in sys.argv[1:]:\n file_name = arg\n\n file = open(file_name, 'r')\n content = file.read().split('\\n')\n\n for line in content:\n print(line)\n\n file.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"solutions3/cat2.py","file_name":"cat2.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"462599476","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n n_samples = 500\n data1 = np.random.multivariate_normal(mean=np.array([0, 0]),\n cov=np.array([[1, 0.25],\n [0.25, 0.05]]),\n size=n_samples)\n data2 = np.random.multivariate_normal(mean=np.array([0.7, 1]),\n cov=np.array([[0.1, -0.25],\n [-0.25, 1]]),\n size=n_samples)\n data3 = np.random.multivariate_normal(mean=np.array([-2, 4]),\n cov=np.array([[0.2, 0.25],\n [0.25, 1]]),\n size=n_samples)\n\n data4 = np.array([[-1, 1],\n [-3, -2],\n [3, 3],\n [3, -2],\n ])\n data = np.concatenate((data1, data2, data3, data4), axis=0)\n\n\n distances = get_distances(data=data)\n\n k = 4\n n_samples = distances.shape[0]\n local_outlier_factors = np.zeros(shape=n_samples)\n for index_sample in range(0, n_samples):\n 
local_outlier_factors[index_sample] = get_local_outlier_factor(distances=distances, index_query=index_sample,\n k=k)\n\n # Make figure\n plt.figure(figsize=(16, 9))\n plt.scatter(data[:, 0], data[:, 1], s=1)\n for index_sample in range(n_samples):\n threshold = 3.5\n if local_outlier_factors[index_sample] > threshold:\n color = (1, 0, 0)\n else:\n color = (0.4, 0.4, 0.8)\n plt.text(data[index_sample, 0], data[index_sample, 1],\n f' {local_outlier_factors[index_sample]:.2f}',\n fontsize=8,\n color=color)\n plt.show()\n print('\\nScript finished')\n return\n\ndef get_local_outlier_factor(distances, index_query, k):\n \"\"\"Computes the local outlier factor according to E.Alaydin (2014), section 8.7\"\"\"\n # Get index of index_query's k nearest neighbors\n indexes_knn = get_indexes_of_k_nearest_neighbors(index_query=index_query, k=k, distances=distances)\n\n # For each neighbor (specified by indexes_knn), compute the distance to its respective k-nearest neighbor and store\n # the result in distances_knn\n distances_knn = np.zeros(shape=k)\n for i, index_neighbor in enumerate(indexes_knn, 0):\n indexes_knn_current_neighbor = get_indexes_of_k_nearest_neighbors(index_query=index_neighbor, k=k,\n distances=distances)\n distances_knn[i] = distances[index_neighbor, indexes_knn_current_neighbor[-1]]\n\n # Compute local outlier factor of sample index_query. Compare to equation (8.23) on p.200 in E.Alpaydin (2014)\n return distances[index_query, indexes_knn[-1]] / np.mean(distances_knn)\n\n\ndef get_distances(data, method='euclidean'):\n n_samples = data.shape[0]\n distances = np.zeros((n_samples, n_samples))\n for i in range(0, n_samples):\n for j in range(i, n_samples):\n if method == 'euclidean':\n distances[i, j] = np.sqrt(np.sum((data[i, :] - data[j, :]) ** 2))\n distances[j, i] = distances[i, j]\n return distances\n\n\ndef get_distance_to_k_nearest_neighbor(index_query, k, distances):\n \"\"\"Get distance to k-nearest neighbor\"\"\"\n indexes_sorted = np.argsort(distances[index_query, :])\n index_knn = indexes_sorted[k+1]\n return distances[index_query, index_knn]\n\n\ndef get_indexes_of_k_nearest_neighbors(index_query, k, distances):\n \"\"\"Get indexes of k-nearest neighbors\"\"\"\n indexes_sorted = np.argsort(distances[index_query, :])\n return indexes_sorted[1: k+1]\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pystatistics/outlier_detection/local_outlier_factor.py","file_name":"local_outlier_factor.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"179805798","text":"#!/usr/bin/env python3\n\nimport re, os, sys, random\nfrom urllib import request\n\nBASE = 'https://www.simonstalenhag.se/'\nIMAGES_DIR = os.path.expanduser('~/Pictures/Stålenhag/')\n\ndef check_dirs():\n if not os.path.isdir(IMAGES_DIR):\n os.mkdir(IMAGES_DIR[:-1])\n\ndef local_exists(filename):\n return os.path.isfile(IMAGES_DIR + filename)\n\ndef get_images_list():\n contents = request.urlopen(BASE).read()\n images = re.findall(r'bilderbig\\/[a-zA-Z0-9_]*\\.jpg', str(contents))\n return list(set(images))\n\ndef download_image(name):\n request.urlretrieve(BASE + 'bilderbig/' + name, IMAGES_DIR + name)\n\ndef get_random_local_image():\n images = os.listdir(IMAGES_DIR)\n images = list(filter(lambda s: os.path.isfile(IMAGES_DIR + s), images))\n images = list(filter(lambda s: s.endswith('.jpg'), images))\n \n if images:\n return IMAGES_DIR + random.choice(images)\n else:\n return None\n\ndef get_random_image():\n 
check_dirs()\n \n images = get_images_list()\n img = random.choice(images)\n name = img[10:]\n\n if not local_exists(name):\n download_image(name)\n\n return IMAGES_DIR + name\n\ndef get_all_images():\n check_dirs()\n\n images = get_images_list()\n\n print('Found', len(images), 'images')\n\n index = 1\n for img in images:\n name = img[10:]\n print(str(index) + ')', name, end='')\n \n try:\n download_image(name)\n\n except KeyboardInterrupt:\n exit() \n except:\n print('\\r-->', str(index) + ')', name, 'FAILED', end='')\n \n print('')\n index += 1\n\ndef set_background(path):\n print('set image', path)\n os.system('gsettings set org.gnome.desktop.background picture-uri file://' + path)\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1 and sys.argv[1] == 'all':\n get_all_images()\n else: \n img = None\n \n try:\n img = get_random_image()\n except:\n img = get_random_local_image()\n \n if img:\n set_background(img)\n else:\n print('Failed to find an image')","sub_path":"stalenhag.py","file_name":"stalenhag.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"179855015","text":"#aprendendo Python\n#programa de controle para latas de tinta\n\narea = int(input(\"Qual a area a ser pintada: \"))\n\n#cada litro preenche 3 metros quadrados de area\n#cada lata tem capacidade de 18 litros\n\n\n\nif area % 54 != 0:\n latas = int(area/54) + 1\n\nelse:\n latas = area/54\n\npreco = latas*80\n\nprint('O numero de latas e %d ' %(latas))\nprint('O preco total e %d ' %(preco))\n","sub_path":"latas.py","file_name":"latas.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"283290260","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n@File : sample.py\n@Contact : wanghan@inovance.com\n@License : (C)Copyright 2020-2021, inovance\n\n@Modify Time @Author @Version @Description\n------------ ------- -------- -----------\n2020/10/30 16:58 WANG HAN 1.0 None\n\"\"\"\n\n# import lib\n# 导入日志,加上模块名\nfrom configparser import ConfigParser\n\nfrom lib.logger.log import Logger\n\nlogger = Logger(file_name='l3')\n\n# 导入配置文件\n\nconfig = ConfigParser()\nconfig.read(\"../config.ini\")\nscanner_model = config.get('SCANNER', 'scanner_model') # 扫描仪型号\nscanner_host = config.get('SCANNER', 'scanner_host')\nscanner_port = config.getint('SCANNER', 'scanner_port')\nscanner_height = config.getint('SCANNER', 'scanner_height')\nscanner_numbers = config.getint('SCANNER', 'scanner_numbers') # 扫描仪个数\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"301111253","text":"# -*- coding: utf-8 -*-\n\"\"\"Transition matrix module.\"\"\"\n\nfrom typing import Optional\n\nfrom scanpy import logging as logg\nfrom anndata import AnnData\n\nfrom cellrank.tools.kernels._kernel import (\n VelocityKernel,\n KernelExpression,\n ConnectivityKernel,\n)\n\n\ndef transition_matrix(\n adata: AnnData,\n vkey: str = \"velocity\",\n backward: bool = False,\n weight_connectivities: Optional[float] = None,\n sigma_corr: Optional[float] = None,\n scale_by_variances: bool = False,\n var_key: Optional[str] = \"velocity_graph_uncertainties\",\n var_min: float = 0.1,\n use_negative_cosines: bool = True,\n self_transitions: bool = False,\n perc: Optional[float] = None,\n threshold: Optional[float] = None,\n 
density_normalize: bool = True,\n) -> KernelExpression:\n \"\"\"\n Compute a transition matrix based on a combination of RNA Velocity and transcriptomic similarity.\n\n To learn more about the way in which the transition matrices are computed, see\n :class:`cellrank.tl.kernels.VelocityKernel` for the velocity-based transition matrix and\n :class:`cellrank.tl.kernels.ConnectivityKernel` for the transcriptomic-similarity-based transition matrix.\n\n Params\n ------\n adata: :class:`anndata.AnnData`\n Annotated data object.\n vkey\n Key from :paramref:`adata` `.layers` to access the velocities.\n backward\n Direction of the process.\n weight_connectivities\n Weight given to transcriptomic similarities as opposed to velocities. Must be in `[0, 1]`.\n use_negative_cosines\n Whether to use correlations with cells that have an angle > 90 degree with :math:`v_i`.\n sigma_corr\n Scaling parameter for the softmax. Larger values will lead to a more concentrated distribution (more peaked).\n Default is to use 1 / median_velocity_correlation.\n scale_by_variances\n Use velocity variances to scale the softmax.\n var_key\n Key from `adata.uns` to acess velocity variances.\n var_min\n Variances are clipped to this value at the lower end.\n self_transitions\n Assigns elements to the diagonal of the velocity-graph based on a confidence measure\n perc\n Quantile of the distribution of exponentiated velocity correlations. This is used as a threshold to set\n smaller values to zero.\n threshold\n Set a threshold to remove exponentiated velocity correlations smaller than :paramref:`threshold`.\n density_normalize\n Whether to use density correction when computing the transition probabilities.\n Density correction is done as by [Haghverdi16]_.\n\n Returns\n -------\n :class:`cellrank.tl.KernelExpression`\n A kernel expression object.\n \"\"\"\n\n # initialise the velocity kernel and compute transition matrix\n vk = VelocityKernel(\n adata,\n backward=backward,\n vkey=vkey,\n use_negative_cosines=use_negative_cosines,\n var_key=var_key,\n )\n vk.compute_transition_matrix(\n sigma_corr=sigma_corr,\n scale_by_variances=scale_by_variances,\n var_min=var_min,\n self_transitions=self_transitions,\n perc=perc,\n threshold=threshold,\n density_normalize=density_normalize,\n )\n\n if weight_connectivities is not None:\n if 0 < weight_connectivities < 1:\n logg.info(\n f\"Using a connectivity kernel with weight `{weight_connectivities}`\"\n )\n ck = ConnectivityKernel(adata, backward=backward).compute_transition_matrix(\n density_normalize=density_normalize\n )\n final = (1 - weight_connectivities) * vk + weight_connectivities * ck\n elif weight_connectivities == 0:\n final = vk\n elif weight_connectivities == 1:\n final = ConnectivityKernel(\n adata, backward=backward\n ).compute_transition_matrix(density_normalize=density_normalize)\n else:\n raise ValueError(\n f\"The parameter `weight_connectivities` must be in range `[0, 1]`, found `{weight_connectivities}`.\"\n )\n else:\n final = vk\n final.write_to_adata()\n\n return final\n","sub_path":"cellrank/tools/_transition_matrix.py","file_name":"_transition_matrix.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"606907110","text":"from pi_trees_ros.pi_trees_ros import *\nfrom pi_trees_lib.task_setup import *\n\nimport GlobalData\nimport CheckRefboxCommand\nimport SkillBook\nimport InPlay\n\n\nclass DirectFriend(ParallelAll):\n def __init__(self, name):\n super(DirectFriend 
, self).__init__(name)\n\n self.refcheck = CheckRefboxCommand.IsDIRECT_FREE_FRIEND('Is Direct Friend')\n self.execute = Execute('Execute Friend Direct kick')\n\n # add parent tasks\n self.add_child(self.refcheck)\n self.add_child(self.execute)\n\n\nclass Execute(Sequence):\n def __init__(self, name):\n super(Execute, self).__init__(name)\n\n self.wrap = SkillBook.CornerStandby('CornerStandby')\n self.inplay = InPlay.InPlay('InPlay')\n\n self.add_child(self.wrap)\n self.add_child(self.inplay)\n","sub_path":"roots_decision_making/scripts/DirectFriend.py","file_name":"DirectFriend.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"128338911","text":"# -*- coding: utf-8 -*-\n\nimport webapp2\nimport logging\nfrom openfish import components, functions, models\nfrom webapp2_extras import json\nfrom webapp2_extras.appengine.auth.models import User\nfrom time import time\n\n\n\n\n\nclass Handler( components.BaseHandler ):\n\n\t# Comment 作成\n\tdef create( self ):\n\t\ttarget_model = self.request.get( 'target_model', None );\n\t\ttarget_id = self.request.get( 'target_id', None );\n\t\tcontent = self.request.get( 'content', None );\n\t\trating = self.request.get( 'rating', None );\n\n\n\t\t# パラメータが足りなかったらエラー\n\t\tif content is None or target_model is None or target_id is None:\n\t\t\tmessage = \"parameter missing\"\n\t\t\tlogging.error( message )\n\t\t\tout = {\n\t\t\t\t\"meta\": functions.createMetaData( status=\"fail\", code=400, method_name=\"createComment\", message=message ),\n\t\t\t};\n\t\t\tself.response.out.write( json.encode(out) )\n\t\t\treturn\n\n\t\t# 文字列からモデルクラス取得\n\t\tclassPath_str = \"models.\"+ target_model.capitalize()\n\t\tklass = webapp2.import_string( classPath_str )\n\t\ttarget_obj = klass.get_by_id( int(target_id) );\n\n\n\t\t# ターゲットオブジェクトがなければエラー\t\n\t\tif target_obj is None:\n\t\t\tmessage = target_model +\"#\"+ target_id +\" is not found\"\n\t\t\tlogging.error( message )\n\t\t\tout = {\n\t\t\t\t\"meta\": functions.createMetaData( status=\"fail\", code=400, method_name=\"createComment\", message=message ),\n\t\t\t};\n\t\t\tself.response.out.write( json.encode(out) )\n\t\t\treturn\n\n\t\t# 保存\n\t\tcomment = models.Comment()\n\t\tcomment.content = content\n\t\tcomment.rating = rating\n\t\tcomment.target_key = target_obj.key\n\t\tif self.user:\n\t\t\tcomment.user_key = self.user.key\n\t\tcomment.put();\n\n\t\t# 出力\n\t\tout = {\n\t\t\t\"meta\": functions.createMetaData( status=\"ok\", code=200, method_name=\"createComment\" ),\n\t\t\t\"response\": {\n\t\t\t\t\"comments\":[\n\t\t\t\t\tcomment.toDict(),\n\t\t\t\t],\n\t\t\t},\n\t\t};\n\t\tself.response.out.write( json.encode(out) )\n\n\n\n\n\t# クエリ\n\tdef query( self ):\n\t\ttarget_model = self.request.get( 'target_model', None );\n\t\ttarget_id = self.request.get( 'target_id', None );\n\n\t\tpage = self.request.get( \"page\", 1 );\n\t\tper_page = self.request.get( \"per_page\", 10 );\n\t\tlimit = self.request.get( \"limit\", 100 );\n\t\tskip = self.request.get( \"skip\", 0 );\n\t\twhere = self.request.get( \"where\" );\n\t\torder = self.request.get( \"order\" );\n\t\tresponse_json_depth = self.request.get( \"response_json_depth\", 3 );\n\n\n\t\t# 文字列からモデルクラス取得\n\t\tclassPath_str = \"models.\"+ target_model.capitalize()\n\t\tklass = webapp2.import_string( classPath_str )\n\n\n\t\tgql_str = \"\"\n\t\tif where:\n\t\t\tgql_str = \"WHERE \"+ where +\" AND is_deleted=False\"\n\t\telse:\n\t\t\tgql_str = \"WHERE is_deleted=False\"\n\t\tgql_str 
= gql_str +\" AND target_key = key('%s',%d)\" % ( target_model.capitalize(), int(target_id) )\n\n\n\t\tif order:\n\t\t\tgql_str = gql_str +\" ORDER BY \"+ order\n\n\n\t\tq = models.Comment.gql( gql_str )\n\t\tresults = q.fetch( int(limit) )\n\t\tklass.prefetchReferences( results )\n\n\t\t# 出力ひな形作成\n\t\tout = {\n\t\t\t\"meta\": functions.createMetaData( status=\"ok\", code=200, method_name=\"queryComments\" ),\n\t\t\t\"response\": {\n\t\t\t\t\"comments\":[],\n\t\t\t},\n\t\t};\n\n\t\tfor result in results:\n\t\t\tout[\"response\"][\"comments\"].append( result.toDict( full=False ) )\n\n\t\tself.response.out.write( json.encode(out) )\n\n\n\n\n\n","sub_path":"handlers/v1/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"275697813","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\running\\gpx2kml.py\n# Compiled at: 2012-12-15 11:57:46\n# Size of source mod 2**32: 7339 bytes\nimport pdb, optparse, datetime, os.path\nfrom lxml import etree\nfrom pykml.factory import KML_ElementMaker as KML\nfrom pykml.factory import GX_ElementMaker as GX\nimport gpxpy, gpxpy.geo\nfrom loutilities import timeu\nMETERPMILE = 1609.3439941\nt = timeu.asctime('%Y-%m-%dT%H:%M:%SZ')\n\ndef main():\n usage = 'usage: %prog [options] \\n\\n'\n usage += 'where:\\n'\n usage += ' \\tgpx formatted file'\n parser = optparse.OptionParser(usage=usage)\n parser.add_option('-p', '--points', dest='points', action='store_true', help='specify if points output is desired', default=False)\n parser.add_option('-f', '--flyover', dest='flyover', action='store_true', help='specify if flyover output is desired', default=False)\n parser.add_option('-c', '--color', dest='color', help='track color if not flyover', default='641400FF')\n parser.add_option('-o', '--output', dest='output', help='output file', default=None)\n options, args = parser.parse_args()\n colors = {'pink':'64781EF0', \n 'blue':'64F01E14'}\n gpxfile = args.pop(0)\n if options.output == None:\n outfile = os.path.basename(gpxfile) + '.kml'\n else:\n outfile = options.output\n _GPX = open(gpxfile, 'r')\n gpx = gpxpy.parse(_GPX)\n stylename = 'sn_shaded_dot'\n color = colors[options.color]\n sty = KML.Style((KML.IconStyle(KML.scale(1.2), KML.Icon(KML.href('http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png')), KML.color(colors[options.color]))),\n id=stylename)\n iconstylename = '#sn_shaded_dot'\n doc = KML.Document(KML.Name('generated from {0}'.format(gpxfile)), KML.open(1))\n doc.append(sty)\n times = []\n coords = []\n dists = []\n points = []\n lastpoint = None\n for track in gpx.tracks:\n for segment in track.segments:\n for point in segment.points:\n if not lastpoint:\n lastpoint = point\n plon = point.longitude\n plat = point.latitude\n pelev = point.elevation\n points.append(point)\n thisdist = gpxpy.geo.distance(lastpoint.latitude, lastpoint.longitude, lastpoint.elevation, plat, plon, pelev)\n lastpoint = point\n dists.append(thisdist)\n ptime = t.dt2asc(point.time)\n times.append(ptime)\n coords.append('{lon},{lat},{alt}'.format(lon=plon, lat=plat, alt=0))\n\n if options.flyover:\n plm = KML.Placemark()\n doc.append(plm)\n track = GX.track()\n plm.append(track)\n for when in times:\n track.append(KML.when(when))\n\n for coord in coords:\n track.append(KML.coordinates(coord))\n\n 
else:\n if options.points:\n lasttime = t.asc2epoch(times[0])\n totdist = 0\n for i in range(len(times)):\n thistime = t.asc2epoch(times[i])\n dur = thistime - lasttime\n lasttime = thistime\n totdist += dists[i]\n ex = KML.ExtendedData(KML.Data(KML.displayName('time'), KML.value(times[i])), KML.Data(KML.displayName('duration'), KML.value(dur)), KML.Data(KML.displayName('totdistance'), KML.value(int(round(totdist)))))\n plm = KML.Placemark(KML.name(''), KML.styleUrl(iconstylename))\n plm.append(ex)\n plm.append(KML.Point(KML.altitudeMode('clampToGround'), KML.coordinates(coords[i])))\n doc.append(plm)\n\n else:\n if options.color:\n doc.append(KML.Style((KML.LineStyle(KML.color(colors[options.color]), KML.width(5))),\n id=(options.color)))\n stylename = '#{0}'.format(options.color)\n plm = KML.Placemark(KML.name('runtrack'))\n if options.color:\n plm.append(KML.styleUrl(stylename))\n doc.append(plm)\n ls = KML.LineString(KML.altitudeMode('clampToGround'))\n plm.append(ls)\n kcoords = ''\n for coord in coords:\n kcoords += coord + ' \\n'\n\n ls.append(KML.coordinates(kcoords))\n _GPX.close()\n kml = KML.kml(doc)\n docstr = etree.tostring(kml, pretty_print=True)\n OUT = open(outfile, 'w')\n OUT.write(docstr)\n OUT.close()\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/runtilities-2.0.1-py3.6/gpx2kml.cpython-36.py","file_name":"gpx2kml.cpython-36.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"241323370","text":"'''This problem was asked by Stitch Fix.\n\nPascal's triangle is a triangular array of integers constructed with the following formula:\n\n The first row consists of the number 1.\n For each subsequent row, each element is the sum of the numbers directly above it, on either side.\n\nFor example, here are the first few rows:\n\n 1\n 1 1\n 1 2 1\n 1 3 3 1\n1 4 6 4 1\n\nGiven an input k, return the kth row of Pascal's triangle.\n\nBonus: Can you do this using only O(k) space?'''\n\n\ndef c(k, n):\n ans = 1\n div = 1\n for i in range(n):\n ans*=(k-i)\n div*=(n-i)\n return ans//div\n \n\n\ndef kthPascalRow(k):\n for i in range(0, k+1):\n print(c(k, i), end=' ')\n print()\n\n\n\nif __name__ == \"__main__\":\n kthPascalRow(int(input()))\n \n","sub_path":"Problem#429.py","file_name":"Problem#429.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"149629990","text":"from numbapro import vectorize, cuda\nfrom numbapro.cudalib import cublas\nfrom sklearn import preprocessing\nimport cudamat as cm\nimport numpy as np\nimport cProfile as profile\nfrom timeit import default_timer as timer\nimport math\nfrom utils import *\nfrom PyCuNN import *\n\nblas = cublas.Blas()\n\ndef run():\n \"\"\"\n a = np.ones((1000,500),dtype='float32')\n c = np.ones((1000,500),dtype='float32')\n #o = np.zeros((1000,400),dtype='float32')\n da = cuda.to_device(a)\n dc = cuda.to_device(c)\n \n mmadd(da,dc,da)\n\n da.copy_to_host(a)\n\n print(a)\n\n \"\"\"\n gc.collect()\n\n a = np.ones((10,200),dtype='float32')\n c = np.zeros((1),dtype='int32')\n f = np.zeros((10,200),dtype='float32')\n #o = np.zeros((3000,200),dtype='float32')\n da = cuda.to_device(a)\n dc = cuda.to_device(c)\n\n start = timer()\n z = np.sum(np.exp(a),axis=1)\n nptime = timer()-start\n print('np',nptime,z)\n\n start = timer()\n g = msum(da)\n cutime = timer()-start\n g.copy_to_host(f)\n print('cutime',cutime,f[:,0])\n\n #print('CU',g)\n\n '''b 
= cm.CUDAMatrix(a)\n d = cm.CUDAMatrix(c)\n\n start = timer()\n b.T\n cmtime = timer() -start\n print('cmtime',cmtime)\n\n print('perf imporvement',cmtime/cutime)\n\n #print('cmoutput',g.asarray())'''\n\n \n # print(np.sum(c,axis=1))\n \ndef tests():\n a = np.random.rand(300,500)\n b = np.random.rand(500,300)\n\n start = timer()\n c = np.dot(a,b)\n nptime = timer()-start\n print('nptime',nptime)\n\n x = np.array(np.random.rand(600,1500),dtype='float32',order='F')\n y = np.array(np.random.rand(1500,300),dtype='float32',order='F')\n z = np.zeros((1000,1000),order='F',dtype='float32')\n\n stream = cuda.stream()\n\n dx = cuda.to_device(x)\n dy = cuda.to_device(y)\n dz = cuda.to_device(z)\n\n start = timer()\n blas.gemm('N','N',1000,1500,1000,1.0,dx,dy,0.0,dz)\n cutime = timer()-start\n print('cutime',cutime)\n\n #dz.copy_to_host(z)\n print(dz[0])\n\n c = np.ones((1000,1000),order='F',dtype='float32')\n print(c.shape)\n dc = cuda.to_device(c)\n\n # blockDim = (256,256)\n #gridDim = (((1000 + blockDim[0]-1)/blockDim[0]),((1000 + blockDim[1]-1)/blockDim[1]))\n\n blockDim = (30,30)\n gridDim = ((((c.shape[0] + blockDim[0]) - 1) / blockDim[0]), (((c.shape[1] + blockDim[1]) - 1) / blockDim[1]))\n\n start = timer()\n mtanh[gridDim,blockDim,stream](dc)\n tantime = timer() - start\n print('tantime',tantime)\n\n dc.copy_to_host(c,stream=stream)\n stream.synchronize()\n print(c)\n\n y = cm.CUDAMatrix(np.ones((1000,1000)))\n\n start = timer()\n cm.tanh(y)\n cmtan = timer()-start\n print('cmtan',cmtan)\n\n x = cm.CUDAMatrix(np.random.rand(1000,1500))\n y = cm.CUDAMatrix(np.random.rand(1500,1000))\n\n start = timer()\n cm.dot(x,y)\n cmtime = timer()-start\n print('cmtime',cmtime)\n\nrun()\n\n'''\n\ndef iter():\n start = timer()\n with open('./data/ptb.train.short.txt') as doc:\n f = doc.read()\n words = f.split(' ')\n seq = f.split('\\n')\n\n \n enc = prepro.LabelBinarizer()\n enc.fit(words)\n ds = []\n for x in sequences:\n w = x.split(' ')\n del w[-1]\n del w[0]\n seq = []\n for z in range(len(w)-1):\n i = cm.CUDAMatrix(enc.transform([w[z]]))\n t = cm.CUDAMatrix(enc.transform([w[z+1]]))\n seq.append([i,t])\n ds.append(seq)\n slurp = timer() - start\n print('slurptime:' slurp)\n\n start = timer()\n ds=[]\n with open('./data/ptb.train.short.txt','r+') as doc:\n for line in doc:\n\n \n enc = prepro.LabelBinarizer()\n enc.fit(words)\n ds = []\n for x in sequences:\n w = x.split(' ')\n del w[-1]\n del w[0]\n seq = []\n for z in range(len(w)-1):\n i = cm.CUDAMatrix(enc.transform([w[z]]))\n t = cm.CUDAMatrix(enc.transform([w[z+1]]))\n seq.append([i,t])\n ds.append(seq)\n slurp = timer() - start\n print('slurptime:' slurp)\n\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"nn/numbaprotests.py","file_name":"numbaprotests.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"486331578","text":"from PIL import Image\n\n\ndef pixelate_redactor(input_file_path, output_file_path, pixel_size, start_x, start_y, size_x, size_y):\n original = Image.open(input_file_path)\n pixelated = original.resize(\n (original.size[0] // pixel_size, original.size[1] // pixel_size),\n Image.NEAREST\n )\n pixelated = pixelated.resize(\n (pixelated.size[0] * pixel_size, pixelated.size[1] * pixel_size),\n Image.NEAREST\n )\n box = (start_x, start_y, start_x + size_x, start_y + size_y)\n original.paste(pixelated.crop(box), box)\n 
original.save(output_file_path)\n","sub_path":"pixelate_redactor/pixelate_redactor.py","file_name":"pixelate_redactor.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"615967968","text":"from collections import defaultdict\nimport time\nimport pdb\n# NOTE: these algos have errors. FIX\n\ndef change(coins, t):\n r = []\n return\n\n\n# a =: array, t =: target\ndef make_change(coins, t):\n if t < 0:\n return 0\n if t == 0:\n return 1\n return sum([make_change(coins, t - coin) for coin in coins])\n\n# a =: array, t =: target\ndef make_change2(coins, t):\n if t < 0:\n return 0\n if t == 0:\n return 1\n return min([1 + make_change(coins, t - coin) for coin in coins])\n\n# a =: array, t =: target\ndef make_change_memo(coins, t, memo=defaultdict(int)):\n if memo[t] != 0:\n return memo[t]\n\n if t < 0:\n return 0\n if t == 0:\n return 1\n\n # memo[t] = sum([make_change_memo(coins, t - coin, memo) for coin in coins])\n for coin in coins:\n memo[t] += make_change_memo(coins, t - coin, memo)\n \n return memo[t]\n\ndef time_fun(f, *args):\n start = time.time()\n f(*args)\n return time.time() - start\n\nif __name__=='__main__':\n #print('roundtrip = {}'.format(time_fun(make_change, [1,5,10,25], 45) ))\n #print('memo roundtrip = {}'.format(time_fun(make_change_memo, [1,5,10,25], 45) ))\n print(make_change2([1,2,3],4))\n #print(change([1,2,3],4))\n\n","sub_path":"change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"28565704","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport itertools\nimport logging\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport networkx as nx\n\nfrom yelp_beans.logic.config import get_config\nfrom yelp_beans.logic.user import user_preference\nfrom yelp_beans.models import Meeting\nfrom yelp_beans.models import MeetingParticipant\nfrom yelp_beans.models import MeetingSpec\n\n\ndef get_disallowed_meetings(users, prev_meeting_tuples, spec):\n \"\"\"Returns set of matches that are not allowed\n Returns:\n Set of tuples\n \"\"\"\n # don't match users with previous meetings\n pairs = prev_meeting_tuples\n\n userids = sorted([user.key.id() for user in users])\n id_to_user = {user.key.id(): user for user in users}\n all_pairs = {pair for pair in itertools.combinations(userids, 2)}\n\n for rule in spec.meeting_subscription.get().dept_rules:\n rule = rule.get()\n pairs = pairs.union({pair for pair in all_pairs if is_same(rule.name, pair, id_to_user)})\n return pairs\n\n\ndef is_same(field, match, users):\n return users[match[0]].metadata[field] == users[match[1]].metadata[field]\n\n\ndef save_meetings(matches, spec):\n for match in matches:\n meeting_key = Meeting(meeting_spec=spec.key).put()\n MeetingParticipant(meeting=meeting_key, user=match[0].key).put()\n MeetingParticipant(meeting=meeting_key, user=match[1].key).put()\n logging.info(meeting_key)\n logging.info('{}, {}'.format(\n match[0].get_username(),\n match[1].get_username(),\n ))\n\n\ndef get_previous_meetings(cooldown=None):\n\n if cooldown is None:\n cooldown = get_config()['meeting_cooldown_weeks']\n\n meetings = defaultdict(list)\n\n # get all meeting specs from x weeks ago til now\n time_threshold_for_meetings = datetime.now() - 
timedelta(weeks=cooldown)\n\n meeting_spec_keys = [\n spec.key for spec in MeetingSpec.query(\n MeetingSpec.datetime > time_threshold_for_meetings\n ).fetch()\n ]\n\n logging.info('Previous Meeting History: ')\n logging.info([meeting.get().datetime.strftime(\"%Y-%m-%d %H:%M\") for meeting in meeting_spec_keys])\n\n if meeting_spec_keys == []:\n return set([])\n\n # get all meetings from meeting specs\n meeting_keys = [meeting.key for meeting in Meeting.query().filter(\n Meeting.meeting_spec.IN(meeting_spec_keys)).fetch()]\n\n if meeting_keys == []:\n return set([])\n\n # get all participants from meetings\n participants = MeetingParticipant.query().filter(\n MeetingParticipant.meeting.IN(meeting_keys)\n ).fetch()\n\n if participants == []:\n return set([])\n\n # group by meeting Id\n for participant in participants:\n meetings[participant.meeting.id()].append(participant.user)\n\n # ids are sorted, all matches should be in increasing order by id for the matching algorithm to work\n disallowed_meetings = set([tuple(sorted(meeting, key=lambda Key: Key.id())) for meeting in meetings.values()])\n\n logging.info('Past Meetings')\n logging.info([tuple([meeting.get().get_username() for meeting in meeting]) for meeting in disallowed_meetings])\n\n disallowed_meetings = {tuple([meeting.id() for meeting in meeting]) for meeting in disallowed_meetings}\n\n return disallowed_meetings\n\n\ndef generate_meetings(users, spec, prev_meeting_tuples=None):\n \"\"\"\n Returns 2 tuples:\n - meetings: list of dicts of the same type as prev_meetings, to indicate\n this iteration's found meetings\n - unmatched_user_ids: users with no matches.\n \"\"\"\n if prev_meeting_tuples is None:\n prev_meeting_tuples = get_previous_meetings()\n\n uid_to_users = {user.key.id(): user for user in users}\n user_ids = sorted(uid_to_users.keys())\n\n # Determine matches that should not happen\n disallowed_meeting_set = get_disallowed_meetings(\n users, prev_meeting_tuples, spec\n )\n graph_matches = construct_graph(user_ids, disallowed_meeting_set)\n\n # matching returns (1,4) and (4,1) this de-dupes\n graph_matches = dict((a, b) if a <= b else (b, a)\n for a, b in graph_matches.iteritems())\n\n matches = []\n for uid_a, uid_b in graph_matches.items():\n user_a = uid_to_users[uid_a]\n user_b = uid_to_users[uid_b]\n time = user_preference(user_a, spec)\n matches.append((user_a, user_b, time))\n\n logging.info('{} employees matched'.format(len(matches) * 2))\n logging.info([(meeting[0].get_username(), meeting[1].get_username()) for meeting in matches])\n\n unmatched = [\n uid_to_users[user]\n for user in user_ids\n if user not in graph_matches.keys()\n if user not in graph_matches.values()\n ]\n\n logging.info('{} employees unmatched'.format(len(unmatched)))\n logging.info([user.get_username() for user in unmatched])\n\n return matches, unmatched\n\n\ndef construct_graph(user_ids, disallowed_meetings):\n \"\"\"\n We can use a maximum matching algorithm for this:\n https://en.wikipedia.org/wiki/Blossom_algorithm\n Yay graphs! 
Networkx will do all the work for us.\n \"\"\"\n\n # special weights that be put on the matching potential of each meeting,\n # depending on heuristics for what makes a good/bad potential meeting.\n meeting_to_weight = {}\n\n # This creates the graph and the maximal matching set is returned.\n # It does not return anyone who didn't get matched.\n meetings = []\n possible_meetings = {\n meeting for meeting in itertools.combinations(user_ids, 2)\n }\n allowed_meetings = possible_meetings - disallowed_meetings\n\n for meeting in allowed_meetings:\n weight = meeting_to_weight.get(meeting, 1.0)\n meetings.append(meeting + ({'weight': weight},))\n\n graph = nx.Graph()\n graph.add_nodes_from(user_ids)\n graph.add_edges_from(meetings)\n\n return nx.max_weight_matching(graph)\n","sub_path":"yelp_beans/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"332852911","text":"import subprocess, os, sys\nfrom operator import attrgetter\n\n\ntry:\n import yaml\nexcept ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", 'pyyaml'])\nfinally:\n import yaml\n\n\nclass Dragon:\n def __init__(self, cell_number, coins):\n self.cell_number = cell_number\n self.coins = coins\n\n def __str__(self):\n return \"cell_number = {}, coins = {}\".format(self.cell_number, self.coins)\n\nclass Dragons:\n\n target_value = 0\n def __init__(self):\n self.dragons_queue = []\n\n def remove_dragon(self):\n min_value_dragon = min(self.dragons_queue, key=attrgetter('coins'))\n self.dragons_queue.remove(min_value_dragon)\n\n def dragons_money(self):\n return sum(int(x.coins) for x in self.dragons_queue)\n\n def dragons_print(self):\n for dragon in self.dragons_queue:\n print(dragon)\n\ndef find_path(input_file):\n with open(input_file) as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n\n dragons = Dragons()\n steps = data['steps']\n Dragons.target_value = steps.pop().split()[1]\n cell_num = 2\n for step in steps:\n type, value = step.split()\n if type == \"d\":\n dragon = Dragon(cell_num, value)\n dragons.dragons_queue.append(dragon)\n else:\n while dragons and len(dragons.dragons_queue) >= int(value):\n dragons.remove_dragon()\n cell_num += 1\n\n if len(dragons.dragons_queue) < int(Dragons.target_value):\n print(\"-1\")\n else:\n print (dragons.dragons_money())\n print (len(dragons.dragons_queue))\n dragons_cells_str = \"\"\n for dragon in dragons.dragons_queue:\n dragons_cells_str += str(dragon.cell_number) + \" \"\n print (dragons_cells_str)\n return dragons.dragons_queue\n\n\ndef read_input():\n if len(sys.argv) == 2:\n input_file = sys.argv[1]\n if os.path.isfile(input_file):\n return input_file\n else:\n raise Exception('Parameter {} is not a file'.format(input_file))\n else:\n raise Exception(\"Please enter input file (python solution.py input1.yaml)\")\n\nif __name__ == '__main__':\n find_path(read_input())\n\n\n\n\n\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"4761425","text":"import itertools\nimport warnings\nimport copy\nimport time\nfrom math import inf\n\nimport numpy as np\nfrom sklearn.feature_selection import f_classif, f_regression\n\nfrom ..computation.util import to_dataframe\nfrom ..data import Collection\nfrom ..computation import Input, Graph\nfrom ..transformers import *\n\n__all__ = [\n 
'construct',\n]\n\nminimal = {\n 'series-to-series': [\n Resultant(),\n Ratio(),\n Difference(rel=True),\n Difference(rel=False),\n ],\n 'series-to-attribute': [\n Length(),\n Sum(),\n Min(),\n Max(),\n Mean(),\n Median(),\n StandardDeviation(),\n Variance(),\n Skewness(),\n Kurtosis(),\n ]\n}\n\nfast = {\n 'series-to-series':\n minimal['series-to-series'],\n 'series-to-attribute':\n minimal['series-to-attribute'] + [\n BinnedEntropy(bins=10),\n ] + [\n C3(lag=lag) for lag in (1, 2, 3)\n ] + [\n CID(),\n ] + [\n CountAboveMean(),\n CountBelowMean(),\n ] + [\n Energy(),\n EnergyRatio(chunks=10),\n ] + [\n SumChange(abs=True),\n ] + [\n MeanChange(abs=abs)\n for abs in (True, False)\n ] + [\n MeanSecondDerivativeCentral(),\n ] + [\n IndexMassQuantile(q=round(q, 1), rel=True)\n for q in (.1, .2, .3, .4, .6, .7, .8, .9)\n ] + [\n ArgMin(first=first, rel=True)\n for first in (True, False)\n ] + [\n ArgMax(first=first, rel=True)\n for first in (True, False)\n ] + [\n # NumberPeaks(support=support)\n # for support in (1, 3, 5, 10, 50)\n # ] + [\n NumberCrossings(threshold=-1),\n NumberCrossings(threshold=0),\n NumberCrossings(threshold=1),\n ] + [\n LongestStrikeAboveMean(),\n LongestStrikeBelowMean(),\n ] + [\n TimeReversalAsymmetryStatistic(lag=lag)\n for lag in (1, 2, 3)\n ] + [\n HighStandardDeviation(r=round(r, 1))\n for r in (.1, .2, .3, .4, .6, .7, .8, .9)\n ] + [\n HighVariance(),\n ] + [\n SymmetryLooking(r=round(r, 1))\n for r in (.1, .2, .3, .4, .6, .7, .8, .9)\n ] + [\n RangeCount(min=-1, max=1),\n RangeCount(min=-np.inf, max=0),\n RangeCount(min=0, max=np.inf),\n ] + [\n ValueCount(value=-1),\n ValueCount(value=0),\n ValueCount(value=1),\n ] + [\n Outliers(r=r, rel=rel)\n for r in (1, 1.5, 2, 3, 4, 5)\n for rel in (True, False)\n ] + [\n HasDuplicateMin(),\n HasDuplicateMax(),\n ] + [\n Slice(AutoCorrelation(), i=i, axis='time') for i in range(10)\n ]\n}\n\nfull = {\n 'series-to-series':\n fast['series-to-series'],\n 'series-to-attribute':\n fast['series-to-attribute'] + [\n Quantile(q=round(q, 1))\n for q in (.1, .2, .3, .4, .6, .7, .8, .9)\n ] + [\n Slice(FFT(), i=i, axis='time') for i in range(100)\n ] + [\n Slice(CWT(), i=i, axis='time') for i in range(10)\n ] + [\n SpectralMean(FFT()),\n SpectralVariance(FFT()),\n SpectralSkewness(FFT()),\n SpectralKurtosis(FFT()),\n ] + [\n PowerSpectralDensity(),\n ] + [\n LinearTrend(Aggregate(size=size, agg=agg))\n for size in (5, 10, 50)\n for agg in ('mean', 'var', 'min', 'max')\n ] + [\n Slice(AutoRegressiveCoefficients(), i=i, axis='time') for i in range(10)\n ] + [\n Slice(FriedrichCoefficients(m=3, r=30), i=i, axis='time') for i in range(4)\n ] + [\n MaxLangevinFixedPoint(m=3, r=30),\n ] + [\n NumberPeaksCWT(),\n ] + [\n LinearTrend(),\n ] + [\n NumberUniqueValues(),\n SumReoccurringValues(),\n SumReoccurringDataPoints(),\n HasDuplicate(),\n ]\n}\n\n\ndef construct(X, y, task='classification',\n transformers='full', max_depth=1, corr=0.99, alpha=0.05,\n return_data=False, return_log=False):\n # Create a log during the construction process\n log = {time.time(): {'event': 'started'}} if return_log else None\n\n # Format arguments\n X, y, transformers, max_depth = format_args(X, y, transformers, task, max_depth)\n N = len(y)\n\n # Step 1: series-to-series transformations\n if log: log[time.time()] = {'event': 'series-to-series-started'}\n series = build_series(X, N, transformers['series-to-series'], max_depth, corr, log=log)\n if log: log[time.time()] = {'event': 'series-to-series-finished'}\n\n # Step 2: series-to-attribute 
transformations\n if log: log[time.time()] = {'event': 'series-to-attribute-started'}\n s2a = transformers['series-to-attribute']\n attributes, data = build_attributes(X, y, task, series, s2a, alpha, log=log)\n if log: log[time.time()] = {'event': 'series-to-attribute-finished'}\n\n # Create result\n if log: log[time.time()] = {'event': 'finished'}\n return create_result(attributes, data, log, return_data, return_log)\n\n\ndef build_series(X, N, transformers, max_depth, corr, log=None):\n series_depth_0_to_d = []\n series_depth_d = []\n views = list(X)\n data = {Input(view).trace: X[view] for view in X}\n stats = []\n\n depth = 0\n while True:\n new_series = []\n for transformer in generate_series_to_series_transformers(\n views, transformers, data, series_depth_0_to_d, series_depth_d, depth\n ):\n if log: log[time.time()] = {\n 'event': 'series-to-series-try',\n 'transformer': str(transformer),\n }\n\n # Compute output of the transformer\n if isinstance(transformer, Input):\n output = data[transformer.trace]\n else:\n # output = transformer.transform(*[data[p.trace] for p in transformer.parents])\n try:\n result = Graph(transformer).transform(X, return_dataframe=False)\n output = result[list(result)[0]]\n except:\n output = None\n if output is None:\n continue\n\n # Compute stats for the transformer\n tstats = to_collection(SinglePassStatistics().transform(output)).values # .reshape((N, -1))\n\n # Check redundancy\n non_redundant = check_non_redundant(tstats, stats, corr)\n\n # Add if non redundant\n if non_redundant:\n # data[transformer.trace] = output\n new_series.append(transformer)\n stats.append(tstats)\n if log: log[time.time()] = {\n 'event': 'series-to-series-add',\n 'transformer': str(transformer),\n }\n\n # Skip otherwise\n elif log:\n log[time.time()] = {\n 'event': 'series-to-series-redundant',\n 'transformer': str(transformer),\n }\n\n series_depth_0_to_d = series_depth_0_to_d + series_depth_d\n series_depth_d = new_series\n\n # Stop early when no new series could be added\n if len(series_depth_d) == 0:\n break\n\n depth = depth + 1\n if depth > max_depth:\n break\n\n # Return series (constructed transformers) and their corresponding data (computed values)\n return series_depth_0_to_d + series_depth_d\n\n\ndef build_attributes(X, y, task, series, transformers, alpha, log=None):\n attributes = []\n attributes_data = dict()\n\n for s in series:\n if isinstance(s, Input):\n x = X[s.input_id]\n else:\n result = Graph(s).transform(X, return_dataframe=False)\n x = result[list(result)[0]]\n for t in transformers:\n transformer = copy.deepcopy(t)\n transformer._parents = [s]\n\n if log: log[time.time()] = {\n 'event': 'series-to-attribute-try',\n 'transformer': str(transformer),\n }\n\n # xs, ys = subsample(x, y, ratio=0.1)\n\n # Compute output of the transformer\n try:\n output = to_collection(transformer.transform(x))\n except:\n output = None\n if output is None:\n continue\n\n # Reshape the transformer's output\n tvalues = output.values.reshape((len(y), -1))\n\n # Check relevance\n relevant, relevance = test_relevance(tvalues, y, task, alpha)\n if not relevant:\n if log: log[time.time()] = {\n 'event': 'series-to-attribute-irrelevant',\n 'transformer': transformer,\n 'data': relevance,\n }\n continue\n\n # Relevant\n if log: log[time.time()] = {\n 'event': 'series-to-attribute-add',\n 'transformer': transformer,\n 'data': relevance,\n }\n\n attributes.append(transformer)\n attributes_data[transformer.trace] = output\n # attributes_data[transformer.trace] = None\n\n # Return 
attributes (constructed transformers) and the data\n return attributes, attributes_data\n\n\ndef subsample(x, y, ratio=0.1):\n i = np.random.choice(x.shape[0], size=int(ratio * x.shape[0]))\n xs = Collection.from_array(x.values[i], dims=x.dims, time=x.time[i])\n ys = y[i]\n return xs, ys\n\n\ndef generate_series_to_series_transformers(views, transformers, data, series_depth_0_to_d,\n series_depth_d, depth):\n if depth < 1:\n for view in views:\n yield Input(view)\n else:\n series = series_depth_0_to_d + series_depth_d\n for t in transformers:\n for parents in itertools.combinations(series, t.n_inputs):\n if not any(p in series_depth_d for p in parents):\n continue\n if not constraints_satisfied(parents, data):\n continue\n for transformer in set_parents(t, parents, data):\n yield transformer\n\n\ndef constraints_satisfied(parents, data):\n if len(parents) == 2:\n if isinstance(parents[0], Input) and isinstance(parents[1], Input):\n data1 = data[parents[0].trace]\n data2 = data[parents[1].trace]\n if hasattr(data1, 'tags') and hasattr(data2, 'tags'):\n if ('location' in data1.tags) and ('location' in data2.tags) \\\n and ('type' in data1.tags) and ('type' in data2.tags):\n return ((data1.tags['location'] == data2.tags['location']) or\n (data1.tags['type'] == data2.tags['type']))\n return True\n\n\ndef check_non_redundant(tstats, stats, corr):\n if len(stats) > 0:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # r = np.corrcoef(np.concatenate([stats, tstats.T], axis=0))\n # r = np.abs(r[:stats.shape[0], stats.shape[0]:])\n # return not np.any(r > corr)\n for other in stats:\n for j in range(tstats.shape[2]):\n for k in range(other.shape[2]):\n for i in range(2, 8):\n if np.corrcoef(tstats[:, i, j], other[:, i, k])[0][1] > corr:\n return False\n return True\n else:\n return True\n\n\ndef test_relevance(values, y, task, alpha):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n f = f_classif if task == 'classification' else f_regression\n _, pval = f(np.nan_to_num(values), y)\n return np.any(pval < alpha), np.min(pval)\n\n\ndef set_parents(node, parents, data):\n \"\"\"\n Returns\n -------\n list(Transformer)\n \"\"\"\n c = copy.deepcopy(node)\n c._parents = parents\n return [c]\n # n_timestamps = [data[p.trace].shape[1] for p in parents]\n # # Resample parents if number timestamps is not unique\n # # and return a transformer for each sample rate\n # if len(set(n_timestamps)) > 1:\n # transformers = []\n # for num in set(n_timestamps):\n # resampled = []\n # for i, parent in enumerate(parents):\n # if n_timestamps[i] != num:\n # resampled.append(Resample(parent, num=num, axis='time'))\n # else:\n # resampled.append(parent)\n # c = copy.deepcopy(node)\n # c._parents = resampled\n # transformers.append(c)\n # return transformers\n # # Otherwise, use the original parents\n # else:\n # c = copy.deepcopy(node)\n # c._parents = parents\n # return [c]\n\n\n# def create_graph(attributes):\n# graph = Graph()\n# nodes = []\n# for attr in attributes:\n# nodes.append(graph.add_node(attr))\n# for node in graph.nodes:\n# node._is_output = False\n# for node in nodes:\n# node._is_output = True\n# return graph\n\n\ndef create_result(attributes, data, log, return_data, return_log):\n result = list()\n graph = Graph()\n result.append(graph)\n nodes = []\n if return_data:\n outputs = dict()\n # result.append({node: data[attributes[i].trace] for i, node in enumerate(graph.outputs)})\n for attribute in attributes:\n node = graph.add_node(attribute)\n 
nodes.append(node)\n outputs[node] = data[attribute.trace]\n result.append(to_dataframe(outputs))\n else:\n for attr in attributes:\n nodes.append(graph.add_node(attr))\n for node in graph.nodes:\n node._is_output = False\n for node in nodes:\n node._is_output = True\n if return_log:\n result.append(log)\n if len(result) == 1:\n result = result[0]\n else:\n result = tuple(result)\n return result\n\n\ndef format_args(X, y, transformers, task, max_depth):\n # X should be a dict(int or str: Collection)\n if isinstance(X, list):\n X = {i: X[i] for i in range(len(X))}\n elif isinstance(X, Collection):\n X = {'X': X}\n elif not isinstance(X, dict):\n X = {'X': Collection.from_array(X)}\n # y should be a np.ndarray\n if isinstance(y, list):\n y = np.array(y)\n if isinstance(y, np.ndarray):\n y = flatten(y)\n else:\n y = flatten(y.values)\n if task == 'regression':\n y = y.astype(float)\n # Transformers should be {'series-to-series': list, 'series-to-attributes': list}\n if transformers == 'minimal':\n t = minimal\n elif transformers == 'fast':\n t = fast\n elif transformers == 'full':\n t = full\n else:\n t = copy.deepcopy(transformers)\n # Max. depth should be a number or inf\n if isinstance(max_depth, int) or isinstance(max_depth, float):\n if max_depth < 0:\n max_depth = inf\n else:\n max_depth = int(max_depth)\n else:\n max_depth = inf\n return X, y, t, max_depth\n\n\ndef flatten(values):\n if values.ndim == 2:\n return values[:, 0]\n elif values.ndim == 3:\n return values[:, 0, 0]\n else:\n return values.flatten()\n\n\ndef to_collection(x):\n if x is None:\n return None\n elif isinstance(x, Collection):\n return x\n else:\n if any(c is None for c in x):\n return None\n elif len(set([c.shape[1] for c in x])) > 1:\n return None\n else:\n return Collection.from_array(np.concatenate([c.values for c in x]))\n","sub_path":"tsfuse/construction/autods19.py","file_name":"autods19.py","file_ext":"py","file_size_in_byte":15861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"43444533","text":"print(\"Welcome to the Fibonacci Sequence generator!\")\nnumNumbers = int(input(\"How many digits would you like? 
\"))\n\nlastNum = 1\nlastLastNum = 1\nif numNumbers > 0:\n print(str(lastLastNum))\nif numNumbers > 1:\n print(str(lastNum))\n\nfor i in range(2, numNumbers):\n currentNum = lastNum + lastLastNum\n print(str(currentNum))\n\n lastLastNum = lastNum\n lastNum = currentNum","sub_path":"Labs_Python/1._Introduction_to_Python/2._Control_Flow/Fibonacci_Sequence/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"576128373","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib/python3.6/site-packages/filabel/logic.py\n# Compiled at: 2018-12-31 13:00:08\n# Size of source mod 2**32: 12195 bytes\nimport enum, fnmatch, itertools, requests, aiohttp, asyncio\n\nclass GitHub:\n __doc__ = '\\n This class can communicate with the GitHub API\\n just give it a token and go.\\n '\n API = 'https://api.github.com'\n\n def __init__(self, token):\n \"\"\"\n :param token: GitHub token\n \"\"\"\n self.token = token\n self.session = requests.Session()\n self.session.headers = {'User-Agent': 'filabel'}\n self.session.auth = self._token_auth\n\n def user(self):\n \"\"\"\n Get current user authenticated by token\n \"\"\"\n return self._paginated_json_get(f\"{self.API}/user\")\n\n def _token_auth(self, req):\n \"\"\"\n This alters all our outgoing requests\n \"\"\"\n req.headers['Authorization'] = 'token ' + self.token\n return req\n\n def _paginated_json_get(self, url, params=None):\n r = self.session.get(url, params=params)\n r.raise_for_status()\n json = r.json()\n if 'next' in r.links:\n if 'url' in r.links['next']:\n json += self._paginated_json_get(r.links['next']['url'], params)\n return json\n\n def pull_requests(self, owner, repo, state='open', base=None):\n \"\"\"\n Get all Pull Requests of a repo\n\n :param owner: GtiHub user or org\n :param repo: repo name\n :param state: open, closed, all\n :param base: optional branch the PRs are open for\n \"\"\"\n params = {'state': state}\n if base is not None:\n params['base'] = base\n url = f\"{self.API}/repos/{owner}/{repo}/pulls\"\n return self._paginated_json_get(url, params)\n\n def pr_files(self, owner, repo, number):\n \"\"\"\n Get files of one Pull Request\n\n :param owner: GtiHub user or org\n :param repo: repo name\n :param number: PR number/id\n \"\"\"\n url = f\"{self.API}/repos/{owner}/{repo}/pulls/{number}/files\"\n return self._paginated_json_get(url)\n\n def pr_filenames(self, owner, repo, number):\n \"\"\"\n Get filenames of one Pull Request. A generator.\n\n :param owner: GtiHub user or org\n :param repo: repo name\n :param number: PR number/id\n \"\"\"\n return (f['filename'] for f in self.pr_files(owner, repo, number))\n\n def reset_labels(self, owner, repo, number, labels):\n \"\"\"\n Set's labels for Pull Request. 
Replaces all existing lables.\n\n :param owner: GtiHub user or org\n :param repo: repo name\n :param lables: all lables this PR will have\n \"\"\"\n url = f\"{self.API}/repos/{owner}/{repo}/issues/{number}\"\n r = self.session.patch(url, json={'labels': labels})\n r.raise_for_status()\n return r.json()['labels']\n\n\nclass AsyncGitHub(GitHub):\n __doc__ = \"\\n This is a subclass of :class: `.GitHub`.\\n It overwrites it's behaviour to support asynchronnous communication.\\n \"\n API = 'https://api.github.com'\n\n def __init__(self, token):\n super().__init__(token)\n self.create_session()\n\n def create_session(self):\n \"\"\"\n Create asynchronnous session: `aiohttp.ClientSession`\n \"\"\"\n self.session = aiohttp.ClientSession(skip_auto_headers=[\n 'User-Agent'],\n headers={'User-Agent':'filabel', \n 'Authorization':'token ' + self.token})\n\n async def user(self):\n \"\"\"\n Get current user authenticated by token\n \"\"\"\n return self._paginated_json_get(f\"{self.API}/user\")\n\n async def _paginated_json_get(self, url, params=None):\n async with self.session.get(url, params=params) as r:\n json = await r.json()\n r.raise_for_status()\n if 'next' in r.links:\n if 'url' in r.links['next']:\n loop = asyncio.get_event_loop()\n t = loop.create_task(self._paginated_json_get(r.links['next']['url'], params))\n await t\n json += t.result()\n return json\n\n async def pr_files(self, owner, repo, number):\n \"\"\"\n Get files of one Pull Request\n\n :param owner: GtiHub user or org\n :param repo: repo name\n :param number: PR number/id\n \"\"\"\n url = f\"{self.API}/repos/{owner}/{repo}/pulls/{number}/files\"\n loop = asyncio.get_event_loop()\n t = loop.create_task(self._paginated_json_get(url))\n await t\n return t.result()\n\n async def pr_filenames(self, owner, repo, number):\n \"\"\"\n Get filenames of one Pull Request. A generator.\n\n :param owner: GtiHub user or org\n :param repo: repo name\n :param number: PR number/id\n \"\"\"\n loop = asyncio.get_event_loop()\n t = loop.create_task(self.pr_files(owner, repo, number))\n await t\n return (f['filename'] for f in t.result())\n\n async def reset_labels(self, owner, repo, number, labels):\n \"\"\"\n Set's labels for Pull Request. 
Replaces all existing lables.\n\n :param owner: GtiHub user or org\n :param repo: repo name\n :param lables: all lables this PR will have\n \"\"\"\n url = f\"{self.API}/repos/{owner}/{repo}/issues/{number}\"\n async with self.session.patch(url, json={'labels': labels}) as r:\n json = await r.json()\n r.raise_for_status()\n return json['labels']\n\n\nclass Change(enum.Enum):\n __doc__ = '\\n Enumeration of possible label changes\\n '\n ADD = 1\n DELETE = 2\n NONE = 3\n\n\nclass Report:\n __doc__ = '\\n Simple container for reporting repo-pr label changes\\n '\n\n def __init__(self, repo):\n self.repo = repo\n self.ok = True\n self.prs = {}\n\n\nclass Filabel:\n __doc__ = '\\n Main login of PR labeler\\n '\n\n def __init__(self, token, labels, state='open', base=None, delete_old=True):\n \"\"\"\n :param token: GitHub token\n :param labels: Configuration of labels with globs\n :param state: State of PR to be (re)labeled\n :param base: Base branch of PRs to be (re)labeled\n :param delete_old: If no longer matching labels should be deleted\n \"\"\"\n self.github = GitHub(token)\n self.labels = labels\n self.state = state\n self.base = base\n self.delete_old = delete_old\n\n @property\n def defined_labels(self):\n \"\"\"\n Set of labels defined in configuration\n \"\"\"\n return set(self.labels.keys())\n\n def _matching_labels(self, pr_filenames):\n \"\"\"\n Find matching labels based on given filenames\n\n :param pr_filenames: list of filenames as strings\n \"\"\"\n labels = set()\n for filename in pr_filenames:\n for label, patterns in self.labels.items():\n for pattern in patterns:\n if fnmatch.fnmatch(filename, pattern):\n labels.add(label)\n break\n\n return labels\n\n def _compute_labels(self, defined, matching, existing):\n \"\"\"\n Compute added, remained, deleted, and future label sets\n\n :param defined: Set of defined labels in config\n :param matching: Set of matching labels that should be in PR\n :param existing: Set of labels that are currently in PR\n \"\"\"\n added = matching - existing\n remained = matching & existing\n deleted = set()\n future = existing\n if self.delete_old:\n deleted = (existing & defined) - matching\n future = existing - defined\n future = future | matching\n return (added, remained, deleted, future)\n\n def run_pr(self, owner, repo, pr_dict):\n \"\"\"\n Manage labels for single given PR\n\n :param owner: Owner of GitHub repository\n :param repo: Name of GitHub repository\n :param pr_dict: PR as dict from GitHub API\n \"\"\"\n pr_filenames = list(self.github.pr_filenames(owner, repo, pr_dict['number']))\n added, remained, deleted, future = self._compute_labels(self.defined_labels, self._matching_labels(pr_filenames), set(l['name'] for l in pr_dict['labels']))\n new_labels = self.github.reset_labels(owner, repo, pr_dict['number'], list(future))\n new_label_names = set(l['name'] for l in new_labels)\n if future == new_label_names:\n return sorted(itertools.chain([(a, Change.ADD) for a in added], [(r, Change.NONE) for r in remained], [(d, Change.DELETE) for d in deleted]))\n\n def run_repo(self, reposlug):\n \"\"\"\n Manage labels for all matching PRs in given repo\n\n :param reposlug: Reposlug (full name) of GitHub repo (i.e. 
\"owner/name\")\n \"\"\"\n report = Report(reposlug)\n owner, repo = reposlug.split('/')\n try:\n prs = self.github.pull_requests(owner, repo, self.state, self.base)\n except Exception:\n report.ok = False\n return report\n else:\n for pr_dict in prs:\n url = pr_dict.get('html_url', 'unknown')\n report.prs[url] = None\n try:\n report.prs[url] = self.run_pr(owner, repo, pr_dict)\n except Exception:\n pass\n\n return report\n\n\nclass AsyncFilabel(Filabel):\n __doc__ = '\\n This is a subclass of class: `.Filabel`.\\n It overwrites several methods to support asynchronnous communication.\\n '\n\n def __init__(self, token, labels, state='open', base=None, delete_old=True):\n super().__init__(token, labels, state, base, delete_old)\n self.github = AsyncGitHub(token)\n self.token = token\n\n async def run_pr(self, owner, repo, pr_dict):\n \"\"\"\n Manage labels for single given PR asynchronnously\n\n :param owner: Owner of GitHub repository\n :param repo: Name of GitHub repository\n :param pr_dict: PR as dict from GitHub API\n \"\"\"\n loop = asyncio.get_event_loop()\n t = loop.create_task(self.github.pr_filenames(owner, repo, pr_dict['number']))\n await t\n pr_filenames = list(t.result())\n added, remained, deleted, future = self._compute_labels(self.defined_labels, self._matching_labels(pr_filenames), set(l['name'] for l in pr_dict['labels']))\n t = loop.create_task(self.github.reset_labels(owner, repo, pr_dict['number'], list(future)))\n await t\n new_labels = t.result()\n new_label_names = set(l['name'] for l in new_labels)\n if future == new_label_names:\n return sorted(itertools.chain([(a, Change.ADD) for a in added], [(r, Change.NONE) for r in remained], [(d, Change.DELETE) for d in deleted]))\n\n async def run_repo(self, reposlug):\n \"\"\"\n Manage labels for all matching PRs in given repo asynchronnously\n\n :param reposlug: Reposlug (full name) of GitHub repo (i.e. 
\"owner/name\")\n :type reposlug: str\n \"\"\"\n report = Report(reposlug)\n owner, repo = reposlug.split('/')\n loop = asyncio.get_event_loop()\n try:\n task = loop.create_task(self.github.pull_requests(owner, repo, self.state, self.base))\n await task\n prs = task.result()\n except Exception:\n report.ok = False\n return report\n else:\n tasks = {}\n for pr_dict in prs:\n url = pr_dict.get('html_url', 'unknown')\n report.prs[url] = None\n cr = asyncio.create_task(self.run_pr(owner, repo, pr_dict))\n tasks[url] = cr\n\n for url, task in tasks.items():\n try:\n await task\n report.prs[url] = task.result()\n except Exception:\n pass\n\n return report","sub_path":"pycfiles/filabel_marekreimer-0.6.1.linux-x86_64.tar/logic.cpython-36.py","file_name":"logic.cpython-36.py","file_ext":"py","file_size_in_byte":12084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"382988973","text":"# Python3 implementation to find the\n# Leftmost Column with atleast a\n# 1 in a sorted binary matrix\nimport sys\nN = 3\n \n# Function to search for the\n# leftmost column of the matrix\n# with atleast a 1 in sorted\n# binary matrix\ndef search(mat, n, m):\n \n a = sys.maxsize\n \n # Loop to iterate over all the\n # rows of the matrix\n for i in range (n):\n low = 0\n high = m - 1\n ans = sys.maxsize\n \n # Binary Search to find the\n # leftmost occurence of the 1\n while (low <= high):\n mid = (low + high) // 2\n \n # Condition if the column\n # contains the 1 at this\n # position of matrix\n if (mat[i][mid] == 1):\n \n if (mid == 0):\n ans = 0\n break\n \n elif (mat[i][mid - 1] == 0):\n ans = mid\n break\n \n if (mat[i][mid] == 1):\n high = mid - 1\n else:\n low = mid + 1\n \n # If there is a better solution\n # then update the answer\n if (ans < a):\n a = ans\n \n # Condition if the solution\n # doesn't exist in the matrix\n if (a == sys.maxsize):\n return -1\n return a + 1\n \n# Driver Code\nif __name__ == \"__main__\":\n \n mat = [[0, 0, 0],\n [1, 0, 1],\n [0, 1, 1]]\n print(search(mat, 3, 3))\n \n","sub_path":"leetCode/FB_Leftmost_column_with_atleast_one_in_row.py","file_name":"FB_Leftmost_column_with_atleast_one_in_row.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"397353140","text":"import os\nimport re\nimport datetime\nimport hashlib\n\nfrom zipfile import ZipFile\nfrom django.core.cache import cache\nfrom django.core.files.base import ContentFile\nfrom django.conf import settings\n\nfrom dateutil.relativedelta import relativedelta\nfrom voting.models import Vote\nfrom .models import Template\nfrom .storage import TemplateStorage\n\n\ndef import_popcorn_templates(popcorn_path, prefix):\n \"\"\"Import the templates from the path provided with the following conventions:\n - The folder name will be the slug and named used for the template\n - The folders must contain a ``.cfg`` file and an ``.html`` file.\n \"\"\"\n candidates = [n for n in os.listdir(popcorn_path) if os.path.isdir(os.path.join(popcorn_path, n)) ]\n for candidate in candidates:\n data = {'slug': candidate}\n candidate_path = os.path.join(popcorn_path, candidate)\n for item in os.listdir(candidate_path):\n # TODO: get template data and import assets\n assert False, (prefix, candidate, item)\n try:\n # Already imported\n Template.objects.get(slug=candidate)\n continue\n except Template.DoesNotExist:\n pass\n Template.objects.create(**data)\n return\n\n\ndef update_views_count(item):\n 
\"\"\"Updates the visitor count on a given ``object`` updates the count\n in the object after 10 minutes\n The object must have\n - ``views_count`` field\n \"\"\"\n key = 'views%s' % (hashlib.md5('%s%s' % (item.id, type(item)))\n .hexdigest())\n if cache.get(key):\n views_count = cache.get(key) + 1\n else:\n views_count = item.views_count + 1\n cache.set(key, views_count)\n cache_expiration = item.modified + relativedelta(minutes=settings.CACHE_OBJECT_METADATA)\n if datetime.datetime.utcnow() > cache_expiration:\n item.views_count = views_count\n item.save()\n return views_count\n\n\ndef get_order_fields(request_get, **kwargs):\n \"\"\"Determines the ordering of the fields by inspecting the\n ``order`` passed in the request GET\"\"\"\n available_order = {\n 'views': ['-views_count', '-created'],\n 'created': ['-created'],\n 'votes': ['-votes_count', '-created'],\n 'default': ['-is_featured', '-created'],\n }\n if kwargs:\n available_order.update(kwargs)\n order = request_get.get('order')\n if order and order in available_order:\n return available_order[order]\n return available_order['default']\n\n\ndef update_vote_score(item):\n \"\"\"Caches the ``vote_score`` for ordering\"\"\"\n votes = Vote.objects.get_score(item)\n cache_expiration = item.modified + relativedelta(minutes=settings.CACHE_OBJECT_METADATA)\n if datetime.datetime.utcnow() > cache_expiration \\\n and votes['score'] > item.votes_count:\n item.votes_count = votes['score']\n item.save()\n return votes\n\n\ndef import_zipped_template(zipped_template, base_path, storage_class=TemplateStorage):\n \"\"\"Uncompress a zipped file and imports it using the given ``Storage``\"\"\"\n valid_extensions = \"|\".join(['html', 'jpg', 'png', 'css', 'js', 'json',\n 'gif'])\n regex = '([-\\w]+\\.(?:%s))' % valid_extensions\n pattern = re.compile(regex)\n template_files = ZipFile(zipped_template)\n saved_files = []\n for file_path in template_files.namelist():\n file_bits = file_path.split('/')\n file_name = file_bits[-1]\n if pattern.match(file_name):\n short_filename = '/'.join(file_bits[1:])\n storage_filename = '%s%s' % (base_path, short_filename)\n storage = storage_class()\n content_file = ContentFile(template_files.read(file_path))\n saved_files.append(storage.save(storage_filename, content_file))\n return saved_files\n","sub_path":"popcorn_gallery/popcorn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"442855945","text":"from cell import Cell\r\nfrom pygame import draw\r\n\r\nclass Grid:\r\n def __init__(self, rows, cols, width, height):\r\n self.rows = rows\r\n self.cols = cols\r\n self.cells = [[Cell(width//rows, height//cols, i, j) for j in range(cols)] for i in range(rows)]\r\n self.width = width\r\n self.height = height\r\n\r\n def __repr__(self):\r\n return f''\r\n\r\n def draw(self, surface):\r\n for row in self.cells:\r\n for cell in row:\r\n cell.draw(surface)\r\n\r\n gap = self.width // self.rows\r\n for i in range(max(self.rows, self.cols)):\r\n draw.line(surface, (0, 0, 0), (0, i*gap), (self.width, i*gap))\r\n draw.line(surface, (0, 0, 0), (i*gap, 0), (i*gap, self.height))\r\n\r\n def cell_clicked(self, click):\r\n for row in self.cells:\r\n for cell in row:\r\n if cell.selected(click):\r\n cell.color = (255, 0, 0)\r\n\r\n def clear(self):\r\n for row in self.cells:\r\n for cell in row:\r\n cell.color = (255, 255, 
255)\r\n\r\n\r\n\r\n\r\n","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"550940188","text":"import numpy as np\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport natsort\nimport cv2 as cv\n\n\ndef decode_segmap(temp):\n Imps = [0, 0, 0]\n Building = [100, 100, 100]\n Lowvg = [150, 150, 150]\n Tree = [200, 200, 200]\n Car = [250, 250, 250]\n # bg = [255,0,0]\n\n label_colours = np.array(\n [\n Imps,\n Building,\n Lowvg,\n Tree,\n Car,\n # bg,\n ]\n )\n r = temp.copy()\n g = temp.copy()\n b = temp.copy()\n for l in range(0, 5):\n r[temp == l] = label_colours[l, 0]\n g[temp == l] = label_colours[l, 1]\n b[temp == l] = label_colours[l, 2]\n # rgb = np.zeros((temp.shape[0], temp.shape[1], 3))\n rgb = np.zeros((temp.shape[0], temp.shape[1], 3), dtype=np.uint8)\n # rgb[:, :, 0] = r / 255.0\n # rgb[:, :, 1] = g / 255.0\n # rgb[:, :, 2] = b / 255.0\n rgb[:, :, 0] = r\n rgb[:, :, 1] = g\n rgb[:, :, 2] = b\n return rgb\n\nTensor_Path = Path(\"test_out/deeplabv3/tensors\")\nTensor_File = natsort.natsorted(list(Tensor_Path.glob(\"*.npy\")), alg=natsort.PATH)\nTensor_Str = []\nfor j in Tensor_File:\n Tensor_Str.append(str(j))\n\nth=0.98\nfor k in tqdm(range(len(Tensor_Str))):\n lanes_one_channel=np.load(Tensor_Str[k])\n pred=np.zeros((256,512),dtype=np.uint)\n pred[lanes_one_channel>th]=4\n decoded = decode_segmap(pred)\n out_path=\"test_out/deeplabv3/prcurve/098/\"+Path(Tensor_Str[k]).stem+\".bmp\"\n decoded_bgr = cv.cvtColor(decoded, cv.COLOR_RGB2BGR)\n # misc.imsave(out_path, decoded)\n cv.imwrite(out_path, decoded_bgr)\n # print(\"kk\")\n\n\n\n","sub_path":"prcurve.py","file_name":"prcurve.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"140311165","text":"# Objects used in the VFA algorithm (Gene, SNP, SNP_pair) are defined here\n\n####################################################################################################################\n### Define Gene Object and annotation function\n####################################################################################################################\n\ndef annotate_Gene(self, name, database_cursor):\n \"\"\"Used to load annotaiton information for a gene\"\"\"\n database_cursor.execute(\"use hg19\")\n SQL_string = \"SELECT gd_app_name, gd_pub_ensembl_id FROM HUGO WHERE gd_app_sym=\" + \"'\" + name + \"'\"\n database_cursor.execute(SQL_string)\n qr = database_cursor.fetchall()\n if qr == ():\n self.name = name\n self.definition = \"Not Found in HUGO\"\n else:\n self.name = name\n self.definition = qr[0][0]\n self.ensembl = qr[0][1]\n \n # Get Gene location from UCSC known genes\n SQL_string = \"select chrom, txStart, txEnd from knownGene where name=(select kgID from kgXref where geneSymbol = '\" + name + \"' limit 1)\"\n database_cursor.execute(SQL_string)\n qr = database_cursor.fetchall()\n if qr == ():\n self.chrom = \"Not Found\"\n self.txStart = \"Not Found\"\n self.txEnd = \"Not Found\"\n else:\n self.chrom = qr[0][0]\n self.txStart = qr[0][1]\n self.txEnd = qr[0][2]\n \n return self\n\n # Need to get precise gene location from another database...\n\n\nclass Gene():\n \"\"\"The Gene class contains all attributes of a given Gene\"\"\"\n def __init__(self, name='Not Loaded', pval='Not Loaded', eQTL='Not Loaded', GWAS='Not Loaded', definition='Not Loaded', score='Not Loaded', accession='Not 
Loaded', ensembl='Not Loaded', UCSCknown='Not Loaded', chrom='Not Loaded', txStart='Not Loaded', txEnd='Not Loaded', strand='Not Loaded', url='Not Loaded', SNP_pairs='Not Loaded', GWAS_catalog='Not Loaded', num_SNPs='Not Loaded', num_blocks='Not Loaded'):\n self.name = name\n self.pval = pval\n self.eQTL = eQTL\n self.GWAS = GWAS\n self.definition = definition\n self.score = score\n self.accession = accession\n self.ensembl = ensembl\n self.UCSCknown = UCSCknown\n self.chrom = chrom\n self.txStart = txStart\n self.txEnd = txEnd\n self.strand = strand\n self.url = url\n self.SNP_pairs = SNP_pairs\n self.GWAS_catalog = GWAS_catalog\n self.num_SNPs = num_SNPs\n self.num_blocks = num_blocks\n \n \n # Define a method for printing Gene objects\n def __str__(self):\n prt_str = '\\n'\n prt_str = prt_str + '{:<}'.format(\"Gene: \" + self.name + '\\n')\n if self.definition == 'Not Loaded':\n prt_str = prt_str + '{:<}'.format('\\t' + \"Gene definition not found in HUGO\" + '\\n')\n else:\n prt_str = prt_str + '{:<}'.format('\\t' + self.definition + '\\n')\n prt_str = prt_str + '{:<}'.format('\\t' + \"Has \" + str(self.num_SNPs) + \" SNPs compressed to \" + str(self.num_blocks) + \" blocks in eQTL \" + self.eQTL + '\\n')\n if self.chrom == 'pseudo':\n # This is a pseudo gene, lacking a defined location\n prt_str = prt_str + '{:<}'.format('\\t' + \"Location: \" + \"Pseudo Gene\" + '\\n')\n else:\n prt_str = prt_str + '{:<}'.format('\\t' + \"Location: \" + str(self.chrom) + \":\" + str(self.txStart) + \"-\" + str(self.txEnd) + '\\n')\n prt_str = prt_str + '{:<}'.format('\\t' + \"Accession: \" + self.accession + '\\n')\n prt_str = prt_str + '{:<}'.format('\\t' + \"Ensembl: \" + self.ensembl + '\\n')\n prt_str = prt_str + '{:<}'.format('\\t' + \"UCSCknown: \" + self.UCSCknown + '\\n')\n \n # Print GWAS catalog entries\n if self.GWAS_catalog == 'Not Loaded':\n prt_str = prt_str + '{:<}'.format('\\t' + \"This gene has no GWAS catalog entries:\" + '\\n')\n else:\n prt_str = prt_str + '{:<}'.format('\\t' + \"This gene has \" + str(len(self.GWAS_catalog)) + \" GWAS catalog entries:\" + '\\n')\n for entry in self.GWAS_catalog:\n prt_str = prt_str + '{:<}'.format('\\t\\t' + entry[0] + '\\n')\n \n # Separate gene and GWAS alignment sections\n prt_str = prt_str + '\\n'\n\n prt_str = prt_str + '{:<}'.format('\\t' + \"GWAS: \" + self.GWAS + '\\n')\n if self.score != 'Not Loaded':\n prt_str = prt_str + '{:<}'.format('\\t' + \"Has score: \" + str(self.score) + '\\n')\n else:\n prt_str = prt_str + '\\t' + \"Score: \" + str(self.score) + '\\n'\n\n # Print the p-value for this alignment\n if self.pval != 'Not Loaded':\n prt_str = prt_str + '\\t' + \"With score p-value: \" + '{:02.2e}'.format(float(self.pval)) + '\\n'\n else:\n prt_str = prt_str + '\\t' + \"Score p-value: \" + str(self.pval) + '\\n'\n\n # Separate alignment and SNP pairs section\n prt_str = prt_str + '\\n'\n\n # Print out the SNP pairs for each block below\n if self.SNP_pairs != 'Not Loaded':\n for pair in self.SNP_pairs:\n prt_str = prt_str + str(pair) + '\\n'\n else:\n prt_str = prt_str + '\\t' + \"SNP Pair Blocks: None Loaded\" + '\\n'\n return prt_str\n\n def ucsc_url(self):\n # Create a URL to this location in the UCSC Genome Browser\n self.url = 'http://genome.ucsc.edu/cgi-bin/hgTracks?db=' + self.assembly + '&position=' + self.chrom + ':' + str(self.txStart) + '-' + str(self.txEnd)\n\n\n\n####################################################################################################################\n### Define SNP Objects and annotation 
function\n####################################################################################################################\n\n\ndef annotate_SNP(self, name, database_cursor):\n \"\"\"Used to load annotaiton information for a SNP\"\"\"\n database_cursor.execute(\"use hg19\")\n SQL_string = \"SELECT chrom, chromEnd, func, alleleFreqs FROM snp135 WHERE name =\" + \"'\" + name + \"'\"\n database_cursor.execute(SQL_string)\n qr = database_cursor.fetchall()\n if qr == ():\n self.rsID = name\n self.chrom = \"missing\"\n self.location = \"missing\"\n self.function = \"missing\"\n self.alleles = \"missing\"\n else:\n self.rsID = name\n self.chrom = qr[0][0]\n self.location = qr[0][1]\n self.function = qr[0][2]\n self.alleles = qr[0][3]\n return self\n\n\nclass SNP():\n def __init__(self, rsID='Not Loaded', chrom='Not Loaded', location='Not Loaded', strand='Not Loaded', alleles='Not Loaded', function='Not Loaded', description='Not Loaded', pval='Not Loaded'):\n self.rsID = rsID\n self.chrom = chrom\n self.location = location\n self.strand = strand\n self.alleles = alleles\n self.function = function\n self.description = description\n self.pval = pval\n \n #Define a method for printing each SNP\n def __str__(self):\n prt_str = ''\n prt_str = str(self.rsID) + \"\\t\" + str(self.chrom) + \"\\t\" + str(self.location) + \"\\t\" + str(self.alleles) + \"\\t\" + str(self.function)\n if self.pval != 'Not Loaded':\n prt_str = prt_str + \"\\t\" + str(self.pval)\n return prt_str\n\n\n\n####################################################################################################################\n### Define SNP_pair Object\n####################################################################################################################\n\n# This class holds a pair of aligned SNPs: one from GWAS, and one from eQTL\n\nclass SNP_pair:\n def __init__(self, block_number=\"Not Loaded\", chrom=\"Not Loaded\", eQTL_SNP='Not Loaded', eQTL_SNP_location='Not Loaded', GWAS_SNP='Not Loaded', GWAS_SNP_location='Not Loaded', separation=\"Not Loaded\", LD_rsq='Not Loaded', eQTL_SNP_pval='Not Loaded', GWAS_SNP_pval=\"Not Loaded\"):\n self.block_number = block_number \n self.chrom = chrom\n self.eQTL_SNP = eQTL_SNP\n self.eQTL_SNP_location = eQTL_SNP_location\n self.GWAS_SNP = GWAS_SNP\n self.GWAS_SNP_location = GWAS_SNP_location\n self.separation = separation\n self.LD_rsq = LD_rsq\n self.eQTL_SNP_pval = eQTL_SNP_pval\n self.GWAS_SNP_pval = GWAS_SNP_pval\n \n \n def map(self):\n \"\"\"Find location information for SNPs in the pair.\"\"\"\n\n try:\n separation = abs(self.GWAS_SNP_location - self.eQTL_SNP_location)\n except(ValueError, TypeError):\n separation = 'NA'\n else:\n separation = abs(self.GWAS_SNP_location - self.eQTL_SNP_location)\n\n return separation\n \n #Define a method for printing each SNP pair\n def __str__(self):\n self.map()\n prt_str = ''\n prt_str = prt_str + '\\t'\n prt_str = prt_str + '{:<12}'.format('Blk: ' + str(self.block_number))\n prt_str = prt_str + '{:<8}'.format(self.chrom)\n prt_str = prt_str + '{:<12}'.format(self.eQTL_SNP)\n prt_str = prt_str + '{:<12}'.format(self.eQTL_SNP_location)\n prt_str = prt_str + '{:<12}'.format(self.GWAS_SNP)\n prt_str = prt_str + '{:<12}'.format(self.GWAS_SNP_location)\n prt_str = prt_str + '{:<10}'.format(self.separation)\n if self.LD_rsq != 'NA' and self.LD_rsq != 'Not Loaded':\n prt_str = prt_str + '{:02.2f}'.format(float(self.LD_rsq)) + ' ' \n else:\n prt_str = prt_str + '{:<5}'.format(self.LD_rsq) + ' '\n prt_str = prt_str + 
'{:02.2e}'.format(float(self.eQTL_SNP_pval)) + ' '\n if self.GWAS_SNP_pval != 'NA' and self.GWAS_SNP_pval != 'Not Loaded':\n prt_str = prt_str + '{:02.2e}'.format(float(self.GWAS_SNP_pval))\n else:\n prt_str = prt_str + '{:<8}'.format(self.GWAS_SNP_pval)\n return prt_str\n\n\n","sub_path":"VFA/vfa_v8/source/vfa_v8_object_defs.py","file_name":"vfa_v8_object_defs.py","file_ext":"py","file_size_in_byte":9826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"97023317","text":"import os\nimport random\nimport re\nimport sys\nimport copy\nimport numpy as np\n\nDAMPING = 0.85\nSAMPLES = 10000\n\n\ndef main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python pagerank.py corpus\")\n corpus = crawl(sys.argv[1])\n ranks = sample_pagerank(corpus, DAMPING, SAMPLES)\n print(f\"PageRank Results from Sampling (n = {SAMPLES})\")\n for page in sorted(ranks):\n print(f\" {page}: {ranks[page]:.4f}\")\n ranks = iterate_pagerank(corpus, DAMPING)\n print(f\"PageRank Results from Iteration\")\n for page in sorted(ranks):\n print(f\" {page}: {ranks[page]:.4f}\")\n\n\ndef crawl(directory):\n \"\"\"\n Parse a directory of HTML pages and check for links to other pages.\n Return a dictionary where each key is a page, and values are\n a list of all other pages in the corpus that are linked to by the page.\n \"\"\"\n pages = dict()\n\n # Extract all links from HTML files\n for filename in os.listdir(directory):\n if not filename.endswith(\".html\"):\n continue\n with open(os.path.join(directory, filename)) as f:\n contents = f.read()\n links = re.findall(r\"]*?)href=\\\"([^\\\"]*)\\\"\", contents)\n pages[filename] = set(links) - {filename}\n\n # Only include links to other pages in the corpus\n for filename in pages:\n pages[filename] = set(\n link for link in pages[filename]\n if link in pages\n )\n\n return pages\n\n\ndef transition_model(corpus, page, damping_factor):\n \"\"\"\n Return a probability distribution over which page to visit next,\n given a current page.\n\n With probability `damping_factor`, choose a link at random\n linked to by `page`. With probability `1 - damping_factor`, choose\n a link at random chosen from all pages in the corpus.\n \"\"\"\n holder = {}\n total_count = 0\n \n #duplicate links on the same page are treated as a single link,\n # and links from a page to itself are ignored as well)\n\n # find page in corpus\n for each_link in corpus[page]:\n if each_link not in holder and each_link is not page:\n holder[each_link] = 1 #duplicate links on the same page are treated as a single link,\n total_count+=1\n\n answer = {}\n if total_count == 0:\n damping_factor = 0\n else:\n page_prob = (damping_factor)/total_count\n \n for each_page in corpus:\n if each_page in holder:\n answer[each_page] = page_prob*holder[each_page] + (1-damping_factor)/len(corpus.keys())\n else:\n answer[each_page] = (1-damping_factor)/len(corpus.keys())\n return answer\n\n\ndef sample_pagerank(corpus, damping_factor, n):\n \"\"\"\n Return PageRank values for each page by sampling `n` pages\n according to transition model, starting with a page at random.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). 
All\n PageRank values should sum to 1.\n \"\"\"\n #initialise tracking dict\n holder = {}\n for each_page in corpus:\n holder[each_page] = 0\n \n # for first iteration\n init_page = random.choice(list(corpus.keys())) \n \n for i in range(n):\n probabilities = transition_model(corpus,init_page,damping_factor)\n next_page = calculate_page(probabilities)\n\n holder[next_page] +=1/n\n\n init_page = next_page\n return holder\n \ndef calculate_page(trans_prob):\n \n random_prob = random.uniform(0,1)\n lower_limit = 0\n upper_limit = 0\n for link,prob in trans_prob.items():\n upper_limit += prob\n if random_prob >= lower_limit and random_prob < upper_limit:\n return link\n else:\n lower_limit += prob\n\n\n\ndef iterate_pagerank(corpus, damping_factor):\n \"\"\"\n Return PageRank values for each page by iteratively updating\n PageRank values until convergence.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). All\n PageRank values should sum to 1.\n \"\"\"\n tolerance = 0.001\n answer = {}\n \n N = len(corpus.keys())\n # get initial PR values\n\n \n incoming_corpus = {}\n \n # get incoming corpus\n for link, link_to_others in corpus.items():\n incoming_corpus[link] = set()\n \n for link, link_to_others in corpus.items():\n for each_link in link_to_others:\n if each_link not in incoming_corpus:\n incoming_corpus[each_link] = {link}\n else:\n incoming_corpus[each_link].add(link)\n \n for link in corpus:\n answer[link] = 1/N\n \n \n numlinks = {}\n # get numlinks, remove duplicates\n for link, link_to_others in corpus.items():\n templist = []\n for i in link_to_others:\n if i in templist:\n continue\n else:\n templist.append(i)\n numlinks[link] = len(templist) \n # new PR values\n flag = True\n\n while flag:\n new_answer = {}\n for link, prob in answer.items():\n # get second term \n second_term = 0\n new_prob = 0\n for link2, prob2 in answer.items(): \n # iterating through incoming_corpus for link, if iterating (link2 = i) is in incoming_corpus link, sum it\n if link2 in incoming_corpus[link]:\n second_term += damping_factor * prob2/numlinks[link2]\n new_prob = (1-damping_factor)/N + second_term\n new_answer[link] = new_prob\n new_answer = normalize(new_answer)\n if compare_answers(answer, new_answer, tolerance):\n flag = False\n else:\n flag = True\n answer = copy.copy(new_answer)\n return answer\n \ndef normalize(answer):\n sum_of_answer_prob = sum(answer.values())\n return {k:(v/sum_of_answer_prob) for (k,v) in answer.items()}\n\n#Returns true when converged\ndef compare_answers(old, new, tolerance):\n\n for link in old:\n if abs(old[link] - new[link]) > tolerance:\n return False\n break\n return True\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"pagerank/pagerank/pagerank.py","file_name":"pagerank.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"442762593","text":"#\n# @lc app=leetcode id=322 lang=python3\n#\n# [322] Coin Change\n#\n# https://leetcode.com/problems/coin-change/description/\n#\n# algorithms\n# Medium (29.98%)\n# Likes: 2135\n# Dislikes: 85\n# Total Accepted: 233.5K\n# Total Submissions: 738.5K\n# Testcase Example: '[1,2,5]\\n11'\n#\n# You are given coins of different denominations and a total amount of money\n# amount. Write a function to compute the fewest number of coins that you need\n# to make up that amount. 
If that amount of money cannot be made up by any\n# combination of the coins, return -1.\n# \n# Example 1:\n# \n# \n# Input: coins = [1, 2, 5], amount = 11\n# Output: 3 \n# Explanation: 11 = 5 + 5 + 1\n# \n# Example 2:\n# \n# \n# Input: coins = [2], amount = 3\n# Output: -1\n# \n# \n# Note:\n# You may assume that you have an infinite number of each kind of coin.\n# \n#\nclass Solution:\n # DP - top-down\n def coinChange(self, coins: List[int], amount: int) -> int:\n if amount < 1:\n return 0\n return self.helper(coins, amount, {})\n \n def helper(self, coins, remain, count):\n if remain < 0: \n return -1\n if remain == 0:\n return 0\n if count.get(remain, 0) != 0:\n return count[remain]\n minValue = float(\"inf\")\n for coin in coins:\n res = 1 + self.helper(coins, remain - coin, count)\n if res > 0 and res < minValue:\n minValue = res\n count[remain] = -1 if minValue == float(\"inf\") else minValue\n return count[remain]\n \n # DP - botton-up\n def coinChange(self, coins: List[int], amount: int) -> int:\n dp = [amount + 1] * (amount + 1)\n dp[0] = 0\n for i in range(1, amount + 1):\n for j in range(len(coins)):\n if coins[j] <= i:\n dp[i] = min(dp[i], dp[i - coins[j]] + 1)\n return -1 if dp[amount] > amount else dp[amount]\n\n\n\n","sub_path":"322.coin-change.py","file_name":"322.coin-change.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"389294604","text":"from data import build_dict, get_data\n\n\ndef find_author(token: str, authors: any = None):\n \"\"\"\n Function to get data from spotify api\n \"\"\"\n if isinstance(authors, list):\n return build_dict(token, authors=authors)\n\n else:\n return build_dict(token, author=authors)\n\n\ndef find_track(token: str, tracks: list = None):\n songs = {}\n for track in tracks:\n response = get_data(\n url=f\"https://api.spotify.com/v1/tracks/{track}\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n\n if response[\"name\"] not in songs:\n songs[response[\"name\"]] = {}\n\n songs[response[\"name\"]][\"artists\"] = \", \".join(\n [artist[\"name\"] for artist in response[\"artists\"]]\n )\n songs[response[\"name\"]][\"album_type\"] = response[\"album\"][\"album_type\"]\n songs[response[\"name\"]][\"release_date\"] = response[\"album\"][\"release_date\"]\n songs[response[\"name\"]][\"song_art\"] = response[\"album\"][\"images\"][0][\"url\"]\n songs[response[\"name\"]][\"duration\"] = int(response[\"duration_ms\"]) / 1000\n songs[response[\"name\"]][\"preview_url\"] = response[\"preview_url\"]\n\n return songs\n\n\ndef find_new_music(token: str):\n songs = {}\n response = get_data(\n url=f\"https://api.spotify.com/v1/playlists/37i9dQZF1DX4JAvHpjipBk\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n\n for i in range(0, len(response[\"tracks\"][\"items\"])):\n songs[i] = {\n \"name\": response[\"tracks\"][\"items\"][i][\"track\"][\"name\"],\n \"artists\": \", \".join(\n [\n response[\"tracks\"][\"items\"][i][\"track\"][\"artists\"][j][\"name\"]\n for j in range(\n 0, len(response[\"tracks\"][\"items\"][i][\"track\"][\"artists\"])\n )\n ]\n ),\n \"image\": response[\"tracks\"][\"items\"][i][\"track\"][\"album\"][\"images\"][0][\n \"url\"\n ],\n \"preview_url\": response[\"tracks\"][\"items\"][i][\"track\"][\"preview_url\"],\n }\n\n return songs\n\n\ndef query_search(token: str, search: str, search_type: str):\n\n response = get_data(\n url=f\"https://api.spotify.com/v1/{'tracks' if search_type == 'track' else 
'artists'}/{search}\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n if \"error\" not in response:\n return (\n find_author(token=token, authors=search)\n if search_type == \"artist\"\n else find_track(token=token, tracks=[search])\n )\n\n else:\n response = get_data(\n url=f'https://api.spotify.com/v1/search?q={search.lower().replace(\" \", \"%20\")}&type={search_type}&market=US&limit=25',\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n if \"error\" not in response:\n return (\n find_author(\n token=token,\n authors=[artist[\"id\"] for artist in response[\"artists\"][\"items\"]],\n )\n if search_type == \"artist\"\n else find_track(\n token=token,\n tracks=[track[\"id\"] for track in response[\"tracks\"][\"items\"]],\n )\n )\n else:\n return {}\n\n\ndef add_checker(token: str, artist: str):\n response = get_data(\n url=f\"https://api.spotify.com/v1/artists/{artist}\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n if \"error\" not in response:\n return True\n else:\n return False\n","sub_path":"find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"166431063","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom pydispatch import dispatcher\nfrom scrapy import signals\nfrom selenium import webdriver\nfrom scrapy.http import Request\nfrom LaGou.tools import random_ua\nfrom LaGou.tools import common\nfrom LaGou.tools.Chrome_Tools import Chrome_Tool\nfrom LaGou.items import ArticleLoader\nfrom urllib import parse\nimport time\nimport re\nfrom LaGou import items\n\n\nclass LagouspiderSpider(scrapy.Spider):\n name = 'LaGouSpider'\n allowed_domains = ['www.lagou.com/beijing']\n start_urls = ['https://www.lagou.com/beijing/']\n # start_urls = ['https://www.lagou.com/beijing-zhaopin/webqianduan/']\n\n\n def __init__(self):\n # 信号量\n self.chrome = Chrome_Tool()\n super().__init__()\n dispatcher.connect(self.spider_closed, signal=signals.spider_closed)\n self.number = 1\n\n # 重启浏览器后需要重新 获取网页\n # self.chrome.get_page(url)\n def set_chrome(self):\n self.chrome.__del__()\n self.chrome.__init__()\n\n def spider_closed(self):\n self.chrome.__del__()\n\n # 解析首页\n def parse(self, response):\n # for i in range(1, 5):\n # # 交给 chrome处理\n # yield Request(url=self.start_urls[0], meta={'Key': i}, callback=self.parse_detial, encoding='utf8',\n # dont_filter=True)\n selectors_titles = response.xpath('//div[@class=\"mainNavs\"]/div[1]/div[2]/dl')\n for title in selectors_titles:\n TableName = title.xpath('dt/span/text()').get()\n print(\"======== \" + TableName + \" ========\")\n self.logger.info(\"======== \" + TableName + \" ========\")\n for i in title.xpath('dd//a'):\n DirectionType = i.xpath('h3/text()').get()\n url = i.xpath('@href').get()\n self.logger.info(\"%s %s %s\",DirectionType, \":\", url)\n print(DirectionType, \":\", url)\n # 列表页请求\n time.sleep(10)\n yield Request(url=url,\n meta={'TableName': TableName,\n 'DirectionType': DirectionType,\n 'Key':True},\n callback=self.parse_detial,\n encoding='utf8',\n dont_filter=True)\n\n\n\n # 解析列表页\n def parse_detial(self, response):\n print(\"列表页:\", response.url)\n try:\n TableName = response.meta.get('TableName',\"其他\")\n DirectionType = response.meta.get('DirectionType',\"x\")\n self.logger.info(\"✅列表页: %s : %s : %s\",TableName,DirectionType,response.url)\n\n ## 解析网页 通过循环获取职位Url 交给 详情页解析函数解析\n # //ul[@class=\"item_con_list\"]/li[2]\n selectors = response.xpath('//ul[@class=\"item_con_list\"]/li')\n\n for 
selector in selectors:\n Company_url = selector.xpath('div[1]/div[2]/div/a/@href').get()\n Company = selector.xpath('div[1]/div[2]/div/a/text()').get()\n Company = Company + '('+Company_url+')'\n url = selector.xpath('div[1]/div[1]/div/a/@href').get()\n s = re.match('(^https://.*?\\.html)\\?show=.*',url)\n url = s.group(1)\n time.sleep(30)\n yield Request(url=url,\n meta={\n 'Key':True,\n 'TableName': TableName,\n 'DirectionType': DirectionType,\n 'Company': Company,\n },\n callback=self.parse_detial_message,\n encoding='utf8',\n dont_filter=True)\n except Exception as e:\n self.logger.info(\"❌❌解析列表页[Error]:%s : %s\",response.url, e)\n print(\"❌❌解析列表页[Error]:%s : %s\",response.url, e)\n # 传递点击事件 获取URL\n try:\n i = 0\n while not self.chrome.find(\"//div[@class='item_con_pager']/div/a[@class='page_no pager_next_disabled']\"):\n i = i + 1\n time.sleep(30)\n yield Request(url=parse.urljoin(response.url, str(i)),\n meta={\n 'Key':True,\n 'TableName': TableName,\n 'DirectionType': DirectionType,\n },\n callback=self.parse_detial,\n encoding='utf8',\n dont_filter=True)\n except Exception as e:\n self.logger.info(\"❌❌列表下一页[Error]: %s %s\", response.url, e)\n print(\"❌❌列表下一页[Error]: %s %s\", response.url, e)\n\n # 解析详情页 ??\n def parse_detial_message(self, response):\n print(\"详情页:\", response.url)\n if response.status != 200:\n time.sleep(60)\n self.logger.info(\"❌❌详情页获取失败![error]: %s %s\",response.url, response.status)\n print(\"❌❌详情页获取失败![error]: %s %s\",response.url, response.status)\n yield Request(url=parse.urljoin(response.url, str(i)),\n meta={\n 'Key':True,\n 'TableName': response.meta.get(\"TableName\",\"其他\"),\n 'DirectionType': response.meta.get(\"DirectionType\",'X')\n },\n callback=self.parse_detial,\n encoding='utf8',\n dont_filter=True)\n\n # ID TableName DirectionType Url (方向 大、小)\n # Company (公司) Position(职位) Salary (薪资) Experience (经验)Education (学历)JobType (工作类型)\n # JobDescribe (概述)\n try:\n item_loader =ArticleLoader(item=items.LagouItem(), response=response)\n\n TableName = response.meta.get('TableName', '其他')\n DirectionType = response.meta.get('DirectionType', '不知')\n Company = response.meta.get('Company', '不知')\n\n item_loader.add_value('ID',common.get_md5(TableName+DirectionType+Company+response.url))\n\n item_loader.add_value('TableName', TableName)\n item_loader.add_value('DirectionType', DirectionType)\n item_loader.add_value('Url', response.url)\n\n item_loader.add_value('Company', Company)\n item_loader.add_xpath('Position', '//h1/text()')\n\n Salary = response.xpath('//div[@class=\"position-content-l\"]/dd/h3/span[1]/text()').get()\n\n\n if re.match('(.*k)\\-(.*k)', Salary):\n s = re.match('(.*)\\-(.*)', Salary)\n item_loader.add_value('Salary_min', s.group(1))\n item_loader.add_value('Salary_max', s.group(2))\n else:\n item_loader.add_value('Salary_min', Salary)\n item_loader.add_value('Salary_max', '-')\n\n item_loader.add_xpath('Experience', '//div[@class=\"position-content-l\"]/dd/h3/span[3]/text()') # 经验5-10年 /\n item_loader.add_xpath('Education', '//div[@class=\"position-content-l\"]/dd/h3/span[4]/text()') # 本科及以上 /\n item_loader.add_xpath('JobType', '//div[@class=\"position-content-l\"]/dd/h3/span[5]/text()') # 全职\n # item_loader.add_xpath('JobDescribe', '//*[@id=\"job_detail\"]/dd[2]//text()')\n\n str_ = \"\"\n Describe = response.xpath('//*[@id=\"job_detail\"]/dd[2]//text()')\n for i in Describe:\n str_ += i.get()\n\n item_loader.add_value('JobDescribe', str_)\n\n item = item_loader.load_item()\n print(item)\n yield item\n except Exception as e:\n 
self.logger.info('⭕️⭕️详情页信息获取失败: %s %s',response.url)\n print('⭕️⭕️详情页信息获取失败: %s %s',response.url)\n\n\n\n","sub_path":"Crawl/LaGou/LaGou/spiders/LaGouSpider.py","file_name":"LaGouSpider.py","file_ext":"py","file_size_in_byte":8178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"621645901","text":"import time\nimport RPi.GPIO as GPIO\nfrom datetime import datetime\n\nGPIO.setmode(GPIO.BCM) #extrai a pinagem da placa que esta sendo usada\nGPIO.setwarnings(False)\nsaidas = (0, 4, 17, 22, 10, 11, 14, 15)\nentradas = (23, 24)\nGPIO.setup(saidas, GPIO.OUT)\nGPIO.setup(entradas, GPIO.IN)\n\nimport pyrebase #importa a biblioteca de compatibilidade com firebase\n\nconfig = {\n \"apiKey\" : \"AlzaSyBQCtSX2w7qwRKvU2eYUbjikVzPROEc8XU\",\n \"authDomain\" : \"autohome-cfe1f.firebaseapp.com\",\n \"databaseURL\" : \"https://autohome-cfe1f.firebaseio.com\",\n \"storageBucket\" : \"autohome-cfe1f.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\n\ndb = firebase.database() \n\nGPIO.output(saidas, False)\n\n#bounce time is given in ms and represents the mininum time between two callbacks\nGPIO.add_event_detect(23, GPIO.RISING, bouncetime=5000)\ndef my_callback_rising(self):\n luz = db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Luz\").get()\n print(\"rising\")\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Luz\").child(\"status\").set(\"acesa\")\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Log\").child(\"Log_luz\").child(\"data\").set( \"acesa manualmente\")\nGPIO.add_event_callback(23, my_callback_rising)\nGPIO.add_event_detect(24, GPIO.FALLING, bouncetime=5000)\ndef my_callback_falling(self):\n luz = db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Luz\").get()\n print(\"falling\")\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Luz\").child(\"status\").set(\"apagada\")\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Log\").child(\"Log_luz\").child(\"data\").set(\"apagada manualmente\")\nGPIO.add_event_callback(24, my_callback_falling)\n\nwhile True:\n cafe = db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"queue\").get()\n luz = db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Luz\").get()\n cafe_on = db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"Power\").get()\n#status luz\n#Luz\n if luz.val() is not None: \n if luz.val()['status'] == \"apagada\":\n print(\"luz apagada\")\n GPIO.output(15, False)\n elif luz.val()['status'] == \"acesa\":\n print(\"luz acesa\")\n GPIO.output(15, True)\n#Cafeteira \t\n if cafe.val() is not None:\n if cafe_on.val()['status'] == \"on\": \n print(\"cafeteira ligada\") \n GPIO.output(14, True)\n else:\n print(\"Cafeteira Desligada\")\n GPIO.output(14, False)\n if cafe.val()['type'] == \"expresso\" or cafe.val()['type'] == \"Expresso\":\n print(cafe.val()['type'])\n GPIO.output(0,True)\n time.sleep(10)\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"queue\").child(\"type\").remove()\n GPIO.output(0,False)\n elif cafe.val()['type'] == \"duplo\" or cafe.val()['type'] == \"Duplo\":\n print(cafe.val()['type'])\n GPIO.output(4, True)\n time.sleep(10)\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"queue\").child(\"type\").remove()\n GPIO.output(4,False)\n elif cafe.val()['type'] == \"cappuccino\" or cafe.val()['type'] == \"Cappuccino\":\n 
print(cafe.val()['type'])\n GPIO.output(17, True)\n time.sleep(10)\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"queue\").child(\"type\").remove()\n GPIO.output(17,False)\n elif cafe.val()['type'] == \"latte\" or cafe.val()['type'] == \"Latte\":\n print(cafe.val()['type'])\n GPIO.output(22, True)\n time.sleep(10)\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"queue\").child(\"type\").remove()\n GPIO.output(22,False)\n elif cafe.val()['type'] == \"americano\" or cafe.val()['type'] == \"Americano\":\n print(cafe.val()['type'])\n GPIO.output(10, True)\n time.sleep(10)\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"queue\").child(\"type\").remove()\n GPIO.output(10,False)\n elif cafe.val()['type'] == \"mooca\" or cafe.val()['type'] == \"Mooca\":\n print(cafe.val()['type'])\n GPIO.output(11, True)\n time.sleep(10)\n db.child(\"Reg_Boards\").child(\"2www\").child(\"Device\").child(\"Cafeteira\").child(\"queue\").child(\"type\").remove()\n GPIO.output(11,False)\nGPIO.cleanup()\n","sub_path":"projeto3.py","file_name":"projeto3.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"438883466","text":"import decimal as d\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\nimport calendar\n\n\"\"\"\n\nclass MyAlgo(QCAlgorithm):\ndef Initialize(self):\nAddEquity(\"SPY\")\n\nself.Schedule.On(self.DateRules.Every(DayOfWeek.Monday, DayOfWeek.Monday), \\\nself.TimeRules.AfterMarketOpen(self.spy), \\\nAction(self.open_positions))\n\nself.Schedule.On(self.DateRules.Every(DayOfWeek.Friday, DayOfWeek.Friday), \\\nself.TimeRules.BeforeMarketClose(self.spy, 30), \\\nAction(self.close_positions))\n\ndef open_positions(self):\nself.SetHoldings(\"SPY\", 0.10)\n\ndef close_positions(self):\nself.Liquidate(\"SPY\")\n\"\"\"\n\nclass VigilantAssetAllocationAlgorithm(QCAlgorithm):\n\n def Initialize(self):\n self.SetCash(25000)\n self.SetStartDate(2004, 1, 1)\n self.LastRotationTime = datetime.min\n self.RotationInterval = timedelta(days=1)\n self.first = True\n\n\n # these are the growth symbols we'll rotate through\n GrowthSymbols = [\"SPY\", \n \"EFA\",\n \"EEM\",\n \"AGG\"]\n \n # these are the safety symbols we go to when things are looking bad for growth\n SafetySymbols = [\"LQD\", \n \"IEF\", \n \"SHY\"]\n \n # I split the indicators into two different sets to make it easier for illustrative purposes below.\n # Storing all risky asset data into SymbolData object\n self.SymbolData = []\n for symbol in list(GrowthSymbols):\n self.AddSecurity(SecurityType.Equity, symbol, Resolution.Minute)\n self.oneMonthPerformance = self.MOMP(symbol, 21, Resolution.Daily)\n self.threeMonthPerformance = self.MOMP(symbol, 63, Resolution.Daily)\n self.sixMonthPerformance = self.MOMP(symbol, 126, Resolution.Daily)\n self.twelveMonthPerformance = self.MOMP(symbol, 252, Resolution.Daily)\n self.SymbolData.append([symbol, self.oneMonthPerformance, self.threeMonthPerformance, self.sixMonthPerformance, self.twelveMonthPerformance])\n \n # Storing all risk-free data into SafetyData object\n self.SafetyData = []\n for symbol in list(SafetySymbols):\n self.AddSecurity(SecurityType.Equity, symbol, Resolution.Minute)\n self.oneMonthPerformance = self.MOMP(symbol, 21, Resolution.Daily)\n self.threeMonthPerformance = self.MOMP(symbol, 63, Resolution.Daily)\n self.sixMonthPerformance = self.MOMP(symbol, 
126, Resolution.Daily)\n self.twelveMonthPerformance = self.MOMP(symbol, 252, Resolution.Daily)\n self.SafetyData.append([symbol, self.oneMonthPerformance, self.threeMonthPerformance, self.sixMonthPerformance, self.twelveMonthPerformance])\n \n self.Schedule.On(self.DateRules.MonthEnd(\"SPY\"), self.TimeRules.AfterMarketOpen(\"SPY\", 10), self.Rebalance)\n self.rebalance = True \n \n # Note need to set benchmark after adding data!\n self.SetBenchmark(\"SPY\")\n \n def OnData(self, data):\n \n if self.first:\n self.first = False\n #self.LastRotationTime = self.Time\n return\n #delta = self.Time - self.LastRotationTime\n #if delta > self.RotationInterval:\n if self.rebalance == True:\n #self.LastRotationTime = self.Time\n \n ##Using the Score class at the bottom, compute the score for each risky asset.\n ##This approach overweights the front month momentum value and progressively underweights older momentum values\n \n orderedObjScores = sorted(self.SymbolData, key=lambda x: Score(x[1].Current.Value,x[2].Current.Value,x[3].Current.Value,x[4].Current.Value).ObjectiveScore(), reverse=True)\n \n ##Using the Score class at the bottom, compute the score for each risk-free asset.\n orderedSafeScores = sorted(self.SafetyData, key=lambda x: Score(x[1].Current.Value,x[2].Current.Value,x[3].Current.Value,x[4].Current.Value).ObjectiveScore(), reverse=True)\n \n ##Count the number of risky assets with negative momentum scores and store in N. If all four of the offensive assets exhibit positive momentum scores, \n ##select the offensive asset with the highest score and allocate 100% of the portfolio to that asset at the close\n N = 0\n for x in orderedObjScores:\n self.Log(\">>SCORE>>\" + x[0] + \">>\" + str(Score(x[1].Current.Value,x[2].Current.Value,x[3].Current.Value,x[4].Current.Value).ObjectiveScore()))\n if Score(x[1].Current.Value,x[2].Current.Value,x[3].Current.Value,x[4].Current.Value).ObjectiveScore() < 0:\n N += 1\n \n # pick which one is best from risky and risk-free symbols and store for use below\n bestGrowth = orderedObjScores[0]\n secondGrowth = orderedObjScores[1]\n bestSafe = orderedSafeScores[0]\n secondSafe = orderedSafeScores[1]\n \n ## If any of the four risky assets exhibit negative momentum scores, select the risk-free asset (LQD, IEF or SHY) with the highest score \n ## (regardless of whether the score is > 0) and allocate 100% of the portfolio to that asset at the close. 
\n if N > 0:\n self.Log(\"PREBUY>>LIQUIDATE>>\")\n self.Liquidate()\n self.Log(\">>BUY>>\" + str(bestSafe[0]) + \"@\" + str(Decimal(100) * bestSafe[1].Current.Value))\n self.SetHoldings(bestSafe[0], 1) \n #self.SetHoldings(secondSafe[0], .5) \n self.rebalance = False\n else: \n self.Log(\"PREBUY>>LIQUIDATE>>\")\n self.Liquidate()\n self.Log(\">>BUY>>\" + str(bestGrowth[0]) + \"@\" + str(Decimal(100) * bestGrowth[1].Current.Value))\n self.SetHoldings(bestGrowth[0], 1) \n #self.SetHoldings(secondGrowth[0], .5) \n self.rebalance = False\n \n def Rebalance(self):\n self.rebalance = True\n self.Debug(\"Rebalance\") \n \n\nclass Score(object):\n \n def __init__(self,oneMonthPerformanceValue,threeMonthPerformanceValue,sixMonthPerformanceValue,twelveMonthPerformanceValue):\n self.oneMonthPerformance = oneMonthPerformanceValue\n self.threeMonthPerformance = threeMonthPerformanceValue\n self.sixMonthPerformance = sixMonthPerformanceValue\n self.twelveMonthPerformance = twelveMonthPerformanceValue\n \n def ObjectiveScore(self):\n weight1 = 12\n weight2 = 4\n weight3 = 2\n return (weight1 * self.oneMonthPerformance) + (weight2 * self.threeMonthPerformance) + (weight3 * self.sixMonthPerformance) + self.twelveMonthPerformance","sub_path":"Algorithm.Python/VigilantAssetAllocation.py","file_name":"VigilantAssetAllocation.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"349392724","text":"\"\"\"\nQuizCardList\n\nHandles listiation table between quiz and cards\n\"\"\"\n\nfrom models.quizcardlist import QuizCardListModel\nfrom util.logger import Logger\nfrom util.parser import ReqParser\n\n\nclass QuizCardListController():\n logger = Logger(__name__)\n @classmethod\n def make_quiz_card_list(cls, data):\n if QuizCardListModel.find_by_id(data['quiz_id']):\n return \"Quiz card list with given quiz_id already exists. 
Use PUT to edit\", 400, None\n\n cards = QuizCardListController.card_list_parser(data['cards'])\n # cards = data['cards'] # this is a list\n\n if QuizCardListController.check_duplicate_card(cards):\n return \"each card used in a quiz must be unique\", 400, None\n\n try:\n new_quiz_card_list = QuizCardListModel(data['quiz_id'], cards)\n new_quiz_card_list.save_to_db()\n except:\n cls.logger.exception(\"Error in creating a new card list for quiz\")\n return \"Internal Server Error\", 500, None\n\n return \"\", 201, None\n\n @classmethod\n def card_list_parser(self, cards):\n \"\"\"\n gets a list of json strings and returns a {\"1\": \"name1\",...} format\n \"\"\"\n quiz_card_list = {}\n for index, card in enumerate(cards.values()):\n quiz_card_list[str(index+1)] = card\n return quiz_card_list\n\n @classmethod\n def check_duplicate_card(cls, cards):\n if len(cards.values()) is not len(set(cards.values())):\n return True\n return False\n\n @classmethod\n def edit_quiz_card_list(cls, data):\n if not QuizCardListModel.find_by_id(data['quiz_id']):\n return \"Quiz card list with given quiz_id does not exists.\", 400, None\n\n try:\n target_list = QuizCardListModel.find_by_id(data['quiz_id'])\n target_list['cards'] = data['cards']\n except:\n cls.logger.exception(\"Error in editing a quiz card list\")\n return \"Internal Server Error\", 500, None\n\n return \"\", 200, None\n\n @classmethod\n def get_all(cls):\n try:\n quiz_cards = QuizCardListModel.get_all()\n except:\n cls.logger.exception(\"Error in getting all cards for given quiz_id\")\n return \"Internal System Error\", 500, None\n\n return \"\", 200, quiz_cards\n\n @classmethod\n def get_quiz_card_list_by_id(cls, quiz_id):\n if not QuizCardListModel.find_by_id(quiz_id):\n return \"Quiz with that quiz_id does not exists\", 400, None\n\n try:\n target_list = QuizCardListModel.find_by_id(quiz_id)\n except:\n cls.logger.exception(\"Error in getting quiz card list for given quiz_id\")\n return \"Internal System Error\", 500, None\n\n return \"\", 200, target_list\n","sub_path":"controllers/quizcardlist.py","file_name":"quizcardlist.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"643468099","text":"import time\nimport pandas as pd\nfrom pathlib import Path\nfrom options.train_options import TrainOptions\nfrom data import CreateDataLoader\nfrom models import create_model\nfrom util.custom_visualizer import Visualizer\n\nfrom datasets.mgh.SurvDataset import SurvDataset, SurvBootstrapDataset, SurvCrossValidationDataset\nfrom datasets.mgh.slice.SliceDataGenerator import SliceDataGenerator\n\nfrom datasets.mgh.summarize_pred import summarized_statistics, get_statistics, plot_predicted_time_histogram\n\n# HY/2018-09-15 added logging\n# TU/2018-09-18 added custom formatter\nimport logging\nimport logging.handlers\nfrom util.logging_formatter import CustomFormatter\nimport sys\n# 2018-10-12/TU: added stdout handler\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(CustomFormatter())\n# 2018-10-12/TU: added file handler to output log file\nfile_handler = logging.FileHandler('checkpoints/train.log')\nfile_handler.setFormatter(CustomFormatter())\n\nlogging.basicConfig(level=logging.INFO, handlers=[handler, file_handler])\nlogger = logging.getLogger(\"\") # 2018-09-17/TU: set root logger\n\n\ndef build_model(opt, save_opt=True):\n model = create_model(opt)\n model.setup(opt)\n import pickle\n if save_opt:\n save_path = 
Path(opt.checkpoints_dir).joinpath(opt.name, 'options.pkl')\n with open(save_path, 'wb') as file_:\n pickle.dump(opt, file_)\n return model\n\n\ndef train_loop(model, opt, dataset_train, dataset_valid=None):\n dataset_len_train = len(dataset_train)\n\n logger.info('#training with %d images' % dataset_len_train)\n\n if dataset_valid:\n dataset_len_valid = len(dataset_valid)\n # 2018-09-21/TU: get cycle iterator to validate the model in every print_freq (1 batch / iteration)\n from itertools import cycle\n cycle_iter_valid = cycle(iter(dataset_valid))\n else:\n dataset_len_valid = 0\n cycle_iter_valid = None\n\n visualizer = Visualizer(opt)\n visualizer.reset()\n\n total_steps = 0\n y_max = float('-inf') # for fixing the figure range of y axis\n\n # 2018-10-10/TU: loop the training while lr is not zero\n for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n epoch_start_time = time.time()\n iter_data_time = time.time()\n epoch_iter = 0\n batch_iter = 0\n preds_for_batch = []\n scatter_train, event_train, ids_train = [], [], []\n\n for i, data in enumerate(dataset_train):\n iter_start_time = time.time()\n if total_steps % opt.print_freq == 0:\n t_data = iter_start_time - iter_data_time\n visualizer.reset()\n total_steps += opt.batch_size\n epoch_iter += opt.batch_size\n batch_iter += 1\n\n model.set_input(data)\n # try:\n # generator = False if epoch <= 3 else True\n model.optimize_parameters()\n # except Exception as e:\n # import traceback\n # logger.error(traceback.format_exc())\n\n if (total_steps % opt.display_freq) < ((total_steps - opt.batch_size) % opt.display_freq):\n model.gather_time_images_to_cpu()\n save_result = total_steps % opt.update_html_freq == 0\n visualizer.display_current_results(model.get_current_visuals(), epoch, save_result, fig_name=\"train\")\n\n if (total_steps % opt.display_freq) < ((total_steps - opt.batch_size) % opt.display_freq):\n # update the max value of the loss\n y_max = max(max(model.get_current_losses().values()), y_max)\n\n losses = model.get_current_losses()\n t = (time.time() - iter_start_time) / opt.batch_size\n visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)\n if opt.display_id > 0:\n visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_len_train, losses,\n fig_name=\"train\", ylim=(0, y_max))\n\n # 2018-09-18/TU: added metrics\n metrics = model.metrics()\n if opt.display_id > 0:\n visualizer.plot_current_metrics(epoch, float(epoch_iter) / dataset_len_train, metrics,\n fig_name=\"train\", ylim=(0, 1.0))\n visualizer.plot_current_pred(model.actual_pred_data, fig_name='train iter scatter',\n xlim=(0, 4500), ylim=(0, 4500))\n scatter_train.extend(model.actual_pred_data)\n event_train.extend(data['e'].cpu().data.tolist())\n ids_train.extend(data['id'].tolist())\n\n # 2018-09-21/TU: validate the model\n if dataset_len_valid > 0:\n data_val = next(cycle_iter_valid)\n model.set_input(data_val)\n model.test()\n losses_val = model.get_current_losses()\n metrics_val = model.metrics()\n\n y_max = max(max(losses_val.values()), y_max)\n\n visualizer.display_current_results(model.get_current_visuals(), epoch, save_result=False,\n fig_name=\"validation iter\")\n visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_len_train, losses_val,\n fig_name=\"validation iter\", ylim=(0, y_max))\n visualizer.plot_current_metrics(epoch, float(epoch_iter) / dataset_len_train, metrics_val,\n fig_name=\"validation iter\", ylim=(0, 1.0))\n visualizer.plot_current_pred(model.actual_pred_data, fig_name='valid iter 
scatter',\n xlim=(0, 4500), ylim=(0, 4500))\n\n if total_steps % opt.save_latest_freq == 0:\n logger.info('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))\n model.save_networks('latest')\n\n iter_data_time = time.time()\n\n if dataset_len_valid > 0:\n # 2018-09-20/TU: added validation routine\n losses_val, scatter_val, event, ids = dict(), [], [], []\n for i, data in enumerate(dataset_valid):\n visualizer.reset()\n\n model.set_input(data)\n model.test()\n losses = model.get_current_losses()\n model.metrics()\n scatter_val.extend(model.actual_pred_data)\n event.extend(data['e'].cpu().data.tolist())\n ids.extend(data['id'].tolist())\n\n if not i == 0:\n for key in losses.keys():\n losses_val[key] += losses[key]\n # for key in metrics.keys():\n # metrics_val[key] += metrics[key]\n else:\n losses_val = losses\n # metrics_val = metrics\n for key in losses_val.keys():\n losses_val[key] /= dataset_len_valid / opt.batch_size\n\n # 2018-11-19/TU: summarize the predicted times for test\n def create_data_frame(scatter, event):\n time_df = pd.DataFrame(columns=['event_time', 'observed', 'pred_times', 'pred'])\n t, pred_t = zip(*scatter)\n time_df['event_time'] = t\n time_df['observed'] = event\n time_df['pred'] = pred_t\n\n return time_df\n\n pred_df_val = create_data_frame(scatter_val, event)\n metrics_val = get_statistics(pred_df_val)\n summarized_metrics_val, summarized_scatter_val, summarized_df = summarized_statistics(scatter_val,\n event, ids)\n plot_predicted_time_histogram(summarized_df, save_dir=model.save_dir, epoch=epoch)\n\n summarized_metrics_train, summarized_scatter_train, _ = summarized_statistics(scatter_train,\n event_train, ids_train)\n # update the max value of the loss\n y_max = max(max(losses_val.values()), y_max)\n\n # 2018-11-20/TU: plot for validation\n visualizer.plot_current_losses(epoch, 0, losses_val,\n fig_name=\"validation\", ylim=(0, y_max))\n visualizer.plot_current_metrics(epoch, 0, metrics_val,\n fig_name=\"validation\", ylim=(0, 1.0))\n visualizer.plot_current_pred(scatter_val, fig_name='valid scatter', plot_limit=1000,\n xlim=(0, 4500), ylim=(0, 4500))\n visualizer.plot_current_pred(scatter_train, fig_name='train scatter', plot_limit=1000,\n xlim=(0, 4500), ylim=(0, 4500))\n\n visualizer.plot_current_metrics(epoch, 0, summarized_metrics_val,\n fig_name=\"valid_summarized\", ylim=(0, 1.0))\n visualizer.plot_current_pred(summarized_scatter_val, fig_name='valid summarized scatter',\n xlim=(0, 4500), ylim=(0, 4500))\n\n visualizer.plot_current_metrics(epoch, 0, summarized_metrics_train,\n fig_name=\"train_summarized\", ylim=(0, 1.0))\n visualizer.plot_current_pred(summarized_scatter_train, fig_name='train summarized scatter',\n xlim=(0, 4500), ylim=(0, 4500))\n\n # if epoch % opt.save_epoch_freq == 0:\n if epoch in [5, 7, 10]:\n logger.info('saving the model at the end of epoch %d, iters %d' %\n (epoch, total_steps))\n model.save_networks('latest')\n model.save_networks(epoch)\n\n logger.info('End of epoch %d / %d \\t Time Taken: %d sec' %\n (epoch, opt.niter + opt.niter_decay,\n time.time() - epoch_start_time)) # HY/2018-09-16 'opt.niter + opt.niter_decay' determines the total number of epochs.\n model.update_learning_rate()\n\n logger.info('saving the latest model at the end of training.')\n model.save_networks('latest')\n\n # 2018-11-21/TU: test at the end of training\n if dataset_len_valid > 0:\n scatter_val, event, ids = [], [], []\n for i, data in enumerate(dataset_valid):\n visualizer.reset()\n\n model.set_input(data)\n model.test()\n 
model.metrics()\n scatter_val.extend(model.actual_pred_data)\n event.extend(data['e'].cpu().data.tolist())\n ids.extend(data['id'].tolist())\n\n # 2018-11-19/TU: summarize the predicted times for test prediction\n _, _, summarized_df = summarized_statistics(scatter_val, event, ids)\n else:\n summarized_df = None\n\n return summarized_df\n\n\ndef oneshot_training(opt):\n filtering_params = {\n 'mask_area': 128 ** 2,\n 'has_GAP': 'GAP' in opt.extra_channels or 'GAP_one-hot' in opt.extra_channels,\n 'samples_per_case': opt.samples_per_case\n }\n\n surv_dataset = SurvDataset('datasets/mgh/slice/RAILD_{}_slice_surv_data_v{}.h5'\n .format(opt.mask_type, opt.gpu_ids[0]*2))\n surv_dataset.generate_data_frame(validation_ratio=0, **filtering_params)\n data_loader_train = CreateDataLoader(opt, surv_dataset=surv_dataset, drop_last=True,\n rotation_range=45)\n dataset_train = data_loader_train.load_data()\n\n surv_dataset.h5_file_path = 'datasets/mgh/slice/RAILD_{}_slice_surv_data_v{}.h5'.format(opt.mask_type, opt.gpu_ids[0]*2 + 1)\n data_loader_test = CreateDataLoader(opt, surv_dataset=surv_dataset, drop_last=True,\n phase='test')\n dataset_test = data_loader_test.load_data()\n\n try:\n model = build_model(opt)\n train_loop(model, opt, dataset_train, dataset_test)\n except Exception as e:\n import traceback\n logger.error(traceback.format_exc())\n raise e\n\n\ndef cross_validation(num_fold, opt):\n import os\n opt.checkpoints_dir = opt.checkpoints_dir + '/' + opt.name\n if not os.path.exists(opt.checkpoints_dir):\n os.makedirs(opt.checkpoints_dir)\n\n filtering_params = {\n 'mask_area': 128 ** 2,\n 'has_GAP': 'GAP' in opt.extra_channels or 'GAP_one-hot' in opt.extra_channels,\n 'samples_per_case': opt.samples_per_case\n }\n\n surv_cross_valid_dataset = SurvCrossValidationDataset('datasets/mgh/slice/RAILD_lung_slice_surv_data.h5')\n surv_cross_valid_dataset.generate_data_frame(num_fold, **filtering_params)\n surv_cross_valid_dataset.cross_validation_df.to_csv(opt.checkpoints_dir + '/cross_valid_dataset.csv')\n d_gen = SliceDataGenerator(exams_df=surv_cross_valid_dataset.exams_df)\n\n summary_df_list = []\n base_name = opt.name\n for fold in range(num_fold):\n opt.name = base_name + '_v{}'.format(fold + 1)\n opt.display_env = opt.name\n save_dir = Path(opt.checkpoints_dir).joinpath(opt.name)\n if not save_dir.exists():\n save_dir.mkdir()\n\n surv_cross_valid_dataset.set_validation_no(fold + 1)\n surv_data = d_gen.generate_dataset(surv_cross_valid_dataset)\n max_time = surv_data['train']['end_t']\n\n data_loader_train = CreateDataLoader(opt, surv_data=surv_data['train'], drop_last=True, t_max=max_time,\n rotation_range=45)\n dataset_train = data_loader_train.load_data()\n\n data_loader_test = CreateDataLoader(opt, surv_data=surv_data['test'], drop_last=True, t_max=max_time)\n dataset_test = data_loader_test.load_data()\n\n try:\n model = build_model(opt)\n summary_df = train_loop(model, opt, dataset_train, dataset_test)\n summary_df_list.append(summary_df)\n except Exception as e:\n import traceback\n logger.error(traceback.format_exc())\n raise e\n\n from functools import reduce\n result_df = reduce(lambda x, y: pd.concat((x, y)), summary_df_list)\n result_df.to_csv(opt.checkpoints_dir + '/{}-folds_cross_validation.csv'.format(num_fold))\n\n\ndef train_bootstrapping(opt, resampling=100):\n opt.checkpoints_dir = opt.checkpoints_dir + '/' + opt.name\n import os\n if not os.path.exists(opt.checkpoints_dir):\n os.makedirs(opt.checkpoints_dir)\n\n # 2018-12-17/TU: need copies of the HDF5 file for multi 
running on the different GPU (file suffix is '_v**')\n # If read a HDF5 file as the same time from the different processes,\n # it should cause the unexpected error because HDF5 is not supported the multi process\n surv_bootstrap_dataset = SurvBootstrapDataset('datasets/mgh/slice/RAILD_{}_slice_surv_data_v{}.h5'\n .format(opt.mask_type, opt.gpu_ids[0]))\n\n filtering_params = {\n 'mask_area': 128**2,\n 'has_GAP': 'GAP' in opt.extra_channels or 'GAP_one-hot' in opt.extra_channels,\n 'samples_per_case': opt.samples_per_case\n }\n\n indices = ['bootstrap_{:04d}'.format(n + 1) for n in range(resampling)]\n for index in indices:\n opt.name = index\n save_dir = Path(opt.checkpoints_dir + '/' + opt.name)\n if save_dir.exists():\n continue\n else:\n save_dir.mkdir()\n\n # 2018-11-28/TU: train the model with the subset\n surv_bootstrap_dataset.generate_data_frame(**filtering_params)\n surv_bootstrap_dataset.subset_df.to_csv(save_dir.joinpath('subset.csv'))\n surv_bootstrap_dataset.filtered_dataset_df.to_csv(save_dir.joinpath('filtered_dataset.csv'))\n surv_bootstrap_dataset.subsampled_dataset_df.to_csv(save_dir.joinpath('original_dataset.csv'))\n\n data_loader = CreateDataLoader(opt, surv_dataset=surv_bootstrap_dataset, drop_last=True,\n rotation_range=45, phase='train')\n data_iter = data_loader.load_data()\n\n model = build_model(opt)\n train_loop(model, opt, data_iter)\n # model.eval()\n\n # 2018-11-28/TU: prediction(x*, x*)\n data_loader = CreateDataLoader(opt, surv_dataset=surv_bootstrap_dataset, drop_last=True,\n phase='consistency')\n # data_loader.dataset.phase = 'consistency'\n data_iter = data_loader.load_data()\n consistency_summary_df = test_model(model, data_iter)\n consistency_summary_df.to_csv(save_dir.joinpath('consistency.csv'))\n\n # 2018-11-28/TU: prediction(x*, x)\n data_loader = CreateDataLoader(opt, surv_dataset=surv_bootstrap_dataset, drop_last=True,\n phase='original')\n # data_loader.dataset.phase = 'original'\n data_iter = data_loader.load_data()\n test_summary_df = test_model(model, data_iter)\n test_summary_df.to_csv(save_dir.joinpath('test.csv'))\n\n opt.name = 'bootstrap_origin'\n save_dir = Path(opt.checkpoints_dir + '/' + opt.name)\n if not save_dir.exists():\n save_dir.mkdir()\n\n # 2018-11-28/TU: train the model with original dataset x\n # surv_dataset = SurvDataset(surv_bootstrap_dataset.h5_file_path,\n # source_df=surv_bootstrap_dataset.dataset_df)\n surv_bootstrap_dataset.generate_data_frame(training_ratio=1.0, validation_ratio=0.0, **filtering_params)\n surv_bootstrap_dataset.filtered_dataset_df.to_csv(save_dir.joinpath('filtered_dataset.csv'))\n surv_bootstrap_dataset.train_df.to_csv(save_dir.joinpath('dataset.csv'))\n\n data_loader = CreateDataLoader(opt, surv_dataset=surv_bootstrap_dataset, drop_last=True,\n rotation_range=45, phase='train')\n data_iter = data_loader.load_data()\n\n model = build_model(opt)\n train_loop(model, opt, data_iter)\n # model.eval()\n\n # 2018-11-28/TU: prediction(x, x)\n data_loader = CreateDataLoader(opt, surv_dataset=surv_bootstrap_dataset, drop_last=True,\n phase='original')\n data_iter = data_loader.load_data()\n consistency_summary_df = test_model(model, data_iter)\n consistency_summary_df.to_csv(save_dir.joinpath('consistency.csv'))\n\n\ndef test_model(model, dataset):\n scatter_val, event, ids = [], [], []\n for i, data in enumerate(dataset):\n model.set_input(data)\n model.test()\n model.metrics()\n scatter_val.extend(model.actual_pred_data)\n event.extend(data['e'].cpu().data.tolist())\n 
ids.extend(data['id'].tolist())\n\n # 2018-11-19/TU: summarize the predicted times for test prediction\n _, _, summary_df = summarized_statistics(scatter_val, event, ids)\n return summary_df\n\n\nif __name__ == '__main__':\n # 2018-09-17/TU: hard coding of arguments\n sys.argv.extend([\n # '--no_image',\n # '--no_mask',\n '--no_dropout',\n '--add_gaussian_noise',\n '--batch_size', '64',\n # '--extra_channels', 'd_age', 'd_gender', 'GAP',\n '--name', 'pix2surv_bootstrap_v33',\n '--samples_per_case', '100',\n # '--mask_as_x',\n \n '--dataroot', 'datasets/mgh',\n '--model', 'pix2surv',\n '--image_size', '256',\n '--lr', '1.0e-5',\n '--netD', 'n_layers',\n '--n_layers_D', '6',\n '--ndf', '2',\n '--netG', 'unet_256',\n '--ngf', '64',\n '--niter', '5',\n '--niter_decay', '5',\n '--gpu_ids', '0',\n # '--master_node_id', '0',\n ])\n\n opt = TrainOptions().parse()\n\n train_bootstrapping(opt, 50)\n\n # cross_validation(5, opt)\n\n # oneshot_training(opt)\n\n","sub_path":"train_surv.py","file_name":"train_surv.py","file_ext":"py","file_size_in_byte":20300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"463096193","text":"import PyPDF2\n\nf = open('mypdf.pdf', 'rb')\n\npdf_reader = PyPDF2.PdfFileReader(f)\n\nprint(pdf_reader.numPages)\n\npage_one = pdf_reader.getPage(0)\npage_one_text = page_one.extractText()\nprint(page_one_text)\n\npdf_writer = PyPDF2.PdfFileWriter()\npdf_writer.addPage(page_one)\npdf_output = open('some.pdf','wb')\npdf_writer.write(pdf_output)\n\n\n\npdf_output.close()\nf.close()\n","sub_path":"test_pdf.py","file_name":"test_pdf.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"528161956","text":"import tensorflow as tf\n\n# Disable TF info logs\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n\n# Tensor shapes\nwith tf.Graph().as_default():\n scalar = tf.zeros([])\n vector = tf.zeros([3])\n matrix = tf.zeros([2, 3])\n\n with tf.Session() as sess:\n print('Scalar has shape', scalar.get_shape(), 'and value:\\n', scalar.eval())\n print('Vector has shape', vector.get_shape(), 'and value:\\n', vector.eval())\n print('Matrix has shape', matrix.get_shape(), 'and value:\\n', matrix.eval())\n\n# # Vector Addition\n# # Create and manipulate two vectors\n# with tf.Graph().as_default():\n# # Create a six-element vector (1-D tensor)\n# primes = tf.constant([2, 3, 5, 7, 11, 13], dtype=tf.int32)\n#\n# # Create another six-element vector.\n# # Each element in the vector will be initialize to 1\n# # The first arg is the shape of the tensor\n# # Either works\n# ones = tf.constant(1, dtype=tf.int32)\n# ones = tf.ones([6], dtype=tf.int32)\n#\n# # Add the two vectors. 
The resulting tensor is a six-element vector\n# just_beyond_primes = tf.add(primes, ones)\n#\n# # Create a session to run the default graph\n# with tf.Session() as sess:\n# print(just_beyond_primes.eval())\n","sub_path":"PreWork/TFCreatingAndManipulatingTensors.py","file_name":"TFCreatingAndManipulatingTensors.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"155729254","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 1 06:49:13 2019\r\npip install fuzzywuzzy\r\n\r\n\r\nhttps://marcobonzanini.com/2015/02/25/fuzzy-string-matching-in-python/\r\n\r\n@author: rzhou11\r\n\"\"\"\r\nimport pandas as pd\r\nfrom fuzzywuzzy import fuzz\r\nfrom fuzzywuzzy import process\r\n\r\n#File_to_import: 'NMR42.xlsx'\r\n\r\nFile_to_import='../data/Step_3_NMR42.xlsx'\r\nFile_to_export='../data/Step_4_NMR42_IDmatch.xlsx'\r\nFile_need_attention_to_export='../data/Step_4_NMR42_IDmatch_Need_Attention.xlsx'\r\n\r\nFile_nmruser='../NMR_User_List.xlsx'\r\n\r\nuser_to_exclude=['rzhou11','abingol1','test']\r\n\r\n# open the excel file\r\ndf=pd.read_excel(File_to_import)\r\n\r\nc=[]\r\n\r\nl=len(df['File Name'])\r\n\r\n# Split the file name to generate user name\r\nfor i in range(0,l):\r\n if '_' in df['File Name'][i]:\r\n c.append(df['File Name'][i].split('_')[0].lower())\r\n elif '-' in df['File Name'][i]:\r\n c.append(df['File Name'][i].split('-')[0].lower())\r\n else:\r\n c.append(df['File Name'][i].lower())\r\n\r\n# add a new column 'User' based on the split user name\r\ndf['User']=c\r\n\r\n\r\n# get the NMR user list from the file: NMR User List.xlsx\r\ndf_user=pd.read_excel(File_nmruser,header=0)\r\n\r\n# extract the User column from the file\r\nuserlist=list(df_user['User'])\r\n\r\n#print(userlist)\r\n\r\n# check the length of the users\r\nuser_len=[]\r\nfor user in userlist:\r\n user_len.append(len(user))\r\nuser_len_min=min(user_len)\r\n\r\n# \r\n#print(min(user_len))\r\n\r\n# initialize an empty list\r\nuser_edited=[]\r\n\r\n# loop through the whole user list in the df file\r\nfor i in range(len(df.User)):\r\n#print(df.User[i])\r\n#print(type(df.User[i]))\r\n#\r\n username=df.User[i]\r\n if username[0].isdigit()==True or len(username)<=user_len_min-2:\r\n user_edited.append('Unknown')\r\n \r\n else:\r\n userID=process.extractOne(username,userlist)[0]\r\n user_match=process.extractOne(username,userlist)[1]\r\n# get the user if the match factor is greater than 80, otherwise mark it as 'unknown'\r\n \r\n if user_match>=80:\r\n user_edited.append(userID)\r\n else:\r\n user_edited.append('Unknown')\r\n\r\n\r\n#print(user_edited)\r\n#print(len(user_edited))\r\n\r\n# add a new column in the df file with the column name: user_edited\r\ndf['User_edited']=user_edited\r\n#print(df)\r\n\r\n# add another new column to check if the user_edited matches is the same as the user in the df file with the column name: Match\r\ndf['Match']=df.User==df.User_edited\r\n#print(df)\r\n\r\n# remove some extra columns in the df:\r\ndf=df[['File Name', 'Date modified','Instrument','User','User_edited','Match']]\r\nprint(df.head())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# write to a new file:\r\ndf.to_excel(File_to_export,index=False)\r\n\r\ndf_need_attention=df[df['Match']==False]\r\ndf_need_attention.to_excel(File_need_attention_to_export,index=False)\r\nprint(df_need_attention.shape)\r\n\r\nerror_rate=\"{0:.1%}\".format(df_need_attention.shape[0]/df.shape[0])\r\nprint(\"User ID Typo Rate: \"+ str(error_rate))\r\n\r\n# cannot get 
this following hbia791 right; tried to lower the user_match value from 80 to 67 but got more errors for other users;\r\n# hbia791 and hbian only 67\r\n#155\thbia791-P1\t2019-09-05 15:03:40.443761\tNMR42-2311\thbia791\tUnknown\tFALSE\r\n","sub_path":"script/Step 4. SplitFileName_IDmatch.py","file_name":"Step 4. SplitFileName_IDmatch.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"71417615","text":"import random\r\nprint(\"Enter your choice\")\r\n\r\nprint(\"Press 1 to play\")\r\nprint(\"Press 2 to Quit\")\r\nchoice=int(input(\"Enter your choice \"))\r\nuser=str(input(\"Scissor or Paper or Rock \"))\r\nprint(\"You chosed \"+user)\r\noptions=['Scissor','Paper','Rock'] \r\ncomp=random.choice(options)\r\nprint(\"Computer chosed \"+comp)\r\nwhile(choice!=2):\r\n if(user==comp):\r\n print(\"Draw\")\r\n break\r\n elif(user=='Scissor' and comp=='Paper'):\r\n print(\"You win\")\r\n break\r\n elif(user=='Scissor' and comp=='Rock'):\r\n print(\"You lose\")\r\n break\r\n elif(user=='Paper' and comp=='Scissor'):\r\n print(\"You lose\")\r\n break\r\n elif(user=='Paper' and comp=='Rock'):\r\n print(\"You win\")\r\n break\r\n elif(user=='Rock' and comp=='Paper'):\r\n print(\"You lose\")\r\n break\r\n elif(user=='Rock' and comp=='Scissor'):\r\n print(\"You win\")\r\n break\r\n else:\r\n print(\"ok\")\r\n break;\r\nprint(\"Thanks for playing\")\r\n \r\n\r\n","sub_path":"SPR.py","file_name":"SPR.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"434143134","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 16 18:57:53 2018\r\n\r\n@author: Andre\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nimport os\r\nimport platform\r\nimport sys\r\nimport pickle\r\nfrom scipy.interpolate import interp1d, interp2d\r\n\r\nbaseDir=os.path.dirname(os.path.realpath(sys.argv[0]))\r\n\r\nsys.path.insert(0, os.path.join(baseDir,\"myLibrary\"))\r\n\r\n \r\n\r\n#import commonFunctions as cf\r\n#from solarConstants import solarConstants\r\n#sc = solarConstants()\r\n#from scipy.interpolate import interp1d\r\n\r\nclass solarRadialDependence():\r\n def __init__(self,r):\r\n self.radius = r\r\n \r\n def interpolate(self,newR,newL):\r\n \r\n self.interpolatedRadius = newR\r\n self.interpolatedWl = newL\r\n self.isInterpolated = True\r\n self.radialScaleFactor = self.radialScaleFactorFunction(newR,newL)\r\n \r\n def saveObject(self, destination):\r\n with open(destination, 'wb') as output: # Overwrites any existing file.\r\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\r\n \r\nclass solarLimbDarkening(solarRadialDependence):\r\n # return solar scale factor to disk center \r\n # Allen 3rd edition \r\n #-(a1+2*a2)\r\n def __init__(self,r):\r\n solarRadialDependence.__init__(self,r)\r\n \r\n ro = np.arange(0.,1.01,0.01)\r\n lo = np.array([0.55, 1., 1.5, 2., 3., 5.])\r\n a1o = np.array([-0.47, -0.24, -0.15, -0.12, -0.11, -0.08])\r\n a2o = np.array([-0.23, -0.20, -0.21, -0.18, -0.12, -0.07])\r\n \r\n scaleF = np.zeros((np.size(ro), np.size(lo)))\r\n x = np.arcsin(ro)\r\n for i in range(np.size(lo)):\r\n scaleF[:,i] = 1. 
+ a1o[i]*(1.-np.cos(x))+a2o[i]*(1.-np.cos(x))**2.\r\n \r\n # need to do 2D interpolation now\r\n self.radialScaleFactorFunction = interp2d(ro,lo,scaleF.T,kind='linear')\r\n \r\n #x = np.arcsin(self.radius)\r\n \r\n# self.radialScaleFactorFunction = interf(np.arcsin(self.radius),self.lam) \r\n# self.x = np.rad2deg(x)\r\n \r\nclass forwardRadialDependence(solarRadialDependence):\r\n # return off disk scale factor\r\n def __init__(self,r,baseDir,line='SiX',normalizedTo=1.,crop=None):\r\n solarRadialDependence.__init__(self,r)\r\n self.baseDir = baseDir\r\n self.line = line\r\n self.normalizedTo = normalizedTo\r\n self.r = r\r\n if line == 'SiX':\r\n tempData =np.loadtxt(os.path.join(self.baseDir,\r\n 'radialData','siX.txt'), skiprows=1)\r\n elif line == 'FeXIII':\r\n tempData =np.loadtxt(os.path.join(self.baseDir,\r\n 'radialData','feXIII.txt'), skiprows=1)\r\n elif line == 'continuum':\r\n tempData =np.loadtxt(os.path.join(self.baseDir,\r\n 'radialData','continuum.txt'), skiprows=1)\r\n # remove values at the beginning\r\n if crop is not None:\r\n tempData = np.delete(tempData,np.arange(0,crop),axis=0)\r\n # create value for r=1 using linear extrapolation\r\n dx=tempData[1,0]-tempData[0,0]\r\n dy=tempData[1,1]-tempData[0,1]\r\n dr=tempData[0,0]-1.\r\n di=dy/dx*dr\r\n \r\n self.originalR = tempData[:,0]\r\n \r\n self.originalBrightness = tempData[:,1]/tempData[0,1]\r\n a = np.array([[1.,tempData[0,1]-di,-99,-99]])\r\n tempData = np.insert(tempData,0,a,axis=0)\r\n \r\n # normalize to specific value for R\r\n self.radialScaleFactor = np.interp(self.r, tempData[:,0],tempData[:,1])\r\n self.normalizationFactor = np.interp(self.normalizedTo, tempData[:,0],tempData[:,1])\r\n self.radialScaleFactor = self.radialScaleFactor/self.normalizationFactor\r\n\r\n \r\nclass judgeRadialDependence(solarRadialDependence):\r\n # return off disk scale factor\r\n # modification to FeXIII ratios for R 1.03 to 1.13\r\n # only valid for normalization to 1.1\r\n def __init__(self,r,line,normalizedTo=1.1,correction=True):\r\n solarRadialDependence.__init__(self,r)\r\n self.line = line\r\n if correction:\r\n assert (normalizedTo == 1.1), \"correction holds only for R=1.1 normalization\"\r\n self.normalizedTo = normalizedTo\r\n self.r = r\r\n self.originalR = np.array([1.03, 1.05, 1.07, 1.1, 1.2, 1.3, 1.5, 1.7])\r\n self.rMod = np.arange(1.,1.1,0.01)\r\n self.valMod = np.array([ 0.93419267,0.92586801,0.91761752,0.90944056,\r\n 0.89618208,0.88311688,0.89067384,0.89829545,\r\n 0.9309924,0.96487947,1.])\r\n if self.line == 'FeXIII1074':\r\n tempData = np.array([51.,40.,31.,22.,8.8,4.0,2.05,0.43])/51.\r\n elif self.line == 'FeXIII1079':\r\n tempData = np.array([13.,10.5,8.,5.1,1.1,0.42,0.09,0.021])/14.\r\n elif self.line == 'SiX1430':\r\n tempData = np.array([11.,9.,7.2,5.1,2.,0.9,0.22,0.09])/11.\r\n elif self.line == 'SiIX2584':\r\n tempData = np.array([2.7,2.,1.6,1.1,0.39,0.13,0.033,0.01])/2.7\r\n elif self.line == 'SiIX3928':\r\n tempData = np.array([1.1,1.,0.9,0.7,0.31,0.13,0.054,0.021])/1.1\r\n elif self.line == 'MgVIII3029':\r\n tempData = np.array([0.9,0.7,0.59,0.41,0.14,0.083,0.022,0.01])/0.9\r\n \r\n # create value for r=1 using linear extrapolation\r\n dx=self.originalR[1]-self.originalR[0]\r\n dy=tempData[1]-tempData[0]\r\n dr=self.originalR[0]-1.\r\n di=dy/dx*dr\r\n# self.originalScale = tempData\r\n self.originalScale = np.insert(tempData,0,tempData[0]-di)\r\n self.originalR = np.insert(self.originalR,0,1.)\r\n \r\n # normalize to specific value for R\r\n temp = np.log10(self.originalScale)\r\n \r\n 
temp=np.interp(self.r,self.originalR, np.log10(self.originalScale))\r\n self.radialScaleFactor = 10**temp\r\n # force constant ratio below R=1.1 for iron lines by altering 1079 line\r\n if (self.line == 'FeXIII1079' and correction):\r\n rtemp = self.r[self.r<=1.1]\r\n stemp = np.interp(rtemp,self.rMod,self.valMod)\r\n self.radialScaleFactor[self.r<=1.1] = self.radialScaleFactor[self.r<=1.1]*stemp\r\n self.normalizationFactor = np.interp(self.normalizedTo,\r\n self.originalR,self.originalScale)\r\n self.radialScaleFactor = self.radialScaleFactor/self.normalizationFactor\r\n\r\nclass zannaRadialDependence(solarRadialDependence):\r\n # return off disk scale factor\r\n def __init__(self,r,line='FeXIII1074',normalizedTo=1.):\r\n solarRadialDependence.__init__(self,r)\r\n self.line = line\r\n self.normalizedTo = normalizedTo\r\n self.r = r\r\n \r\n self.originalR = np.array([1.0, 1.05 ,1.1, 1.15, 1.2, 1.3, 1.4, 1.5, 1.6])\r\n self.originalScale = ((10.**np.array([2.65, 2.35, 2.0, 1.8, 1.566, 1.2, \r\n 0.9, 0.7, 0.5]))/(10.**2.65))\r\n temp = np.log10(self.originalScale)\r\n temp = np.interp(self.r,self.originalR,temp)\r\n# # normalize to specific value for R\r\n self.radialScaleFactor=10**temp\r\n self.normalizationFactor = np.interp(self.normalizedTo,\r\n self.originalR,self.originalScale)\r\n self.radialScaleFactor = self.radialScaleFactor/self.normalizationFactor\r\n\r\nclass kuhnRadialDependence(solarRadialDependence):\r\n # return off disk scale factor\r\n def __init__(self,r,line='FeXIII1074',normalizedTo=1.):\r\n solarRadialDependence.__init__(self,r)\r\n self.line = line\r\n self.normalizedTo = normalizedTo\r\n self.r = r\r\n# cw = 1.0747\r\n# disk = solarFluxAllen(cw)\r\n self.originalR = np.array([1.0 , 1.069, 1.521, 2.042 ])\r\n self.originalScale = (np.array([29.51,14.66 , 2.39, 0.63 ])*1e-7)/(29.51e-7)\r\n \r\n temp = np.log10(self.originalScale)\r\n temp = np.interp(self.r,self.originalR,temp)\r\n# # normalize to specific value for R\r\n self.radialScaleFactor=10**temp\r\n self.normalizationFactor = np.interp(self.normalizedTo,\r\n self.originalR,self.originalScale)\r\n self.radialScaleFactor = self.radialScaleFactor/self.normalizationFactor\r\n#radius = np.arange(1.0, 1.5,0.01)\r\n#lines = ['FeXIII1074','FeXIII1079']\r\n#fig, ax=plt.subplots()\r\n#a = judgeRadialDependence(radius,lines[0],normalizedTo=1.1)\r\n#b = judgeRadialDependence(radius,lines[1],normalizedTo=1.1,correction=False)\r\n#ax.plot(radius,a.radialScaleFactor/b.radialScaleFactor)\r\n##print(a.radialScaleFactor/b.radialScaleFactor)\r\n# \r\n##ax.set_yscale('log')\r\n##ax.set_xlim([1.0, 1.15])\r\n#ax.set_xlabel('distance [R$_{Sun}$]')\r\n#ax.set_ylabel('FeXIII radial scale factor ratio')\r\n##ax.legend(lines)\r\n##ax.set_title('FeXIII 1074')\r\n#plt.show()\r\n \r\n#limb=solarLimbDarkening(np.arange(0,1.01,.01))\r\n#plt.plot(limb.radius,limb.radialScaleFactor)\r\n#radius = np.arange(1., 1.6,0.001)\r\n#si = forwardRadialDependence(radius,baseDir, 'SiX',1.1)\r\n#fe = forwardRadialDependence(radius,baseDir, 'FeXIII',1.1)\r\n#con = forwardRadialDependence(radius,baseDir, 'continuum',1)\r\n#fig, ax=plt.subplots()\r\n#ax.plot(radius,si.radialScaleFactor,'k', radius, fe.radialScaleFactor, 'b',\r\n# radius,con.radialScaleFactor, 'r')#, si.originalR, si.originalBrightness, 'g')\r\n#ax.legend(['SiX','FeXIII','continuum'])\r\n#ax.set_xlabel('distance [R$_{Sun}$]')\r\n#ax.set_ylabel('relative line intensity')\r\n#plt.show()\r\n#\r\n#radius = np.arange(1., 1.6,0.001)\r\n#fez = zannaRadialDependence(radius,normalizedTo=1.1)\r\n#fej 
= judgeRadialDependence(radius,'FeXIII1074',normalizedTo=1.1)\r\n#fef = forwardRadialDependence(radius,baseDir,'FeXIII',normalizedTo=1.1)\r\n#fek = kuhnRadialDependence(radius,normalizedTo=1.1)\r\n#fig, ax=plt.subplots()\r\n#\r\n#ax.plot(fez.r,fez.radialScaleFactor,fej.r,fej.radialScaleFactor, 'r',\r\n# fef.r,fef.radialScaleFactor,'g',fek.r,fek.radialScaleFactor,'m')\r\n#ax.set_yscale('log')\r\n#ax.set_xlabel('distance [R$_{Sun}$]')\r\n#ax.set_ylabel('relative line intensity')\r\n#ax.legend(['DelZanna','Judge','FORWARD', 'Kuhn 1996'])\r\n#ax.set_title('FeXIII 10747')\r\n##\r\n##fig, ax=plt.subplots()\r\n##ax.plot(fek.r,fek.radialScaleFactor*4.5e-6)\r\n##ax.set_yscale('log')\r\n#plt.show()\r\n##\r\n#\r\n## use FeXIII for FeIX at 2218 for now\r\n#radius = np.arange(1.0, 1.6,0.001)\r\n#lines = ['FeXIII1074','FeXIII1079','SiX1430','SiIX2584',\r\n# 'MgVIII3029','SiIX3928']\r\n#fig, ax=plt.subplots()\r\n#for i in range(np.size(lines)):\r\n# tmp = judgeRadialDependence(radius,lines[i],\r\n# normalizedTo=1.1)\r\n# ax.plot(radius,tmp.radialScaleFactor)\r\n# \r\n##ax.set_yscale('log')\r\n#ax.set_xlim([1.0, 1.15])\r\n#ax.set_xlabel('distance [R$_{Sun}$]')\r\n#ax.set_ylabel('relative line intensity')\r\n#ax.legend(lines)\r\n##ax.set_title('FeXIII 1074')\r\n#plt.show()\r\n \r\n#radius = np.arange(0., 1.01,0.01)\r\n#lam = np.arange(0.5, 5.5,0.5)\r\n#ld = solarLimbDarkening(radius)\r\n#x = np.rad2deg(np.arcsin(radius))\r\n#\r\n#fig, ax=plt.subplots()\r\n#for i in range(np.size(lam)):\r\n# tmp = ld.radialScaleFactorFunction(radius,lam[i])\r\n# ax.plot(radius,tmp)\r\n#\r\n##ax.set_yscale('log')\r\n#ax.set_xlabel('distance [R$_{Sun}$]')\r\n#ax.set_ylabel('relative intensity')\r\n#ax.legend(lam)\r\n##ax.set_title('FeXIII 1074')\r\n#plt.show()\r\n\r\n\r\n","sub_path":"solarRadialDependence.py","file_name":"solarRadialDependence.py","file_ext":"py","file_size_in_byte":10763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"603647559","text":"import unittest\nfrom baby20.db.db import query_db, run_sql\nfrom baby20.models.kid_dal import KidDAL\n\n\nclass KidDALTest(unittest.TestCase):\n def setUp(self):\n insert_sql = \"INSERT INTO kids (username, first_name, middle_name, last_name, dob, parent_username)\"\n insert_sql += \"VALUES ('testuser', 'first', 'middle', 'last', '2012-09-03', 'parentuser')\"\n\n run_sql(insert_sql)\n\n def tearDown(self):\n delete_sql = \"DELETE FROM kids WHERE username = 'testuser'\"\n run_sql(delete_sql)\n\n def test_find_by_parent_username(self):\n kid_dal = KidDAL()\n kid = kid_dal.find_by_parent_username('testuser')\n\n self.assertEquals(kid.first_name, 'first')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"baby2.0/baby20/tests/test_kid_dal.py","file_name":"test_kid_dal.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"180035260","text":"__author__ = 'marcinpilarczyk'\n\nimport json\nimport time\n\nimport requests\n\n\"\"\"\nRaZberry Controller Python API\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis lib is designed to simplify communication with RaZberry Z-Wave controllers\n\"\"\"\n\nclass RazberryController(object):\n\n\tdef __init__(self, baseUrl):\n\t\tself.BASE_URL = baseUrl\n\t\tself.devices = []\n\n\tdef get_simple_devices_info(self):\n\t\tsimpleRequestUrl = self.BASE_URL + \"/ZWaveAPI/Data/*\"\n\t\tj = requests.get(simpleRequestUrl).json()\n\n\t\t#self.categories = {'Binary Power 
Switch'}\n\n\t\tself.device_id_map = {}\n\n\t\tdevs = j.get('devices')\n\t\tfor dev in devs:\n\t\t\tself.device_id_map[dev] = dev\n\n\n\t#get list of connected devices, the categoryFilter param can be either a string or array of strings\n\tdef get_devices(self, categoryFilter=''):\n\n\t\t# the Razberry rest API is a bit rough so we need to make 2 calls to get all the info e need\n\t\tself.get_simple_devices_info()\n\n\t\tarequestUrl = self.BASE_URL + \"/ZWaveAPI/Data/*\"\n\t\tj = requests.get(arequestUrl).json()\n\n\t\tself.devices = []\n\t\tdevices = j.get('devices')\n\n\t\tfor keyDev in devices:\n\t\t\tfor keyInst in devices[keyDev].get('instances'):\n\t\t\t\t#item['deviceInfo'] = self.device_id_map.get(item)\n\t\t\t\tself.devices.append(RazberrySwitch(devices[keyDev], keyDev, keyInst, self))\n\n\t\treturn self.devices\n\n\nclass RazberryDevice(object):\n\n\tdef __init__(self, aJSonObj, aDeviceId, aInstanceId, razberryController):\n\t\tself.jsonState = aJSonObj\n\t\tself.deviceId = aDeviceId\n\t\tself.instanceId = aInstanceId\n\t\tself.razberryController = razberryController\n\t\tself.value = True\n\t\tself.name = ''\n\t\tif self.jsonState.get('data'):\n\t\t\tself.category = self.jsonState['data']['deviceTypeString']['value']\n\t\t\tself.name = self.jsonState['data']['givenName']['value']\n\t\telse:\n\t\t\tself.category = ''\n\n\t\tif not self.name:\n\t\t\tif self.category:\n\t\t\t\tself.name = 'RaZberry ' + self.category + ' ' + str(self.deviceId)\n\t\t\telse:\n\t\t\t\tself.name = 'RaZberry Device ' + str(self.deviceId)\n\n\n\tdef set_value(self, value):\n\t\trequestUrl = self.razberryController.BASE_URL + \"/ZWaveAPI/Run/devices[\" + str(self.deviceId) + \"].instances[\" + str(self.instanceId) + \"].SwitchBinary.Set(\" + str(value) + \")\"\n\t\tr = requests.get(requestUrl)\n\t\tself.value = r.text\n\n\tdef get_value(self):\n\t\treturn self.value\n\n\tdef refresh_value(self):\t\t\n\t\trequestUrl = self.razberryController.BASE_URL + \"/ZWaveAPI/Run/devices[\" + str(self.deviceId) + \"].instances[\" + str(self.instanceId) + \"].SwitchBinary.data.level.value\"\n\t\tr = requests.get(requestUrl)\n\t\tself.value = r.text\n\t\treturn self.get_value()\n\n\t@property\n\tdef razberry_device_id(self):\n\t\treturn self.deviceId\n\n\nclass RazberrySwitch(RazberryDevice):\n\n\tdef __init__(self, aJSonObj, aDeviceId, aInstanceId, razberryController):\n\t\tsuper().__init__(aJSonObj, aDeviceId, aInstanceId, razberryController)\n\n\tdef switch_on(self):\n\t\tself.set_value(255)\n\n\tdef switch_off(self):\n\t\tself.set_value(0)\n\n\tdef is_switched_on(self):\n\t\tself.refresh_value()\n\t\tval = self.get_value()\n\t\tif val == 'true':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n","sub_path":"homeassistant/external/razberry/razberry.py","file_name":"razberry.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"297305488","text":"# Copyright 2009 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions related to IP Addresses & Hostnames.\"\"\"\n\n__author__ = 'tstromberg@google.com (Thomas Stromberg)'\n\nimport re\nimport sys\nimport zlib\n\nif __name__ == \"__main__\":\n sys.path.append('../../third_party')\n\n# Python 3.2 comes with ipaddress. Fall back to ipaddr.\ntry:\n import ipaddress\n IP = ipaddress.ip_address\n IP_NETWORK = ipaddress.ip_network\nexcept ImportError:\n from ipaddr import ipaddr\n IP = ipaddr.IPAddress\n IP_NETWORK = ipaddr.IPNetwork\n\n\ndef extract_ips(ip_string):\n \"\"\"Return a tuple of ip addressed held in a string.\n\n >>> extract_ips('127.0.0.1 8.8.8.8')\n [IPv4Address('127.0.0.1'), IPv4Address('8.8.8.8')]\n\n >>> extract_ips('127.0.0.1 ::1 8.8.8.8 2001:DB8::1')\n ['::1', '2001:DB8::1', IPv4Address('127.0.0.1'), IPv4Address('8.8.8.8')]\n \"\"\"\n ips = []\n # IPV6 If this regexp is too loose, see Regexp-IPv6 in CPAN for inspiration.\n ips.extend(re.findall('[\\dabcdef:]+:[\\dabcdef:]+', ip_string, re.IGNORECASE))\n for ip in re.findall('\\d+\\.\\d+\\.\\d+\\.+\\d+', ip_string):\n ips.append(IP(ip))\n return ips\n\n\ndef mask_string_with_ips(string):\n \"\"\"Mask all private IP addresses listed in a string.\n\n >>> mask_string_with_ips('10.0.1.40 192.168.1.1')\n '10.0.x.x-9470 192.168.x.x-5663'\n\n >>> mask_string_with_ips('8.8.8.8 75.75.75.75')\n '8.8.8.8 75.75.75.75'\n \"\"\"\n for ip in extract_ips(string):\n if ip.is_private:\n string = string.replace(str(ip), mask_ip(ip))\n return string\n\n\ndef mask_ip(ip):\n \"\"\"Return an IP with half the bits replaced with a checksum.\n\n >>> mask_ip(IP('10.1.10.25'))\n '10.1.x.x-6985'\n\n >>> mask_ip(IP('3ffe:1900:4545:3:200:f8ff:fe21:67cf'))\n '3ffe:1900:4545:3::xx:8135'\n\n >>> mask_ip(IP('fe80::200:f8ff:fe21:67cf'))\n 'fe80::200:f8ff::xx:1119'\n \"\"\"\n if sys.version_info.major <= 2:\n ip_bytes = bytes(str(ip))\n else:\n ip_bytes = bytes(str(ip), 'ascii')\n\n checksum = zlib.crc32(ip_bytes) & 0xffffffff\n if ip.version == 6:\n prefix = ':'.join(str(ip).split(':')[:4])\n return prefix + '::xx:%s' % str(checksum)[-4:]\n else:\n prefix = '.'.join(str(ip).split('.')[:2])\n return prefix + '.x.x-%s' % str(checksum)[-4:]\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"namebench/ip_util.py","file_name":"ip_util.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"280995821","text":"# Doc:\n# https://pygithub.readthedocs.io/en/latest/examples/Commit.html\n# https://github.com/PyGithub/PyGithub/tree/master/github\n# https://github.com/PyGithub/PyGithub/blob/master/github/Commit.py\n# https://github.com/PyGithub/PyGithub/blob/master/github/GithubObject.py\n# https://github.com/PyGithub/PyGithub/blob/master/github/Repository.py\n#\n\n\nfrom github import Github, GithubException\n\ng = Github(\"anjali.tomar.jiwaji@gmail.com\", \"milananju818\")\n\n# for repo in g.get_user().get_repos():\n# 
print(\"anjali......................\")\n# print(repo.name)\n\n#get topics\nrepo = g.get_repo(\"sibtc/restful-apis-example\")\nrepo.get_topics()\nrepo.stargazers_count\n\n# This code is giving all the open issues in the repository\n# open_issues = repo.get_issues(state='open')\n# for issue in open_issues:\n# print(issue)\n\n# this code is giving all the files in the repo recusively\n# contents = repo.get_contents(\"\")\n# while len(contents) > 1:\n# file_content=contents.pop(0)\n# if file_content.type==\"dir\":\n# contents.extend(repo.get_contents(file_content.path))\n# else:\n# print(file_content)\n\n# commit = repo.get_commit(sha=\"b49ec1e3306a778ed5bd0b27fab82a9a534efce8\")\n# print(commit.commit.author.date)\n# print(commit.commit.message)\n# print(commit.commit.committer.date)\n\nr2 = g.get_repo(\"apache/spark\")\ncommit = r2.get_commit(sha=\"a00181418911307725524641254439712e95445b\")\nprint(commit.commit.author.date)\n# print(commit.commit.message)\n# print(commit.commit.committer.date)\n# print(commit.commit.author.name)\n\n#print(type(r2))\ncommits = r2.get_commits()\nprint(r2.name)\nprint(type(commits))\nprint(\"-------------------------------\")\n# for commit in get_commits:\n # print(commit.name)\n# branch = g.get_repo(\"apache/spark\").get_branch(\"master\")\n# print(type(branch))\n# print(branch.commit.sha)\n\n\n\n\n# pr = repo.get_pull(1)\n# print(pr.user.login)\n","sub_path":"pygithub.py","file_name":"pygithub.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"146839495","text":"# Zodiac Compatibility\r\n#@course ICS3UC\r\n# @author Nathaniel\r\n# @date 2019/04/01\r\n\r\n# Subporgram to get yes or no answer\r\ndef validation():\r\n valid = False\r\n while (not valid):\r\n query = input(\"(y/n): \")\r\n if (query in ('y','n')):\r\n valid = True\r\n else:\r\n print(\"Invalid Input\")\r\n return query\r\n\r\n# Subprogram to determine birthday\r\ndef getBirthday(): \r\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\r\n valid1 = False\r\n while (not valid1):\r\n try:\r\n month, day = (input(\"(mmm dd): \").split(' '))\r\n month = month.lower()\r\n day = int(day)\r\n if (month in months) and (day in range (1,32)):\r\n valid1 = True\r\n else:\r\n print(\"Make sure you have entered your months, and days accurately!\")\r\n except:\r\n print(\"Make sure that you follow the format correctly, and seperate your month and day with a space\")\r\n return month, day\r\n\r\n# Subprogram to determin Zodiac Sign\r\ndef getZodiac(month, day):\r\n if (month == \"jan\"):\r\n if (day >= 20):\r\n result = 'Capricorn'\r\n else:\r\n result = 'Sagittarius'\r\n elif (month == \"feb\"):\r\n if (day >= 16):\r\n result = 'Aquarius'\r\n else:\r\n result = 'Capricorn'\r\n elif (month == 'mar'):\r\n if (day >= 11):\r\n result = 'Pisces'\r\n else:\r\n result = 'Aquarius'\r\n elif (month == 'apr'):\r\n if (day >= 18):\r\n result = 'Aries'\r\n else:\r\n result = 'Pisces'\r\n elif (month == 'may'):\r\n if (day >= 13):\r\n result = 'Taurus'\r\n else:\r\n result = 'Aries'\r\n elif (month == 'jun'):\r\n if (day >= 21):\r\n result = 'Gemini'\r\n else:\r\n result = 'Taurus'\r\n elif (month == 'jul'):\r\n if (day >= 20):\r\n result = 'Cancer'\r\n else:\r\n result = 'Gemini'\r\n elif (month == 'aug'):\r\n if (day >= 10):\r\n result = 'Leo'\r\n else:\r\n result = 'Cancer'\r\n elif (month == 'sep'):\r\n if (day >= 16):\r\n result = 'Virgo'\r\n else:\r\n result = 'Leo'\r\n elif 
(month == 'oct'):\r\n if (day >= 30):\r\n result = 'Libra'\r\n else:\r\n result = 'Virgo'\r\n elif (month == 'nov'):\r\n if (day >= 23) and (day < 29):\r\n result = 'Scorpio'\r\n elif (day >= 29):\r\n result = 'Ophiuchus'\r\n else:\r\n result = 'Libra'\r\n elif (month == 'dec'):\r\n if (day >= 17):\r\n result = 'Sagittarius'\r\n else:\r\n result = 'Ophiuchus'\r\n return result\r\n\r\n# Subprogram to determine compatibility\r\ndef getCompatibility(sign1, sign2):\r\n if ((sign1 == \"Aries\" or sign1 == \"Leo\" or sign1 == \"Sagittarius\") and (sign2 == \"Aries\" or sign2 == \"Leo\" or sign2 == \"Sagittarius\")):\r\n compatibility = \"Compatible\"\r\n elif ((sign1 == \"Gemini\" or sign1 == \"Libra\" or sign1 == \"Aquarius\") and (sign2 == \"Gemini\" or sign2 == \"Libra\" or sign2 == \"Aquarius\")):\r\n compatibility = \"Compatible\"\r\n elif ((sign1 == \"Taurus\" or sign1 == \"Virgo\" or sign1 == \"Capricorn\") and (sign2 == \"Taurus\" or sign2 == \"Virgo\" or sign2 == \"Capricorn\")):\r\n compatibility = \"Compatible\"\r\n elif ((sign1 == \"Cancer\" or sign1 == \"Scorpio\" or sign1 == \"Pisces\" or sign1 == \"Ophiuchus\") and (sign2 == \"Cancer\" or sign2 == \"Scorpio\" or sign2 == \"Pisces\" or sign2 == \"Ophiuchus\")):\r\n compatibility = \"compatible\"\r\n else:\r\n compatibility = \"not compatible\" \r\n return compatibility\r\n \r\n# Main Program\r\nif __name__=='__main__':\r\n print(\"Welcome to the zodiac compatibility checker\")\r\n print(\"Would you like to begin?\")\r\n query = validation()\r\n \r\n while (query == 'y'):\r\n print(' ')\r\n print(\"Enter dates in the format ex:'may 11'\")\r\n \r\n # Get Birthday Person 1\r\n print(\"What is the first person's birthday day?\")\r\n month1, day1 = getBirthday()\r\n \r\n # Get Zodiac sign person 1\r\n sign1 = getZodiac(month1, day1)\r\n \r\n # Get Birthday Person 2\r\n print(\"What is the second person's birthday day?\")\r\n month2, day2 = getBirthday()\r\n \r\n # Get Zodiac sign person 2\r\n sign2 = getZodiac(month2, day2)\r\n \r\n # Determine their compatibility\r\n compatibility = getCompatibility(sign1, sign2)\r\n \r\n # Output the result\r\n print(\" \")\r\n print(\"Person 1 is a \"+sign1)\r\n print(\"Person 2 is a \"+sign2)\r\n print(' ')\r\n print(\"These two people are\", compatibility)\r\n \r\n print(\"Do you want to retry?\")\r\n query = validation()\r\n \r\n print(\"Goodbye!\")","sub_path":"newZodiac.py","file_name":"newZodiac.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"183926219","text":"import warnings\r\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\r\nfrom gensim.summarization.summarizer import summarize\r\nfrom gensim.summarization import keywords\r\n \r\nimport requests\r\n \r\n# getting text document from Internet\r\ntext = requests.get('http://rare-technologies.com/the_matrix_synopsis.txt').text\r\n \r\n \r\n#getting text document from file\r\nfname=\"C:\\\\Users\\\\pochaudh\\\\Desktop\\\\python image\\\\demo.txt\"\r\nwith open(fname, 'r') as myfile:\r\n text=myfile.read()\r\n \r\n \r\n#getting text document from web, below function based from 3\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import urlopen\r\n \r\ndef get_only_text(url):\r\n \"\"\" \r\n return the title and the text of the article\r\n at the specified url\r\n \"\"\"\r\n page = urlopen(url)\r\n soup = BeautifulSoup(page, \"lxml\")\r\n text = ' '.join(map(lambda p: p.text, soup.find_all('p')))\r\n return 
soup.title.text, text \r\n \r\n \r\nprint ('Summary:')\r\nprint (summarize(text, ratio=0.01))\r\n \r\nprint ('\\nKeywords:')\r\nprint (keywords(text, ratio=0.01))\r\n \r\nurl=\"https://en.wikipedia.org/wiki/Deep_learning\"\r\ntext = get_only_text(url)\r\n \r\nprint ('Summary:') \r\nprint (summarize(str(text), ratio=0.01))\r\n \r\nprint ('\\nKeywords:')\r\n \r\n# higher ratio => more keywords\r\nprint (keywords(str(text), ratio=0.01))","sub_path":"python image/SummarizerWikiGensin.py","file_name":"SummarizerWikiGensin.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"15740260","text":"import time\n\nstart = time.time()\n\nhailstone_sequence = 0\nmax_number = 1000000\nresult = 0\ndic = dict()\n\n# solution 1\n# def getIterativeSequence(num):\n# if num % 2 == 0:\n# num /= 2\n# else:\n# num = (3 * num) + 1\n#\n# return num\n#\n#\n# for i in range(2, max_number):\n# count = 0\n# tmp = i\n# while tmp != 1:\n# count += 1\n# tmp = getIterativeSequence(tmp)\n#\n# if hailstone_sequence < count:\n# hailstone_sequence = count\n# result = i\n\ndef getIterativeSequenceCount(num):\n tmp = num\n cnt = 0\n while tmp != 1:\n if i not in dic:\n if tmp % 2 == 0:\n tmp /= 2\n else:\n tmp = (3 * tmp) + 1\n cnt += 1\n else:\n cnt += (dic[i] - 1)\n break\n dic[num] = cnt\n\n return cnt\n\n\nfor i in range(2, max_number):\n count = getIterativeSequenceCount(i)\n\n if hailstone_sequence < count:\n hailstone_sequence = count\n result = i\n\nprint(\"result : [%s] elapsed [%s] seconds.\" % (result, (time.time() - start)))\n","sub_path":"euler/problem14/chjsik.py","file_name":"chjsik.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"211846332","text":"from time import sleep\n\nfrom dbnd import log_metric, pipeline, task\nfrom dbnd._core.current import current_task_run\n\n\n@task\ndef operation_int(\n input_a, input_b=0, pause=0.0, log_metrics=True, external_resources=0\n):\n # type: (int, int, float, bool, int) -> int\n if log_metrics:\n log_metric(\"input_a\", input_a)\n log_metric(\"input_b\", input_b)\n\n tr = current_task_run()\n for i in range(external_resources):\n tr.set_external_resource_urls(\n {\"url_%s_%d\" % (tr.task.task_id, i): \"http://localhost\"}\n )\n if pause:\n sleep(pause)\n return input_a + input_b\n\n\n@task\ndef init_acc_int(input=0):\n # type: (int) -> int\n log_metric(\"input\", input)\n return input\n\n\n@pipeline\ndef large_pipe_int(width=10, depth=1, pause=0.0):\n # type: (int, int, float) -> int\n res = init_acc_int()\n for i in range(width):\n acc = init_acc_int()\n for d in range(depth):\n acc = operation_int(acc, i, pause)\n res = operation_int(res, acc, pause)\n\n return res\n","sub_path":"plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/pipelines/large_pipeline.py","file_name":"large_pipeline.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"233901008","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n Time complexity:\n Since, in each step, the number of subsets doubles as we add each\n element to all the existing subsets, therefore, we will have a total of\n O(2N) subsets, where ‘N’ is the total number of elements in the input\n set. 
And since we construct a new subset from an existing set,\n therefore, the time complexity of the above algorithm will be O(N*2^N).\n\n Space complexity:\n All the additional space used by our algorithm is for the output\n list. Since we will have a total of O(2^N) subsets, the space complexity\n of our algorithm is also O(2^N).\n \"\"\"\n\n subsets = []\n # start by adding the empty subset\n subsets.append([])\n\n for current_num in nums:\n for i in range(len(subsets)):\n # create a new subset from the existing subset and insert the\n # current element to it\n new_subset = list(subsets[i])\n new_subset.append(current_num)\n subsets.append(new_subset)\n\n return subsets\n","sub_path":"Problems/Leetcode/78_Subsets.py","file_name":"78_Subsets.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"529851444","text":"import unittest\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n p_val = p.val\n q_val = q.val\n if p_val > q_val:\n p_val, q_val = q_val, p_val\n high_root = root\n while True:\n if p_val <= high_root.val and q_val >= high_root.val:\n return high_root\n if q_val < high_root.val:\n high_root = high_root.left\n else:\n high_root = high_root.right\n\n\nclass TestCase(unittest.TestCase):\n def test_one(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"235.py","file_name":"235.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"421531260","text":"# -*- coding: utf-8 -*-\n# Copyright 2018-2019 by Boris Feld\n\n\"\"\"Curses interface\n\"\"\"\nfrom __future__ import print_function\n\nimport asyncio\n\nimport urwid\n\nfrom balto.displayer.curses_widgets import (\n FOOTER,\n PALETTE,\n PROGRESS_BAR,\n STATUS,\n RootParentNode,\n get_selected_tests,\n set_selected_tests,\n)\n\n\nclass CursesTestDisplayer(object):\n def __init__(self, tests, topnode, walker):\n self.tests = tests\n self.topnode = topnode\n self.walker = walker\n\n self.test_number = None\n self.current_test_number = 0\n\n def refresh_screen(self):\n self.topnode.refresh()\n self.walker._modified()\n\n async def parse_message(self, message):\n msg_type = message.get(\"_type\")\n\n if msg_type == \"session_start\":\n self.test_number = message[\"test_number\"]\n self.current_test_number = 0\n elif msg_type == \"test_result\":\n # Ignore invalid json\n if \"id\" not in message or \"outcome\" not in message:\n return\n\n self.tests[message[\"id\"]] = message\n\n test_number = self.current_test_number + 1\n self.current_test_number = test_number\n\n # Update progress bar\n PROGRESS_BAR.set_completion(float(test_number) / self.test_number)\n\n self.refresh_screen()\n\n elif msg_type == \"test_collection\":\n # Ignore invalid json\n if \"id\" not in message:\n return\n\n # Force a status\n message[\"outcome\"] = \"not_run\"\n\n self.tests[message[\"id\"]] = message\n\n test_number = self.current_test_number + 1\n self.current_test_number = test_number\n\n # Update progress bar\n PROGRESS_BAR.set_completion(float(test_number) / self.test_number)\n\n self.refresh_screen()\n\n elif msg_type == \"session_end\":\n # PROGRESS_BAR.set_completion(0)\n 
pass\n else:\n raise Exception(message)\n\n\nclass CursesTestInterface(object):\n def __init__(self, repository, eventloop, tests, suites, em, task_list):\n self.repository = repository\n self.eventloop = eventloop\n self.tests = tests\n self.suites = suites\n self.em = em\n self.task_list = task_list\n\n self.urwid_loop = urwid.MainLoop(\n self._get_urwid_view(),\n PALETTE,\n event_loop=urwid.AsyncioEventLoop(loop=self.eventloop),\n unhandled_input=self.unhandled,\n )\n\n self.displayer = CursesTestDisplayer(self.tests, self.topnode, self.walker)\n\n # Register the callbacks\n self.em.register(self.displayer.parse_message)\n\n def _get_urwid_view(self):\n self.topnode = RootParentNode(self.tests)\n self.walker = urwid.TreeWalker(self.topnode)\n listbox = urwid.TreeListBox(self.walker)\n\n listbox.offset_rows = 1\n footer = urwid.AttrWrap(FOOTER, \"foot\")\n return urwid.Frame(urwid.AttrWrap(listbox, \"body\"), footer=footer)\n\n def run(self):\n if len(self.tests.tests) == 0:\n self.collect_all_tests()\n\n self.urwid_loop.run()\n\n def unhandled(self, key):\n if key in (\"ctrl c\", \"q\"):\n raise urwid.ExitMainLoop()\n elif key == \"a\":\n self.select_all_tests()\n elif key in (\"r\", \"enter\"):\n tests = list(get_selected_tests())\n\n if len(tests) == 0:\n self.launch_all_tests()\n return\n\n self.launch_specific_tests(tests)\n elif key == \"f\":\n self.select_tests(\"failed\")\n elif key == \"s\":\n self.select_tests(\"skipped\")\n elif key == \"p\":\n self.select_tests(\"passed\")\n else:\n STATUS.set_text(\"Key pressed DEBUG: %s\" % repr(key))\n\n def select_all_tests(self):\n all_tests = []\n\n for suite in self.suites.values():\n for test_name in suite.get_tests_name():\n all_tests.append((suite, test_name))\n\n set_selected_tests(all_tests)\n\n self.displayer.refresh_screen()\n STATUS.set_text(\"Selected %d tests\" % (len(get_selected_tests())))\n\n def launch_all_tests(self):\n all_tests = []\n\n for suite in self.suites.values():\n for test_name in suite.get_tests_name():\n all_tests.append((suite, test_name))\n\n set_selected_tests(all_tests)\n\n c = self._launch_all_tests()\n task = asyncio.ensure_future(c, loop=self.eventloop)\n asyncio.wait(task)\n\n PROGRESS_BAR.set_completion(0)\n STATUS.set_text(\"Running all tests\")\n\n def collect_all_tests(self):\n c = self._collect_all_tests()\n task = asyncio.ensure_future(c, loop=self.eventloop)\n self.task_list.append(task)\n\n PROGRESS_BAR.set_completion(0)\n STATUS.set_text(\"Collecting all tests\")\n\n def launch_specific_tests(self, tests):\n c = self._launch_specific_tests(tests)\n task = asyncio.ensure_future(c, loop=self.eventloop)\n self.task_list.append(task)\n\n PROGRESS_BAR.set_completion(0)\n STATUS.set_text(\"Running %s tests\" % len(get_selected_tests()))\n\n def select_tests(self, outcome):\n tests = self.tests.get_test_by_outcome(outcome)\n set_selected_tests(tests)\n\n self.displayer.topnode.refresh()\n self.displayer.walker._modified()\n STATUS.set_text(\"Selected %d %s tests\" % (len(tests), outcome))\n\n async def _collect_all_tests(self):\n tasks = [\n suite.collect_all(self.repository, self.em, loop=self.eventloop)\n for suite in self.suites.values()\n ]\n return await asyncio.gather(*tasks, loop=self.eventloop)\n\n async def _launch_all_tests(self):\n tasks = [\n suite.launch_all(self.repository, self.em, loop=self.eventloop)\n for suite in self.suites.values()\n ]\n return await asyncio.gather(*tasks, loop=self.eventloop)\n\n async def _launch_specific_tests(self, tests):\n # Dispatch test by suite\n 
test_by_suite = {}\n\n for suite, test in tests:\n test_by_suite.setdefault(suite, []).append(test)\n\n tasks = []\n for suite, suite_tests in test_by_suite.items():\n tasks.append(\n suite.launch_tests(\n self.repository, self.em, self.eventloop, suite_tests\n )\n )\n\n return await asyncio.gather(*tasks, loop=self.eventloop)\n","sub_path":"balto/displayer/curses.py","file_name":"curses.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"174358703","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, orm\nimport openerp.addons.decimal_precision as dp\n\nclass sale_order_discount(orm.Model):\n _name = 'sale.order.discount'\n _description = 'Sconto Globale'\n _order = 'sequence ASC' \n _columns= {\n 'name': fields.many2one('account.discount.type', string='Nome', required = True),\n 'application': fields.selection([('partner','Cliente'),('payment','Termine di Pagamento'),('general','Generale')], string='Applicazione', required=True),\n 'type': fields.selection([('perc','%'),('fisso','Fisso')], string='Tipo', required=True),\n 'sequence': fields.integer('Sequence'),\n 'value': fields.float('Valore'),\n 'sale_id': fields.many2one('sale.order', string='Ordine'),\n } \n _defaults={\n 'sequence': 0,\n 'type': 'perc',\n 'application': 'general',\n }\n \n def onchange_discount_name(self, cr, uid, ids, name, type, context=None):\n value = {}\n if name:\n disc_obj = self.pool.get('account.discount.type').browse(cr,uid,name)\n if not type:\n value['type'] = disc_obj.type\n return {'value':value}\n\nclass sale_order_line(orm.Model):\n\n _inherit = \"sale.order.line\"\n\n _columns = {\n 'free': fields.selection([\n ('gift', 'Gift on Amount Total'),\n ('base_gift', 'Gift on Amount Untaxed')],\n 'Free')\n }\n\n def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False,\n context=None):\n \"\"\"Prepare the dict of values to create the new invoice line for a\n sales order line. 
This method may be overridden to implement custom\n invoice generation (making sure to call super() to establish\n a clean extension chain).\n\n :param browse_record line: sale.order.line record to invoice\n :param int account_id: optional ID of a G/L account to force\n (this is used for returning products including service)\n :return: dict of values to create() the invoice line\n \"\"\"\n res = {}\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(\n cr, uid, line, account_id, context)\n res.update({\n 'free': line.free,\n })\n return res\n\nclass sale_order_with_discount(orm.Model):\n\n _inherit = 'sale.order'\n\n def _get_order(self, cr, uid, ids, context=None):\n result = {}\n for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):\n result[line.order_id.id] = True\n return result.keys()\n\n\n def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):\n return super(sale_order_with_discount,self)._amount_all_wrapper(cr, uid, ids, field_name, arg, context=context)\n\n def _amount_all(self, cr, uid, ids, field_name, arg, context=None): \n \n res = super(sale_order_with_discount,self)._amount_all(cr,uid,ids,field_name,arg,context=context) \n cur_obj = self.pool.get('res.currency')\n\n for order in self.browse(cr, uid, ids, context=context):\n \n cur = order.pricelist_id.currency_id\n res[order.id]['displayed_global_discount_total'] = 0.0\n res[order.id]['global_discount_total'] = 0.0 \n res[order.id]['amount_untaxed_free'] = 0.0\n res[order.id]['amount_tax_free'] = 0.0 \n \n original_total_value = 0.0\n tax_lines = {} \n \n for line in order.order_line:\n if line.free not in ['gift', 'base_gift'] and (not line.product_id or (line.product_id and not line.product_id.no_discount)): \n \n original_tax_value = 0.0\n for tax in line.tax_id:\n original_tax_value += line.price_subtotal * tax.amount \n original_untaxed_value = line.price_subtotal\n original_total_value = original_untaxed_value + original_tax_value\n \n for discount in order.global_discount_lines:\n \n val = original_tax_value\n val1 = original_untaxed_value\n val2 = original_total_value\n \n if discount.type=='fisso':\n res[order.id]['global_discount_total'] += discount.value\n res[order.id]['displayed_global_discount_total'] += discount.value\n perc = discount.value / val1 \n else:\n perc = discount.value/100\n \n sc = val*perc \n val -= sc\n sc1 = val1*perc\n val1 -= sc1\n \n if discount.type=='perc':\n res[order.id]['global_discount_total']+= sc+sc1\n res[order.id]['displayed_global_discount_total'] += sc1\n \n original_tax_value = cur_obj.round(cr, uid, cur, val)\n original_untaxed_value = cur_obj.round(cr, uid, cur, val1)\n original_total_value = original_tax_value + original_untaxed_value\n \n if line.free in ['gift', 'base_gift']:\n res[order.id]['amount_untaxed_free'] += line.price_subtotal\n if line.free == 'gift':\n for tax in line.tax_id:\n if tax.amount in tax_lines:\n tax_lines[tax.amount] += line.price_subtotal\n else:\n tax_lines[tax.amount] = line.price_subtotal\n\n for tl in tax_lines:\n res[order.id]['amount_tax_free'] += tax_lines[tl] * tl\n \n res[order.id]['amount_untaxed'] = res[order.id]['amount_untaxed'] - res[order.id]['amount_untaxed_free'] - res[order.id]['displayed_global_discount_total']\n res[order.id]['amount_tax'] = res[order.id]['amount_tax'] - res[order.id]['amount_tax_free'] - (res[order.id]['global_discount_total']-res[order.id]['displayed_global_discount_total'])\n res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + 
res[order.id]['amount_tax'] - res[order.id]['global_discount_total']\n \n if original_total_value: \n res[order.id]['global_discount_percentual'] = res[order.id]['displayed_global_discount_total']/(res[order.id]['amount_untaxed'] + res[order.id]['displayed_global_discount_total'])\n else:\n res[order.id]['global_discount_percentual'] = 0.0\n \n return res\n\n\n\n _columns = {\n\n 'global_discount_lines': fields.one2many('sale.order.discount', 'sale_id', string='Sconti Globali'), \n 'amount_untaxed_free' : fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), multi='sums', string='\"For Free\" Amount', \n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),\n }), \n 'amount_tax_free' : fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='\"For Free\" Tax', multi='sums',\n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),\n }), \n 'displayed_global_discount_total' : fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Totale Sconti', multi='sums', help=\"The amount without tax.\", track_visibility='always',\n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line','global_discount_lines'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),\n }),\n 'global_discount_percentual': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Percentuale Sconti', multi='sums', help=\"The amount without tax.\", track_visibility='always',\n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line','global_discount_lines'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),\n }), \n 'global_discount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Totale Sconti', multi='sums', help=\"The amount without tax.\", track_visibility='always',\n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line','global_discount_lines'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),\n }),\n 'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount', multi='sums', help=\"The amount without tax.\", track_visibility='always',\n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line','global_discount_lines'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),\n }),\n 'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes', multi='sums', help=\"The tax amount.\", \n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line','global_discount_lines'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),\n }),\n 'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total', multi='sums', help=\"The total amount.\",\n store={\n 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line','global_discount_lines'], 10),\n 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 
'product_uom_qty'], 10),\n }), \n }\n\n def _prepare_invoice(self, cr, uid, order, lines, context=None):\n\n res = super(sale_order_with_discount,self)._prepare_invoice(cr, uid, order=order, lines=lines, context=context)\n if res and order.global_discount_lines:\n discount_lines = []\n for line in order.global_discount_lines:\n discount_lines.append(self.pool.get('account.invoice.discount').create(cr,uid,{'name':line.name.id,'application':line.application, 'type':line.type,'sequence':line.sequence,'value':line.value})) \n res['global_discount_lines'] = [(6, 0, discount_lines)]\n return res\n \n def onchange_partner_id(self, cr, uid, ids, part, context=None):\n res = super(sale_order_with_discount, self).onchange_partner_id(cr, uid, ids, part, context = context)\n if part:\n to_delete = []\n discount_lines = []\n sequence = 0 \n part = self.pool.get('res.partner').browse(cr, uid, part, context=context)\n if context.get('discount_lines', False):\n discount_lines = context.get('discount_lines', False) \n for i in range(len(discount_lines)):\n if isinstance(discount_lines[i], list):\n if discount_lines[i][0] == 0:\n if discount_lines[i][2].get('application',False) == 'partner':\n to_delete.append(i)\n elif discount_lines[i][2].get('sequence',False) and discount_lines[i][2].get('sequence') > sequence:\n sequence = discount_lines[i][2].get('sequence')\n \n elif discount_lines[i][0] == 4:\n discount = self.pool.get('sale.order.discount').browse(cr,uid,discount_lines[i][1])\n if discount.application == 'partner':\n to_delete.append(i)\n elif discount.sequence > sequence:\n sequence = discount.sequence \n \n elif isinstance(discount_lines[i], dict):\n if discount_lines[i].get('application',False) == 'partner':\n to_delete.append(i)\n elif discount_lines[i].get('sequence',False) and discount_lines[i].get('sequence') > sequence:\n sequence = discount_lines[i].get('sequence')\n\n to_delete = to_delete[::-1]\n for i in to_delete:\n del discount_lines[i]\n \n for line in part.global_discount_lines:\n discount_lines.append({'name':line.name.id,'type':line.type,'value':line.value, 'application':line.application, 'sequence': sequence+1})\n res['value'].update({'global_discount_lines':discount_lines}) \n return res\n \n def onchange_payment_term(self, cr, uid, ids, payment, context=None):\n res = {}\n if payment:\n discount_lines = []\n to_delete = [] \n sequence = 0 \n payment = self.pool.get('account.payment.term').browse(cr, uid, payment, context=context)\n if context.get('discount_lines', False):\n discount_lines = context.get('discount_lines', False) \n for i in range(len(discount_lines)):\n if isinstance(discount_lines[i], list):\n if discount_lines[i][0] == 0:\n if discount_lines[i][2].get('application',False) == 'payment':\n to_delete.append(i)\n elif discount_lines[i][2].get('sequence',False) and discount_lines[i][2].get('sequence') > sequence:\n sequence = discount_lines[i][2].get('sequence')\n \n elif discount_lines[i][0] == 4:\n discount = self.pool.get('sale.order.discount').browse(cr,uid,discount_lines[i][1])\n if discount.application == 'payment':\n to_delete.append(i)\n elif discount.sequence > sequence:\n sequence = discount.sequence \n \n elif isinstance(discount_lines[i], dict):\n if discount_lines[i].get('application',False) == 'payment':\n to_delete.append(i)\n elif discount_lines[i].get('sequence',False) and discount_lines[i].get('sequence') > sequence:\n sequence = discount_lines[i].get('sequence')\n elif context.get('partner', False):\n part = self.pool.get('res.partner').browse(cr, 
uid, context['partner'], context=context) \n for line in part.global_discount_lines:\n discount_lines.append({'name':line.name.id,'type':line.type,'value':line.value, 'application':line.application, 'sequence': sequence+1})\n\n\n to_delete = to_delete[::-1]\n for i in to_delete:\n del discount_lines[i]\n \n for line in payment.global_discount_lines:\n discount_lines.append({'name':line.name.id,'type':line.type,'value':line.value, 'application':line.application, 'sequence': sequence+1})\n res = {'value': {'global_discount_lines':discount_lines}} \n return res ","sub_path":"account_homage_discount/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":17402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"483628536","text":"from sys import stdin\n\ndef main():\n while True:\n line=stdin.readline().strip()\n if len(line)==0:break\n lista=[int(x) for x in line.split()]\n lista=lista[1:]\n list2=[]\n for i in range(len(lista)-1):\n list2.append(abs(lista[i]-lista[i+1]))\n list2.sort()\n v=True\n for i in range(len(list2)):\n v= v and i+1==list2[i]\n if v:\n print(\"Jolly\")\n else:\n print(\"Not jolly\")\nmain()\n","sub_path":"ejercicios/Misc/jollyjumper.py","file_name":"jollyjumper.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"360464510","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('simulacro_22N', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='reporte',\n name='n_reporte',\n field=models.CharField(blank=True, max_length=1, null=True, verbose_name=b'Reporte de las', choices=[(b'1', b'8:00 am'), (b'2', b'10:00 am'), (b'3', b'1:00 pm'), (b'4', b'3:00 pm'), (b'5', b'5:00 pm'), (b'6', b'6:00 pm')]),\n ),\n ]\n","sub_path":"simulacro_22N/migrations/0002_auto_20151203_2226.py","file_name":"0002_auto_20151203_2226.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"267728734","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SelfAttention(nn.Module):\n\tdef __init__(self, k, heads=8):\n\t\tsuper(SelfAttention, self).__init__()\n\t\tself.k, self.heads = k, heads\n\n\t\t# these compute the queries, keys, values for all\n\t\t# heads (as a single concatenated vector)\n\t\tself.tokeys = nn.Linear(k, k * heads, bias=False)\n\t\tself.toqueries = nn.Linear(k, k * heads, bias=False)\n\t\tself.tovalues = nn.Linear(k, k * heads, bias=False)\n\n\t\t# this unifies the outputs of the different heads into a single k-vector\n\t\tself.unifyheads = nn.Linear(heads * k, k)\n\n\tdef forward(self, x):\n\t\tb, t, k = x.size()\n\t\th = self.heads\n\n\t\tqueries = self.toqueries(x).view(b, t, h, k)\n\t\tkeys = self.tokeys(x).view(b, t, h, k)\n\t\tvalues = self.values(x).view(b, t, h, k)\n\t\t# - fold heads into the batch dimension\n\t\tkeys = keys.transpose(1, 2).contiguous().view(b * h, t, k)\n\t\tqueries = queries.transpose(1, 2).contiguous().view(b * h, t, k)\n\t\tvalues = values.transpose(1, 2).contiguous().view(b * h, t, k)\n\n\t\tqueries = queries / (k ** (1/4))\n\t\tkeys = keys / (k ** (1/4))\n\n\t\t# - get dot product of queries and keys, and scale\n\t\tdot = torch.bmm(queries, keys.transpose(1, 2))\n\t\t# - dot has size (b*h, 
t, t) containing raw weights\n\n\t\tdot = F.softmax(dot, dim=2)\n\t\t# - dot now contains row-wise normalized weights\n\n\t\t# apply the self attention to the values\n\t\tout = torch.bmm(dot, values).view(b, h, t, k)\n\n\t\t# - swap h, t back and unify heads\n\t\tout = out.transpose(1, 2).contiguous().view(b, t, h * k)\n\t\treturn self.unifyheads(out)\n\nclass TransformerBlock(nn.Module):\n\tdef __init__(self, k, heads):\n\t\tsuper().__init__()\n\n\t\tself.attention = SelfAttention(k, heads=heads)\n\n\t\tself.norm1 = nn.LayerNorm(k)\n\t\tself.norm2 = nn.LayerNorm(k)\n\n\t\tself.ff = nn.Sequential(\n\t\t\tnn.Linear(k, 4*k),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Linear(4*k, k)\n\t\t)\n\n\tdef forward(self, x):\n\t\tattended = self.attention(x)\n\t\tx = self.norm1(attended + x)\n\n\t\tfedforward = self.ff(x)\n\t\treturn self.norm2(fedforward + x)\n\n\nclass Transformer(nn.Module):\n\tdef __init__(self, k, heads, depth, seq_length, num_tokens, num_classes):\n\t\tsuper().__init__()\n\n\t\tself.num_tokens = num_tokens\n\t\tself.token_emb = nn.Embedding(num_tokens, k)\n\t\tself.pos_emb = nn.Embedding(seq_length, k)\n\n\t\t# the sequence of transformer blocks that does all the heavy lifting\n\t\ttblocks = []\n\t\tfor i in range(depth):\n\t\t\ttblocks.append(TransformerBlock(k=k, heads=heads))\n\t\tself.tblocks = nn.Sequential(*tblocks)\n\n\t\t# maps the final output sequence to class logits\n\t\tself.toprobs = nn.Linear(k, num_classes)\n\n\tdef forward(self, x):\n\t\t\"\"\"\n\t\t:param x: A (b,t) tensor of integer values representing words \n\t\t\t\t(in some predetermined vocabulary)\n\t\t:return: A (b, c) tensor of log-probabilities over the classes\n\t\t\t\t(where c is the nr. of classes)\n\t\t\"\"\"\n\n\t\t# generate token embeddings\n\t\ttokens = self.token_emb()\n\t\tb, t, k = tokens.size()\n\n\t\t# generate position embeddings\n\t\tpositions = torch.arange(t)\n\t\tpositions = self.pos_emb(positions)[None, :, :].expand(b, t, k)\n\n\t\tx = tokens + positions\n\t\tx = self.tblocks(x)\n\n\t\t# average-pool over the t dimension and project to class probabilities\n\t\tx = self.toprobs(x.mean(dim=1))\n\t\treturn F.log_softmax(x, dim=1)","sub_path":"nlp/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"279371358","text":"# Author: Samuel Jim Nnamdi\n# Title: Fill empty columns in the dataset\n\n# ...\n# Import the required libraries\n# ...\n\n# Perform label encoding on the target Data\n\ncategorical_cols = category_columns= ['workclass', 'race', 'education','marital-status', 'occupation']\nlabel_encoder = preprocessing.LabelEncoder()\n\n# Map each category to a Numerical Value\n\nmapping_dict = {}\nfor col in categorical_cols:\n\tread_dataset[col] = label_encoder.fit_transform(read_dataset[col])\n\tle_name_mapping = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))\nmapping_dict = le_name_mapping ","sub_path":"label_encoding.py","file_name":"label_encoding.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"440407472","text":"from django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\nfrom .forms import RegisterForm,EditForm\n\n# Create your views here.\n\n\n'''\n表单如何传递next想了一会儿,避免后面忘记,把这个过程剖析一下.\n\n第一步:\n从注册链接点击,交给href=\"{% 
url 'users:register' %}?next={{ request.path }}\"处理\nhttp://127.0.0.1:8000/users/register?next=/index(假设从index页面点击)\nget 请求\n\n第二步\nurl 'users:register'对应views.register 处理get请求\n从get请求获取next\n渲染到users/register.html页面--传递的上下文变量是{form:未填写的空form,next(next-1):从get请求获取}\n\n第三步\n用户第一次打开注册页面users/register页面\n页面中存在的 \n其中,这个input的值value=\"{{ next }}\",就是上面第二步渲染而传过来的next(next-2)\n\n第四步\n用户填写form表单后提交,同时把页面的next提交上去\n提交的内容是{form:用户填写好的,next(next-3),从第三步获取的next(next-3))\n\n第五步\nform的action属性,action=\"{% url 'users:register' %}\"\n表单提交后还是交给views.register处理,这次是处理post请求\n再次获取next(post请求传递的next)\n\n'''\ndef register(request):\n # form = UserCreationForm()\n redirect_to = request.POST.get('next', request.GET.get('next',''))\n if request.method == 'POST':\n form = RegisterForm(request.POST,request.FILES)\n\n if form.is_valid():\n form.save()\n if redirect_to:\n return redirect(redirect_to)\n else:\n\n return redirect(reverse('blogs:index'))\n else:\n form = RegisterForm()\n return render(request, 'users/register.html', {\"form\":form, 'next':redirect_to})\n\n@login_required(login_url='/users/login/')\ndef edit(request,pk):\n # 判断request的请求方法,如果是post方法,那么就处理数据\n if request.method == 'POST':\n # 获取前台传过来的数据,用来生成form对象\n form = EditForm(request.POST,instance=request.user)\n if form.is_valid():\n user = form.save(commit=False)\n # 如果用户不修改头像,request.FILES无法获取头像,就用原来的\n user.headshot = request.FILES.get('headshot',request.user.headshot)\n user.save()\n # 保存成功,提示用户\n messages.success(request,'用户信息修改成功')\n form = EditForm(instance=request.user)\n # 如果是get方法,返回用户信息修改页面\n return render(request,'users/edit_form.html',{'form':form})\n\n\n\n\n\n\n\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"306122732","text":"import json\nimport os\nimport requests\n\nimport AMQP_setup\n\nmonitorBindingKey = '#.error'\n\ndef receiveError():\n AMQP_setup.check_setup()\n\n queue_name = 'Error'\n\n # set up a consumer and start to wait for coming messages\n AMQP_setup.channel.basic_consume(queue=queue_name, on_message_callback=callback, auto_ack=True)\n AMQP_setup.channel.start_consuming() # an implicit loop waiting to receive messages; \n #it doesn't exit by default. Use Ctrl+C in the command window to terminate it.\n\ndef callback(channel, methos, properties, body):\n print(\"\\nReceived an error by \" + __file__)\n processError(body)\n saveToDatabase(body)\n print() # print a new line feed\n \n\ndef processError(errorMsg):\n print(\"Printing the error message: \")\n try:\n error = json.loads(errorMsg)\n print(\"--JSON:\", error)\n except Exception as e:\n print(\"--NOT JSON:\", e)\n print(\"--DATA:\", errorMsg)\n print()\n\n#find a way to grab username from frontend.... gg\ndef saveToDatabase(errorMsg):\n errorMsg = json.loads(errorMsg)\n query = 'mutation MyMutation {insert_Error(objects: {Description: \"'+errorMsg[\"message\"]+'\"}){affected_rows}}'\n url = 'https://esd-healthiswell-69.hasura.app/v1/graphql'\n myobj = {'x-hasura-admin-secret': 'Qbbq4TMG6uh8HPqe8pGd1MQZky85mRsw5za5RNNREreufUbTHTSYgaTUquaKtQuk',\n 'content-type': 'application/json'}\n r = requests.post(url, headers=myobj, json={'query': query})\n\nif __name__ == \"__main__\":\n print(\"\\nThis is \" + os.path.basename(__file__), end='')\n print(\": monitoring routing key '{}' in exchange '{}' ... 
\".format(monitorBindingKey, AMQP_setup.exchangename))\n receiveError()","sub_path":"Microservice/logging/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"503315903","text":"# Andreza Santana\n# Converte um valor binário para decimal\n\nbinario = input()\n\nindex = []\ncaractere = []\nfor i in range(len(binario)):\n index.append(i)\n caractere.append(binario[i])\n\ntamanho = len(index)-1\nindex2 = list(range(tamanho, -1, -1))\n\nsoma = 0\nfor x in range(len(caractere)):\n potencia = 2**index2[x]\n decimal = int(caractere[x]) * potencia\n soma += decimal\n print(f\"{caractere[x]} * {potencia} = {decimal}\")\nprint(f\"{binario}(2) = {soma}(10)\")\n","sub_path":"u4/binarioDecimal/binarioDecimal.py","file_name":"binarioDecimal.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"159998241","text":"from django.core import paginator\nfrom django.db.models import Count, Q\nfrom django.shortcuts import render, get_object_or_404,HttpResponseRedirect\nfrom .forms import *\nfrom .models import *\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\ndef searchview(request):\n\t\tqueryset = Post.postmanager.all()\n\t\tquery = request.GET.get('query')\n\t\tlenquery=len(query)\n\t\tif lenquery >=3:\n\t\t\tgetquerypost = queryset.filter(title__icontains=query,content__icontains=query) \n\t\t\tcount=getquerypost.count()\n\t\t\tprint(count)\n\t\telse:\n\t\t\t getquerypost = Post.objects.none()\n\t\t\t count=getquerypost.count()\n\t\t\t print(count)\n\t\tcontext = {\n\t\t\t'query':query,\n\t\t\t'count':count,\n\t\t\t'queryset': getquerypost\n\t\t}\n\t\treturn render(request, 'Eblog/search_results.html', context)\n\ndef bloghome(request):\n\n\trecent_posts = Post.postmanager.order_by('-publish_date')[0:3]\n\tcategory=Category.objects.all()\n\tall_posts = Post.postmanager.all()#it is same as all_posts= Post.objects.filter(status=\"published\")\n\tpaginator=Paginator(all_posts,6)#creating an instance of Paginator taking all posts and create 6 items per page\n\tpage_var='page'\n\tpage=request.GET.get(page_var)#get the string \n\ttry:\n\t\tpaginate_queryset=paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpaginate_queryset=paginator.page(1)\n\texcept EmptyPage:\n\t\tpaginate_queryset=paginator.page(paginator.num_pages) \n\t \n\tprint(paginate_queryset) \n\tprint(recent_posts) \n\tprint(category) \n \n\tcontext={\n\t\t'recent_posts':recent_posts,\n\t\t'category':category,\n\t\t'queryset' : paginate_queryset,\n\t\t'page_var':page_var , \n\t\t}\n\treturn render(request, 'Eblog/bloghome.html',context)\n\ndef post_single(request, post):\n\tpost = get_object_or_404(Post, slug=post, status='published')\n\trecent_posts = Post.postmanager.order_by('-publish_date')[0:3]\n\tcategory=Category.objects.all().exclude(name=\"Default\")\n\t\n\tdef get_client_ip(request):\n\t\tx_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n\t\tif x_forwarded_for:\n\t\t\tip = x_forwarded_for.split(',')[0]\n\t\telse:\n\t\t\tip = request.META.get('REMOTE_ADDR')\n\t\treturn ip\n\n\tip = get_client_ip(request)\n\tif IpModel.objects.filter(ip=ip).exists():\n\t\t\tprint(\"Ip Already exist\")\n\t\t\tpost.views.add(IpModel.objects.get(ip=ip))\n\t \n\telse: \n\t\t\tIpModel.objects.create(ip=ip)\n\t\t\tpost.views.add(IpModel.objects.get(ip=ip))\n\n\tcomment_form = 
NewCommentForm()\n #display comments whose status are true\n\tallcomments = post.comments.filter(status=True)\n\tpage = request.GET.get('page', 1)#displays first page\n\tpaginator = Paginator(allcomments, 11)#we wanna show 10 items per page.\n\ttry:\n\t\tcomments = paginator.page(page)#request for certain paginator\n\texcept PageNotAnInteger:#when page url has not an integer value, send user to page 1\n\t\tcomments = paginator.page(1)\n\t\t\n\texcept EmptyPage: #when your page has just 2 but user types for page 10\n\t\tcomments = paginator.page(paginator.num_pages)\n\tuser_comment = None\n\n\tif request.method == 'POST':\n\t\tcomment_form = NewCommentForm(request.POST)\n\t\tif comment_form.is_valid():\n\t\t\tgetuser=request.user.customer\n\t\t\tuser_comment = comment_form.save()\n\t\t\tuser_comment.post = post\n\t\t\tuser_comment.user = getuser\n\t\t\tprint(user_comment.user)\n\t\t\tprint(user_comment.post)\n\t\t\tuser_comment.save()\n\t\t\treturn HttpResponseRedirect('/blog/' + post.slug)\n\telse:\n\t\tcomment_form = NewCommentForm()\n\n\tcontext={\n\t\t\t\t\t\t'post': post, \n\t\t\t\t\t\t'recent_posts': recent_posts,\n\t\t\t\t\t\t 'category':category,\n\t\t\t\t\t\t'comments': user_comment, \n\t\t\t\t\t\t'comments': comments, \n\t\t\t\t\t\t'comment_form': comment_form, \n\t\t\t\t\t\t'allcomments': allcomments,\n\t} \n\treturn render(request, 'Eblog/blogpost.html', context)\n\ndef category_post(request,cat_title):\n\trecent_posts = Post.postmanager.order_by('-publish_date')[0:3]\n\tcategory=Category.objects.all().exclude(name=\"Default\")\n\t\n\tcat_post=Post.postmanager.filter(category=cat_title)\n\tcatpost_count=Post.postmanager.filter(category=cat_title).count()\n\tprint(cat_post)\n\t\n\tpaginator=Paginator(cat_post,6)#creating an instance of Paginator taking all posts and create 6 items per page\n\tpage_var='page'\n\tpage=request.GET.get(page_var)#get the string \n\ttry:\n\t\tpaginate_queryset=paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpaginate_queryset=paginator.page(1)\n\texcept EmptyPage:\n\t\tpaginate_queryset=paginator.page(paginator.num_pages) \n\t\t\t\n\tcontext={\n\t\t\t\t 'recent_posts':recent_posts,\n\t\t\t\t 'category':category,\n\t\t\t\t 'cat_title':cat_title,\n\t\t\t\t 'post_count': catpost_count,\n\t\t\t\t 'cat_post':cat_post,\n\t\t\t\t \n\t\t\t\t 'queryset' : paginate_queryset,\n\t\t\t\t 'page_var':page_var , \n\t}\n\treturn render(request,'Eblog/category_post.html',context)\n\n","sub_path":"EBlog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"608704653","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/6/3 13:47\n# @Author :Wang Guosong\n# @File : eg_managed_attributes.py\n# @Software : PyCharm\n\n# class Person:\n# def getName(self):\n# if not valid():\n# raise\n\nattribute = property()\n\nclass Person:\n def __init__(self, name):\n self._name = name\n def getName(self):\n print('fetch ...')\n return self._name\n def setName(self, value):\n print('change ...')\n self._name = value\n def delNmae(self):\n print('remove ...')\n del self._name\n name = property(getName, setName, delNmae, 'name property docs')\n\nbob = Person('Bob Smith')\nprint(bob.name)\nbob.name = 'Robert Smith'\nprint(bob.name)\ndel bob.name\n\nprint('-' * 20)\nsue = Person('Sue Jones')\nprint(sue.name)\nprint(Person.name.__doc__)\n\nclass PropSquare:\n def __init__(self, start):\n self.value = start\n def getX(self):\n return self.value ** 2\n def setX(self, 
value):\n self.value = value\n X = property(getX, setX)\n\nP = PropSquare(3)\nQ = PropSquare(32)\n\nprint(P.X)\nP.X = 4\nprint(P.X)\nprint()\n\nclass Person:\n def __init__(self, name):\n self.name = name\n\n @property\n def name(self):\n \"name property docs\"\n print('fetch...')\n return self._name\n\n @name.setter\n def name(self, value):\n print('change...')\n self._name = value\n\n @name.deleter\n def name(self):\n print('remove...')\n del self._name\n\nbob = Person('Bob Smith')\nprint(bob.name)\nbob.name = 'Robert Smith'\nprint(bob.name)\ndel bob.name\n\nprint('-' * 20)\nsue = Person('Sue Jones')\nprint(sue.name)\nprint(Person.name.__doc__)\n\nclass Name:\n \"name descriptor docs\"\n def __get__(self, instance, owner):\n print('fetch...')\n return instance._name\n def __set__(self, instance, value):\n print('change...')\n instance._name = value\n def __delete__(self, instance):\n print('remove...')\n del instance._name\n\nclass Person:\n def __init__(self, name):\n self._name = name\n name = Name()\n\nbob = Person('Bob Smith')\nprint(bob.name)\nbob.name = 'Robert Smith'\nprint(bob.name)\ndel bob.name\n\nclass DescBoth:\n def __init__(self, data):\n self.data = data\n def __get__(self, instance, owner):\n return '%s, %s' % (self.data, instance.data)\n def __set__(self, instance, value):\n instance.data = value\n\nclass Client:\n def __init__(self, data):\n self.data = data\n managed = DescBoth('spam')\n\nI = Client('eggs')\nI.managed\nI.managed = 'SPAM'\nI.managed\n\nclass DescSquare(object):\n def __get__(self, instance, owner):\n return instance._square ** 2\n def __set__(self, instance, value):\n instance._square = value\n\nclass DescCube(object):\n def __get__(self, instance, owner):\n return instance._cube ** 3\n\nclass Powers(object):\n square = DescSquare()\n cube = DescCube()\n def __init__(self, x, y):\n self._square = x\n self._cube = y\n\nX = Powers(3, 4)\nprint(X.square)\nprint(X.cube)\nX.square = 5\nprint(X.square)\n\n\nclass GetAttr:\n eggs = 88\n def __init__(self):\n self.spam = 77\n def __len__(self):\n print('__len__: 42')\n return 42\n def __getattr__(self, attr):\n print('getattr: ' + attr)\n if attr == '__str__':\n return lambda *args: '[Getattr str]'\n else:\n return lambda *args: None\n\nclass GetAttribute(object):\n eggs = 88\n def __init__(self):\n self.spam = 77\n def __len__(self):\n print('__len__: 42')\n return 42\n def __getattribute__(self, attr):\n if attr == '__str__':\n return lambda *args: '[GetAttribute str]'\n else:\n return lambda *args: None\n\n# for Class in GetAttr, GetAttribute:\n# print('\\n' + Class.__name__.ljust(50, \"=\"))\n#\n# X = Class()\n# X.eggs\n# X.spam\n# X.other\n# len(X)\n\n\n\n\n\n\n\nclass Person:\n def __init__(self, name):\n self.name = name\n def __getattr__(self, attr):\n print('get:' + attr)\n if attr == 'name':\n return self._name\n else:\n raise AttributeError(attr)\n\n def __setattr__(self, attr, value):\n print('set:' + attr)\n if attr == 'name':\n attr = 'name'\n self.__dict__[attr] = value\n\n def __delattr__(self, attr):\n print('del:' + attr)\n if attr == 'name':\n attr = '_name'\n del self.__dict__[attr]\n\nbob = Person('Bob Smith')\nprint(bob.name)\nbob.name = 'Robert Smith'\n\nclass AttrSquare: # Add (object) for 2.X\n def __init__(self, start):\n self.value = start # Triggers __setattr__!\n\n def __getattribute__(self, attr): # On all attr fetches\n if attr == 'X':\n return self.value ** 2 # Triggers __getattribute__ again!\n else:\n return object.__getattribute__(self, attr)\n\n def __setattr__(self, attr, 
value): # On all attr assignments\n if attr == 'X':\n attr = 'value'\n object.__setattr__(self, attr, value)\n\nA = AttrSquare(3) # 2 instances of class with overloading\nB = AttrSquare(32) # Each has different state information\nprint(A.X) # 3 ** 2\nA.X = 4\nprint(A.X) # 4 ** 2\nprint(B.X) # 32 ** 2 (1024)","sub_path":"example/managed_attribute/eg_managed_attributes.py","file_name":"eg_managed_attributes.py","file_ext":"py","file_size_in_byte":5242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"377374459","text":"#encoding: utf-8\n'''\n02. 「パトカー」+「タクシー」=「パタトクカシーー」\n「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.\n\n'''\nstr1 = 'パトカー'\nstr2 = 'タクシー'\nstr3 = ''\nfor c1, c2 in zip(str1, str2): # zip:複数の引数を同時に回してくれる\n str3 += c1 + c2\nprint(str3)\n","sub_path":"src/chap1/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"194521835","text":"#! python3\r\n# gff3_to_fasta.py\r\n# This program reads a genome fasta file and corresponding gff3 file in a format \r\n# output by PASA and retrieves the main and/or alternative isoform transcripts \r\n# from each locus\r\n\r\nimport os, argparse, re\r\nfrom Bio import SeqIO\r\n\r\n# Define functions for later use\r\ndef validate_args(args):\r\n # Validate input file locations\r\n if not os.path.isfile(args.fasta):\r\n print('I am unable to locate the genome fasta file (' + args.fasta + ')')\r\n print('Make sure you\\'ve typed the file name or location correctly and try again.')\r\n quit()\r\n if not os.path.isfile(args.gff3):\r\n print('I am unable to locate the input gff3 gene annotation file (' + args.gff3 + ')')\r\n print('Make sure you\\'ve typed the file name or location correctly and try again.')\r\n quit()\r\n # Validate behaviour arguments\r\n if args.locusSeqs == None:\r\n print('You need to specify the locusSeqs argument for this program to run.')\r\n quit()\r\n if args.seqType == None:\r\n print('You need to specify the seqType argument for this program to run.')\r\n quit()\r\n # Format output names\r\n mainOutputFileName = None\r\n nuclOutputFileName = None\r\n protOutputFileName = None\r\n if args.seqType == 'cds' or args.seqType == 'both':\r\n nuclOutputFileName = args.outputFileName + '.nucl'\r\n protOutputFileName = args.outputFileName + '.aa'\r\n if args.seqType == 'transcript' or args.seqType == 'both':\r\n mainOutputFileName = args.outputFileName + '.trans'\r\n # Handle file overwrites\r\n if args.seqType == 'transcript' or args.seqType == 'both':\r\n if os.path.isfile(mainOutputFileName) and args.force != True:\r\n print('There is already a file named ' + mainOutputFileName + '. Either specify a new file name, delete these older file(s), or provide the -force argument either \"Y\" or \"y\"')\r\n quit()\r\n elif os.path.isfile(mainOutputFileName) and args.force == True:\r\n os.remove(mainOutputFileName)\r\n if args.seqType == 'cds' or args.seqType == 'both':\r\n # Nucl\r\n if os.path.isfile(nuclOutputFileName) and args.force != True:\r\n print('There is already a file named ' + nuclOutputFileName + '. 
Either specify a new file name, delete these older file(s), or provide the -force argument either \"Y\" or \"y\"')\r\n quit()\r\n elif os.path.isfile(nuclOutputFileName) and args.force == True:\r\n os.remove(nuclOutputFileName)\r\n # Prot\r\n if os.path.isfile(protOutputFileName) and args.force != True:\r\n print('There is already a file named ' + protOutputFileName + '. Either specify a new file name, delete these older file(s), or provide the -force argument either \"Y\" or \"y\"')\r\n quit()\r\n elif os.path.isfile(protOutputFileName) and args.force == True:\r\n os.remove(protOutputFileName)\r\n # Return file names\r\n return mainOutputFileName, nuclOutputFileName, protOutputFileName\r\n\r\ndef reverse_comp(seq):\r\n reversedSeq = seq[::-1].lower()\r\n # Decode characters\r\n reversedSeq = reversedSeq.replace('a', 'T')\r\n reversedSeq = reversedSeq.replace('t', 'A')\r\n reversedSeq = reversedSeq.replace('c', 'G')\r\n reversedSeq = reversedSeq.replace('g', 'C')\r\n return reversedSeq\r\n\r\ndef group_process(currGroup, gffExonDict, gffCDSDict):\r\n full_mrnaGroup = [] # This will hold processed mRNA positions.\r\n full_mrnaCDS = []\r\n mrnaGroup = [] # This will be a temporary storage for mRNA lines.\r\n for entry in currGroup:\r\n # Handle the first line in the group: we just want the gene ID\r\n if entry[2] == 'gene':\r\n geneID = idRegex.search(entry[8]).group(1)\r\n # Handle mRNA lines: this will start a subgroup corresponding to the mRNA\r\n elif entry[2] == 'mRNA':\r\n if mrnaGroup == []: # i.e., if this is the first mRNA line in this gene group, we just need to start building it.\r\n mrnaGroup.append(entry)\r\n else: # i.e., there is more than one mRNA in this gene group, so we need to process the group we've built then initiate a new one.\r\n # Process current mrnaGroup\r\n for subentry in mrnaGroup:\r\n if subentry[2] == 'mRNA':\r\n full_mrnaGroup.append([idRegex.search(subentry[8]).group(1), []])\r\n full_mrnaCDS.append([idRegex.search(subentry[8]).group(1), []])\r\n elif subentry[2] == 'exon':\r\n coords = subentry[3] + '-' + subentry[4] # +1 here to make Python act 1-based like gff3 format.\r\n full_mrnaGroup[-1][-1].append(coords)\r\n elif subentry[2] == 'CDS':\r\n coords = subentry[3] + '-' + subentry[4] # +1 here to make Python act 1-based like gff3 format.\r\n full_mrnaCDS[-1][-1].append(coords)\r\n # Initiate new mrnaGroup\r\n full_mrnaGroup[-1] += [subentry[0],subentry[6]] # Append contig ID and orientation.\r\n full_mrnaCDS[-1] += [subentry[0],subentry[6]]\r\n mrnaGroup = [entry]\r\n else:\r\n mrnaGroup.append(entry)\r\n # Process the mrnaGroup that's currently sitting in the pipe (so to speak)\r\n for subentry in mrnaGroup:\r\n if subentry[2] == 'mRNA':\r\n full_mrnaGroup.append([idRegex.search(subentry[8]).group(1), []])\r\n full_mrnaCDS.append([idRegex.search(subentry[8]).group(1), []])\r\n elif subentry[2] == 'exon':\r\n coords = subentry[3] + '-' + subentry[4] # +1 here to make Python act 1-based like gff3 format.\r\n full_mrnaGroup[-1][-1].append(coords)\r\n elif subentry[2] == 'CDS':\r\n coords = subentry[3] + '-' + subentry[4] # +1 here to make Python act 1-based like gff3 format.\r\n full_mrnaCDS[-1][-1].append(coords)\r\n full_mrnaGroup[-1] += [subentry[0],subentry[6]] # Append contig ID and orientation.\r\n full_mrnaCDS[-1] += [subentry[0],subentry[6]]\r\n # Put info into the coordDict and move on\r\n gffExonDict[geneID] = full_mrnaGroup\r\n gffCDSDict[geneID] = full_mrnaCDS\r\n # Return dictionaries\r\n return gffExonDict, gffCDSDict\r\n\r\ndef 
pasa_parse(gff3File):\r\n # Establish values for storing results\r\n currGroup = []\r\n gffExonDict = {}\r\n gffCDSDict = {}\r\n pasaProts = {}\r\n # Loop through gff3 file\r\n with open(gff3File, 'r') as fileIn:\r\n for line in fileIn:\r\n # Skip filler lines\r\n if line == '\\n':\r\n continue\r\n # Grab the PASA predicted ORF sequences\r\n if line.startswith('#PROT'):\r\n sl = line.rstrip('\\n').split('\\t')\r\n geneID = sl[0].split()[1]\r\n pasaProt = sl[1]\r\n pasaProts[geneID] = pasaProt\r\n continue\r\n elif line.startswith('#'):\r\n continue\r\n # Get details\r\n sl = line.rstrip('\\n').split('\\t')\r\n lineType = sl[2]\r\n # Building gene group/process it\r\n if lineType == 'gene':\r\n if currGroup == []:\r\n # First iteration: just play it cool, add the sl to the group\r\n currGroup.append(sl)\r\n continue\r\n else:\r\n # Process group if we're encountering a new group\r\n gffExonDict, gffCDSDict = group_process(currGroup, gffExonDict, gffCDSDict)\r\n currGroup = [sl]\r\n elif lineType == 'rRNA' or lineType == 'tRNA': # Skip lines that aren't coding\r\n continue\r\n else:\r\n # Keep building group until we encounter another 'gene' lineType\r\n currGroup.append(sl)\r\n # Process the last mrnaGroup\r\n gffExonDict, gffCDSDict = group_process(currGroup, gffExonDict, gffCDSDict)\r\n # Return dictionaries\r\n return gffExonDict, gffCDSDict, pasaProts\r\n\r\ndef longest_iso(mrnaList):\r\n longestMrna = ['', 0] # We pick out the representative gene based on length. If length is identical, we'll end up picking the entry listed first in the gff3 file since our > condition won't be met. I doubt this will happen much or at all though.\r\n for mrna in mrnaList:\r\n mrnaLen = 0\r\n for pair in mrna[1]:\r\n coords = pair.split('-')\r\n mrnaLen += (int(coords[1]) - int(coords[0]) + 1)\r\n if mrnaLen > longestMrna[1]:\r\n longestMrna = [mrna, mrnaLen]\r\n mrnaList = [longestMrna[0]]\r\n return mrnaList\r\n\r\n# Set up regex for later use\r\nidRegex = re.compile(r'ID=(.+?);')\r\n\r\n##### USER INPUT SECTION\r\n\r\nusage = \"\"\"%(prog)s reads in genome fasta file and corresponding gff3 file in a format output by PASA and retrieves the main\r\nand/or alternative isoform transcripts or CDS' for each locus. Alternatively, you can grab the CDS regions which will produce nucleotide\r\nand AA files (name format == OUTPUT.nucl / OUTPUT.aa)\r\n\"\"\"\r\n\r\np = argparse.ArgumentParser(description=usage)\r\np.add_argument(\"-i\", \"-input\", dest=\"fasta\",\r\n help=\"genome fasta file\")\r\np.add_argument(\"-g\", \"-gff\", dest=\"gff3\",\r\n help=\"gff3 file\")\r\np.add_argument(\"-l\", \"-locusSeqs\", dest=\"locusSeqs\", choices = ['main', 'isoforms'],\r\n help=\"type of transcripts to extract from each locus (main == just the \")\r\np.add_argument(\"-s\", \"-seqType\", dest=\"seqType\", choices = ['transcript', 'cds', 'both'],\r\n help=\"type of sequence to output (transcripts == full gene model including UTRs if annotated, cds == coding regions)\")\r\np.add_argument(\"-o\", \"-output\", dest=\"outputFileName\",\r\n help=\"output fasta file name containing transcript sequences\")\r\np.add_argument(\"-f\", \"-force\", dest=\"force\", action='store_true',\r\n help=\"By default this program will not overwrite existing files. 
Specify this argument to allow this behaviour at your own risk.\", default=False)\r\n\r\nargs = p.parse_args()\r\nmainOutputFileName, nuclOutputFileName, protOutputFileName = validate_args(args)\r\n\r\n# Load the fasta file and parse its contents\r\nrecords = SeqIO.to_dict(SeqIO.parse(open(args.fasta, 'r'), 'fasta'))\r\n\r\n# Parse the gff3 file\r\ngffExonDict, gffCDSDict, pasaProts = pasa_parse(args.gff3)\r\n\r\n# Produce output files\r\ndictObjs = [gffExonDict, gffCDSDict, pasaProts]\r\nfileNames = [mainOutputFileName, nuclOutputFileName, protOutputFileName]\r\nlongestIsos = set() # This will retain values for protein output\r\nfor i in range(len(dictObjs)):\r\n # Don't output unwanted files\r\n if fileNames[i] == None:\r\n continue\r\n # Process the values in the dictObj if we're looking at nucleotide dictionaries and output to file\r\n with open(fileNames[i], 'w') as fileOut:\r\n for key, value in dictObjs[i].items():\r\n # Pick out longest isoform if relevant [note: longest is with relation to TRANSCRIPT, not CDS]\r\n if args.locusSeqs == 'main' and i != 2: # Note that, if we're outputting CDS, we'll always enter here when i == 1; thus, the longestIsos set will be ready for the pasaProts output below\r\n longestID = longest_iso(dictObjs[0][key])[0][0]\r\n longestIsos.add(longestID) # This is for protein output\r\n for x in range(len(value)):\r\n if value[x][0] == longestID:\r\n chosenIndex = x\r\n value = [value[chosenIndex]]\r\n # If we're looking at the pasaProts dictionary, simply dump values to fasta\r\n if i == 2 and dictObjs[i] != []: # This lets us handle GFF3's produced by EVM; we won't get a protein, but we won't crash either\r\n with open(fileNames[i], 'w') as fileOut:\r\n for k, v in dictObjs[i].items():\r\n if args.locusSeqs == 'main':\r\n if k in longestIsos:\r\n fileOut.write('>' + k + '\\n' + v + '\\n')\r\n else:\r\n fileOut.write('>' + k + '\\n' + v + '\\n')\r\n break # We're all done; pasaProts is the last dictionary\r\n # Loop into mrnas associated with this gene model and build the sequence\r\n for mrna in value:\r\n # Retrieve genomic sequence\r\n genomeSeq = str(records[mrna[2]].seq)\r\n # Reverse the list if we're looking at a '-' model so we start at the 3' end of the gene model\r\n if mrna[3] == '-':\r\n mrna[1].reverse()\r\n # Join sequence segments\r\n transcript = ''\r\n for pair in mrna[1]:\r\n coords = pair.split('-')\r\n segment = genomeSeq[int(coords[0])-1:int(coords[1])] # Make it 1-based by -1 to the first coordinate\r\n transcript += segment\r\n # Reverse comp if necessary\r\n if mrna[3] == '-':\r\n transcript = reverse_comp(transcript)\r\n # Output to file\r\n fileOut.write('>' + mrna[0] + '\\n' + transcript + '\\n')\r\n\r\n# Done!\r\nprint('Program completed successfully!')\r\n","sub_path":"gff3_to_fasta.py","file_name":"gff3_to_fasta.py","file_ext":"py","file_size_in_byte":15857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"416688886","text":"# TD3 agent\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom memory import ReplayMemory\nfrom model import Actor, Critic\n\nfrom const import *\n\n\nclass TD3(object):\n def __init__(self, env, writer=None):\n \"\"\"\n Twin Delayed Deep Deterministic Policy Gradient Algorithm(TD3)\n \"\"\"\n self.env = env\n self.writer = writer\n\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n self.max_action = env.action_space.high[0]\n\n # Randomly 
initialize network parameter\n self.actor = Actor(state_dim, action_dim).to('cuda')\n self.critic = Critic(state_dim, action_dim).to('cuda')\n\n # Initialize target network parameter\n self.target_actor = Actor(state_dim, action_dim).to('cuda')\n self.target_actor.load_state_dict(self.actor.state_dict())\n self.target_critic = Critic(state_dim, action_dim).to('cuda')\n self.target_critic.load_state_dict(self.critic.state_dict())\n\n # Replay memory\n self.memory = ReplayMemory(state_dim, action_dim)\n\n self.gamma = gamma\n self.tau = tau\n\n # network parameter optimizer\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)\n self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr, weight_decay=weight_decay)\n\n def get_action(self, state, initial_act=False):\n if initial_act:\n return self.env.action_space.sample()\n action = self.actor(torch.from_numpy(state).to('cuda', torch.float))\n action = np.random.normal(0, 0.1) + action.detach().cpu().numpy()\n return np.clip(action, -1, 1)\n\n def store_transition(self, state, action, state_, reward, done):\n self.memory.store_transition(state, action, state_, reward, done)\n\n def soft_update(self, target_net, net):\n \"\"\"Target parameters soft update\"\"\"\n for target_param, param in zip(target_net.parameters(), net.parameters()):\n target_param.data.copy_(\n self.tau * param.data + (1 - self.tau) * target_param.data\n )\n\n def update(self, time_step, batch_size=64):\n states, actions, states_, rewards, terminals = self.memory.sample(batch_size)\n\n # Update Critic\n with torch.no_grad():\n noise = (\n torch.randn_like(actions) * policy_noise\n ).clamp(-noise_clip, noise_clip)\n\n actions_ = (\n self.target_actor(states_) + noise\n ).clamp(-self.max_action, self.max_action)\n\n target_q1, target_q2 = self.target_critic(states_, actions_)\n y = rewards.unsqueeze(1) + terminals.unsqueeze(1) * gamma * torch.min(target_q1, target_q2)\n q1, q2 = self.critic(states, actions)\n critic_loss = F.mse_loss(q1, y) + F.mse_loss(q2, y)\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n if self.writer and time_step:\n self.writer.add_scalar(\"loss/critic\", critic_loss.item(), time_step)\n\n # Delayed Policy Update\n if time_step % policy_freq == 0:\n # Update Actor\n actor_loss = -1 * self.critic.Q1(states, self.actor(states)).mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n if self.writer:\n self.writer.add_scalar(\"loss/actor\", actor_loss.item(), time_step)\n\n # target parameter soft update\n self.soft_update(self.target_actor, self.actor) # update target actor network\n self.soft_update(self.target_critic, self.critic) # update target critic network\n\n def save_model(self, path='models/'):\n torch.save(self.actor.state_dict(), path + 'actor')\n torch.save(self.critic.state_dict(), path + 'critic')\n torch.save(self.target_actor.state_dict(), path + 'target_actor')\n torch.save(self.target_critic.state_dict(), path + 'target_critic')\n\n def load_model(self, path='models/'):\n self.actor.load_state_dict(torch.load(path + 'actor'))\n self.critic.load_state_dict(torch.load(path + 'critic'))\n self.target_actor.load_state_dict(torch.load(path + 'target_actor'))\n self.target_critic.load_state_dict(torch.load(path + 
'target_critic'))\n","sub_path":"high_dimensional/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"199222941","text":"import os\nimport copy\nimport json\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.core.exceptions import FieldError, ValidationError\nfrom rest_framework import serializers\nfrom libs.k8s.jobs import BaseJob\nfrom libs.k8s.jobs.annotation_archiver import AnnotationArchiver\nfrom libs.k8s.jobs.rosbag_extractor import RosbagExtractor\nfrom libs.k8s.jobs.rosbag_analyzer import RosbagAnalyzer\nfrom datetime import datetime, timezone\nfrom projects.jobs.models import Job\nfrom projects.jobs.const import STATUS_MAP, UNKNOWN_LIMIT_TIME\nfrom projects.project_manager import ProjectManager\nfrom projects.originals.original_manager import OriginalManager\nfrom projects.datasets.dataset_manager import DatasetManager\nfrom projects.storages.storage_manager import StorageManager\nfrom api.settings import PER_PAGE\nfrom api.common import validation_check\nfrom accounts.account_manager import AccountManager\nfrom automan_website import settings\n\n\nclass JobSerializer(serializers.ModelSerializer):\n class Meta:\n model = Job\n fields = ('job_type', 'job_config')\n\n @classmethod\n def list_jobs(cls, project_id, sort_key, is_reverse=False, per_page=PER_PAGE, page=1, search_keyword=\"\"):\n validation_check(per_page, page)\n begin = per_page * (page - 1)\n try:\n if is_reverse is False:\n jobs = Job.objects.order_by(sort_key).filter(\n Q(project_id=project_id),\n Q(job_type__contains=search_keyword) | Q(job_config__contains=search_keyword)\n )[begin:begin + per_page]\n else:\n jobs = Job.objects.order_by(sort_key).reverse().filter(\n Q(project_id=project_id),\n Q(job_type__contains=search_keyword) | Q(job_config__contains=search_keyword)\n )[begin:begin + per_page]\n except FieldError:\n jobs = Job.objects.order_by(\"id\").filter(\n Q(project_id=project_id),\n Q(job_type__contains=search_keyword) | Q(job_config__contains=search_keyword)\n )[begin:begin + per_page]\n records = []\n for job in jobs:\n record = {}\n record['id'] = job.id\n record['job_type'] = job.job_type\n if job.status not in [STATUS_MAP['succeeded'], STATUS_MAP['failed']]:\n status, start_time, completion_time = cls.__get_job_status(job.id, job.job_type)\n if job.status != STATUS_MAP['unknown'] and status == STATUS_MAP['unknown']:\n job.unknown_started_at = datetime.now(timezone.utc)\n job.status = status if status else STATUS_MAP['unknown']\n job.started_at = start_time\n job.completed_at = completion_time\n if job.status == STATUS_MAP['unknown'] and cls.__is_unknown_time_limit(job.unknown_started_at):\n job.status = STATUS_MAP['failed']\n if job.status == STATUS_MAP['failed']:\n namespace = cls.__generate_job_namespace()\n pod_log = BaseJob().logs(cls.__generate_job_name(job.id, job.job_type), namespace)\n job.pod_log = pod_log[0:min(len(pod_log), 1023)]\n job.save()\n record['status'] = job.status\n record['started_at'] = str(job.started_at) if job.started_at else ''\n record['completed_at'] = str(job.completed_at) if job.completed_at else ''\n record['registered_at'] = str(job.registered_at)\n record['description'] = cls.get_job_description(job.job_type, job.job_config)\n record['pod_log'] = job.pod_log\n record['user_id'] = job.user_id\n records.append(record)\n contents = {}\n contents['count'] = cls.job_total_count(project_id)\n contents['records'] = 
records\n return contents\n\n @classmethod\n def get_job_description(cls, job_type, job_config_json):\n job_config = json.loads(job_config_json)\n desc = {}\n if job_type == 'analyzer':\n automan_config = job_config['automan_config']\n desc['path'] = automan_config['path']\n desc['lanel_type'] = automan_config['label_type']\n elif job_type == 'extractor':\n for key in job_config['raw_data_config']:\n desc[key] = job_config['raw_data_config'][key]\n elif job_type == 'archiver':\n for key in job_config['archive_config']:\n desc[key] = job_config['archive_config'][key]\n return json.dumps(desc)\n\n @classmethod\n def job_total_count(cls, project_id):\n jobs = Job.objects.filter(project_id=project_id)\n return jobs.count()\n\n @classmethod\n @transaction.atomic\n def archive(cls, user_id, project_id, dataset_id, original_id, annotation_id, include_image: bool):\n original = OriginalManager().get_original(project_id, original_id, status='analyzed')\n storage_manager = StorageManager(project_id, original['storage_id'])\n storage_config = copy.deepcopy(storage_manager.storage['storage_config'])\n original_path = storage_manager.get_original_filepath(original['name'])\n storage_config.update({\n 'path': original_path,\n 'storage_id': original['storage_id']})\n automan_config = cls.__get_automan_config(user_id)\n automan_config.update({\n 'path': '/projects/' + str(project_id) + '/annotations/' + str(annotation_id) + '/',\n 'presigned': '/projects/' + str(project_id) + '/storages/upload/'})\n\n archive_config = cls.__get_archive_info(\n storage_manager.storage['storage_type'], user_id, project_id, dataset_id, annotation_id, original_id, include_image)\n job_config = {\n 'storage_type': storage_manager.storage['storage_type'],\n 'storage_config': storage_config,\n 'automan_config': automan_config,\n 'archive_config': archive_config,\n }\n job_config_json = json.dumps(job_config)\n new_job = Job(\n job_type='archiver',\n user_id=user_id,\n project_id=project_id,\n job_config=job_config_json)\n new_job.save()\n job = AnnotationArchiver(**job_config)\n job.create(cls.__generate_job_name(new_job.id, 'archiver'))\n res = job.run(namespace=settings.JOB_NAMESPACE)\n return res\n\n @staticmethod\n def __get_archive_info(storage_type, user_id, project_id, dataset_id, annotation_id, original_id, include_image):\n dataset = DatasetManager().get_dataset(user_id, dataset_id)\n file_path = dataset['file_path'].rsplit('/', 2)\n archive_name = file_path[1] + '_' + datetime.now().strftime('%s')\n archive_dir = file_path[0]\n if storage_type == 'AWS_S3':\n archive_dir = dataset['file_path'].replace('datasets', 'archives')\n\n return {\n 'project_id': project_id,\n 'dataset_id': dataset_id,\n 'annotation_id': annotation_id,\n 'original_id': original_id,\n 'archive_dir': archive_dir,\n 'archive_name': archive_name,\n 'include_image': include_image,\n }\n\n @classmethod\n @transaction.atomic\n def extract(cls, user_id, project_id, original_id, candidates, name):\n original = OriginalManager().get_original(project_id, original_id, status='analyzed')\n storage_manager = StorageManager(project_id, original['storage_id'])\n storage_config = copy.deepcopy(storage_manager.storage['storage_config'])\n original_path = storage_manager.get_original_filepath(original['name'])\n output_dir = storage_manager.get_dataset_dirname(original['name'], candidates)\n print('output_dirname: ' + output_dir)\n storage_config.update({\n 'path': original_path,\n 'output_dir': output_dir,\n 'storage_id': original['storage_id']})\n automan_config 
= cls.__get_automan_config(user_id)\n automan_config.update({\n 'path': '/projects/' + project_id + '/datasets/',\n 'presigned': '/projects/' + project_id + '/storages/upload/'})\n raw_data_config = cls.__get_raw_data_config(project_id, original_id, candidates, name)\n job_config = {\n 'storage_type': storage_manager.storage['storage_type'],\n 'storage_config': storage_config,\n 'automan_config': automan_config,\n 'raw_data_config': raw_data_config,\n }\n\n job_config_json = json.dumps(job_config)\n new_job = Job(\n job_type='extractor',\n user_id=user_id,\n project_id=project_id,\n job_config=job_config_json)\n new_job.save()\n\n if original['file_type'] == 'rosbag':\n job = RosbagExtractor(**job_config)\n job.create(cls.__generate_job_name(new_job.id, 'extractor'))\n res = job.run(namespace=settings.JOB_NAMESPACE)\n return res\n else:\n raise ValidationError()\n\n @classmethod\n @transaction.atomic\n def analyze(cls, user_id, project_id, original_id):\n project = ProjectManager().get_project(project_id, user_id)\n label_type = project['label_type']\n original = OriginalManager().get_original(project_id, original_id, status='uploaded')\n storage_manager = StorageManager(project_id, original['storage_id'])\n original_path = storage_manager.get_original_filepath(original['name'])\n storage_config = copy.deepcopy(storage_manager.storage['storage_config'])\n storage_config.update({'path': original_path})\n automan_config = cls.__get_automan_config(user_id)\n automan_config.update({'path': '/projects/' + project_id + '/originals/' + str(original_id) + '/',\n 'label_type': label_type})\n job_config = {\n 'storage_type': storage_manager.storage['storage_type'],\n 'storage_config': storage_config,\n 'automan_config': automan_config,\n }\n job_config_json = json.dumps(job_config)\n new_job = Job(\n job_type='analyzer',\n user_id=user_id,\n project_id=project_id,\n job_config=job_config_json)\n new_job.save()\n if original['file_type'] == 'rosbag':\n job = RosbagAnalyzer(**job_config)\n job.create(cls.__generate_job_name(new_job.id, 'analyzer'))\n res = job.run(namespace=settings.JOB_NAMESPACE)\n return res\n else:\n raise ValidationError()\n\n @staticmethod\n def __get_automan_config(user_id):\n jwt = AccountManager.create_jwt(user_id)\n url = os.environ.get(\"AUTOMAN_URL\")\n port = os.environ.get(\"AUTOMAN_PORT\")\n host = 'http://' + url + ':' + port\n automan_config = {\n 'host': host,\n 'jwt': jwt,\n }\n return automan_config\n\n @staticmethod\n def __get_raw_data_config(project_id, original_id, candidates, name):\n records = {}\n for candidate_id in candidates:\n original_manager = OriginalManager()\n candidate = original_manager.get_dataset_candidate(candidate_id)\n analyzed_info = json.loads(candidate['analyzed_info'])\n records[analyzed_info['topic_name']] = candidate_id\n\n raw_data_config = {\n 'project_id': int(project_id),\n 'original_id': original_id,\n 'candidates': candidates,\n 'records': records,\n 'name': name,\n }\n return raw_data_config\n\n @classmethod\n def __is_unknown_time_limit(cls, unknown_started):\n if not unknown_started:\n return False\n time = datetime.now(timezone.utc) - unknown_started\n if time.seconds > UNKNOWN_LIMIT_TIME:\n return True\n return False\n\n @classmethod\n def __get_job_status(cls, id, job_type):\n namespace = cls.__generate_job_namespace()\n try:\n res = BaseJob().fetch(cls.__generate_job_name(id, job_type), namespace)\n except Exception:\n return None, None, None\n if res['is_succeeded']:\n content = res['content']\n status = 
cls.__get_status_from_k8s_response(content)\n return status, content.start_time, content.completion_time\n else:\n return None, None, None\n\n @staticmethod\n def __get_status_from_k8s_response(content):\n if content.succeeded:\n return STATUS_MAP['succeeded']\n elif content.failed:\n return STATUS_MAP['failed']\n elif content.active:\n return STATUS_MAP['active']\n else:\n return STATUS_MAP['unknown']\n\n @staticmethod\n def __generate_job_name(id, job_type):\n return job_type + '-' + str(id)\n\n # FIXME: Consider security\n @staticmethod\n def __generate_job_namespace():\n return settings.JOB_NAMESPACE\n","sub_path":"automan/api/projects/jobs/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":13008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"644652496","text":"import base64\nimport datetime\nimport io\n\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\n\nimport pandas as pd\nimport plotly.graph_objects as go\n\nfrom rasa_chatlog_processor import RasaChalogProcessor\nimport copy\nimport dash_bootstrap_components as dbc\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\nsuppress_callback_exceptions = True\napp = dash.Dash(__name__,\n # external_stylesheets=external_stylesheets\n external_stylesheets=[dbc.themes.BOOTSTRAP]\n )\nPAGE_SIZE = 10\n\ntabs_styles = {\n 'height': '44px'\n}\ntab_style = {\n 'borderBottom': '1px solid #d6d6d6',\n 'padding': '6px',\n 'fontWeight': 'bold'\n}\n\ntab_selected_style = {\n 'borderTop': '1px solid #d6d6d6',\n 'borderBottom': '1px solid #d6d6d6',\n 'backgroundColor': '#119DFF',\n 'color': 'white',\n 'padding': '6px'\n}\napp.layout = html.Div(\n id='main-div',\n style={\n 'display': 'flex',\n 'flexDirection': 'column'\n },\n children=[\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '100%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n # Allow multiple files to be uploaded\n multiple=True\n ),\n html.Div(\n className=\"d-flex flex-wrap\",\n children=[\n html.Div(id='first-pie', className=\"col-md-6 h-50\"),\n html.Div(id='second-pie', className=\"col-md-6 h-50\"),\n html.Div(id='third-pie', className=\"col-md-6 h-50\"),\n html.Div(id='forth-pie', className=\"col-md-6 h-50\"),\n ]\n ),\n\n dcc.Tabs(\n id=\"tab_parent\",\n value=\"thank\",\n children=[\n dcc.Tab(id=\"thank\", label='Thank', value=\"thank\", style=tab_style, selected_style=tab_selected_style,\n children=[html.Div(id='thank-table'), ]\n ),\n dcc.Tab(label='Shipping', value=\"shipping\", style=tab_style, selected_style=tab_selected_style,\n children=[html.Div(id='shipping-table'), ]\n ),\n dcc.Tab(label='Handover', value=\"handover\", style=tab_style, selected_style=tab_selected_style,\n children=[html.Div(id='handover-table'), ]\n ),\n dcc.Tab(label='Silence', value=\"silence\", style=tab_style, selected_style=tab_selected_style,\n children=[html.Div(id='silence-table'), ]\n ),\n dcc.Tab(label='Other', value=\"other\", style=tab_style, selected_style=tab_selected_style,\n children=[html.Div(id='other-table'), ]\n ),\n dcc.Tab(label='Agree', value=\"agree\", style=tab_style, selected_style=tab_selected_style,\n children=[html.Div(id='agree-table'), ]\n ),\n ],\n ),\n\n 
html.Div(id='df-data', style={'display': 'none'}),\n\n ])\n\n\ndef create_trace_uc_propotion_in_month(total: int, uc1: int, uc2: int):\n not_uc1_uc2 = total - uc1 - uc2\n colors = ['mediumturquoise', 'darkorange', 'lightgreen']\n trace = go.Pie(\n labels=['Other', 'UC 1', 'UC 2'],\n values=[not_uc1_uc2, uc1, uc2],\n hoverinfo='label+percent',\n textinfo='label+value+percent',\n textfont_size=15,\n marker=dict(colors=colors, line=dict(color='#000000', width=2))\n )\n first_pie = html.Div(\n className=\"six columns chart_div pretty_container\",\n children=[\n html.P(\"UC1 and UC2 proportion in June\"),\n dcc.Graph(\n figure={\"data\": [trace]},\n style={\"height\": \"90%\", \"width\": \"98%\"},\n config=dict(displayModeBar=False),\n ),\n ],\n ),\n return first_pie\n\n\ndef create_trace_outcome_proportion_in_uc(outcome_uc1: dict, outcome_uc2: dict):\n uc_1_values = [value for index, value in outcome_uc1.items()]\n uc_2_values = [value for index, value in outcome_uc2.items()]\n values = [sum(x) for x in zip(uc_1_values, uc_2_values)]\n trace = go.Pie(\n labels=['thanks', 'shipping', 'handover', \"silence\", \"other\", \"agree\"],\n values=values,\n direction=\"clockwise\",\n sort=False,\n rotation=120,\n hoverinfo='label+percent',\n textinfo='label+value',\n textfont_size=15,\n marker=dict(line=dict(color='#000000', width=2))\n )\n second_pie = html.Div(\n className=\"six columns chart_div pretty_container\",\n children=[\n html.P(\"Outcomes proportion in UC1 and UC2 conversations\"),\n dcc.Graph(\n figure={\"data\": [trace]},\n style={\"height\": \"90%\", \"width\": \"98%\"},\n config=dict(displayModeBar=False),\n ),\n ],\n ),\n return second_pie\n\n\ndef create_trace_outcome_uc1(outcome_uc1: dict):\n values = [value for index, value in outcome_uc1.items()]\n labels = ['thanks', 'shipping', 'handover', \"silence\", \"other\", \"agree\"]\n trace_1 = go.Pie(labels=labels, values=values, scalegroup='one',\n name=\"UC1\", direction=\"clockwise\", sort=False, rotation=120, hoverinfo='label+percent',\n textinfo='label+value', textfont_size=15,\n marker=dict(line=dict(color='#000000', width=2)))\n\n third_pie = html.Div(\n className=\"six columns chart_div pretty_container\",\n children=[\n html.P(\"Outcomes of UC1\"),\n dcc.Graph(\n figure={\"data\": [trace_1]},\n style={\"height\": \"90%\", \"width\": \"98%\"},\n config=dict(displayModeBar=False),\n ),\n ],\n ),\n return third_pie\n\n\ndef create_trace_outcome_uc2(outcome_uc2: dict):\n values = [value for index, value in outcome_uc2.items()]\n labels = ['thanks', 'shipping', 'handover', \"silence\", \"other\", \"agree\"]\n trace_2 = go.Pie(labels=labels, values=values, scalegroup='one',\n name=\"UC2\", direction=\"clockwise\", sort=False, rotation=120, hoverinfo='label+percent',\n textinfo='label+value', textfont_size=15,\n marker=dict(line=dict(color='#000000', width=2)))\n forth_pie = html.Div(\n className=\"six columns chart_div pretty_container\",\n children=[\n html.P(\"Outcomes of UC2\"),\n dcc.Graph(\n figure={\"data\": [trace_2]},\n style={\"height\": \"90%\", \"width\": \"98%\"},\n config=dict(displayModeBar=False),\n ),\n ],\n ),\n return forth_pie\n\n\ndef parse_contents(contents, filename, date):\n content_type, content_string = contents.split(',')\n\n decoded = base64.b64decode(content_string)\n try:\n if 'csv' in filename:\n # Assume that the user uploaded a CSV file\n df = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n elif 'xls' in filename:\n # Assume that the user uploaded an excel file\n df = 
pd.read_excel(io.BytesIO(decoded))\n except Exception as e:\n print(e)\n return html.Div([\n 'There was an error processing this file.'\n ])\n\n return df\n\n\ndef generate_table(df: pd.DataFrame):\n return html.Div([\n dash_table.DataTable(\n id='datatable-paging',\n page_action=\"native\",\n page_current=0,\n page_size=10,\n style_header={\n 'backgroundColor': 'white',\n 'fontWeight': 'bold'\n },\n style_data={ # style cho ca header va cell\n # 'whiteSpace': 'normal',\n # 'height': 'auto',\n # 'lineHeight': '15px',\n },\n style_cell={\n 'overflow': 'hidden',\n 'textOverflow': 'ellipsis',\n 'minWidth': '0px',\n 'width': '160px', 'maxWidth': '300px',\n 'textAlign': \"left\",\n },\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'width': '20px'\n } for c in ['use_case']\n ],\n tooltip_data=[\n {\n column: {'value': str(value), 'type': 'markdown'}\n for column, value in row.items()\n } for row in df.to_dict('rows')\n ],\n style_data_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(248, 248, 248)'\n }\n ],\n tooltip_duration=None,\n fixed_columns={'headers': True, 'data': 1},\n # style_table={'overflowX': 'auto'},\n style_table={'minWidth': '100%'},\n\n data=df.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in df.columns]\n ),\n ])\n\n\ndef get_number_of_each_uc(df: pd.DataFrame):\n total = len(list(dict.fromkeys(list(df[\"conversation_id\"]))))\n uc1 = len(df[df[\"use_case\"] == \"uc_1\"])\n uc2 = len(df[df[\"use_case\"] == \"uc_2\"])\n return total, uc1, uc2\n\n\ndef get_number_of_each_outcome_each_uc(df: pd.DataFrame):\n \"\"\" thank -> shipping -> handover -> silence -> other -> agree\"\"\"\n uc_outcome = {\n \"uc_1\": {\"thank\": 0, \"shipping_order\": 0, \"handover_to_inbox\": 0, \"silence\": 0, \"other\": 0, \"agree\": 0},\n \"uc_2\": {\"thank\": 0, \"shipping_order\": 0, \"handover_to_inbox\": 0, \"silence\": 0, \"other\": 0, \"agree\": 0},\n }\n uc1_uc_2_conversation_id = list(df[(df[\"use_case\"] == \"uc_1\") | (df[\"use_case\"] == \"uc_2\")][\"conversation_id\"])\n uc1_uc_2_conversation_id = list(dict.fromkeys(uc1_uc_2_conversation_id))\n\n for id in uc1_uc_2_conversation_id:\n sub_df = df[df[\"conversation_id\"] == id]\n use_case = list(filter(lambda x: x != \"\", list(sub_df[\"use_case\"])))[0]\n outcome = list(filter(lambda x: x != \"\", list(sub_df[\"outcome\"])))[0]\n uc_outcome[use_case][outcome] += 1\n return uc_outcome[\"uc_1\"], uc_outcome[\"uc_2\"]\n\n\ndef get_conversation_each_outcome(df: pd.DataFrame):\n column_list = [\"conversation_id\", \"use_case\", \"sender_id\", \"user_message\", \"bot_message\", \"created_time\", \"intent\",\n \"entities\"]\n thank_df = df[df[\"conversation_id\"].isin(list(df[df[\"outcome\"] == \"thanks\"][\"conversation_id\"]))][column_list]\n shipping_order_df = df[df[\"conversation_id\"].isin(list(df[df[\"outcome\"] == \"shipping_order\"][\"conversation_id\"]))][\n column_list]\n handover_df = df[df[\"conversation_id\"].isin(list(df[df[\"outcome\"] == \"handover_to_inbox\"][\"conversation_id\"]))][\n column_list]\n silence_df = df[df[\"conversation_id\"].isin(list(df[df[\"outcome\"] == \"silence\"][\"conversation_id\"]))][column_list]\n other_df = df[df[\"conversation_id\"].isin(list(df[df[\"outcome\"] == \"other\"][\"conversation_id\"]))][column_list]\n agree_df = df[df[\"conversation_id\"].isin(list(df[df[\"outcome\"] == \"agree\"][\"conversation_id\"]))][column_list]\n return thank_df, shipping_order_df, handover_df, silence_df, other_df, agree_df\n\n\n@app.callback(\n Output('df-data', 
'children'),\n [Input('upload-data', 'contents')],\n [\n State('upload-data', 'filename'),\n State('upload-data', 'last_modified')\n ]\n)\ndef handle_df(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n df = children[0]\n processor = RasaChalogProcessor()\n df = processor.process_rasa_chatlog(\"06\", \"abc\", df)\n return df.to_json(date_format='iso', orient='split')\n else:\n return None\n\n\n@app.callback(\n [\n Output('first-pie', 'children'),\n Output('second-pie', 'children'),\n Output('third-pie', 'children'),\n Output('forth-pie', 'children'),\n Output('thank-table', 'children'),\n Output('shipping-table', 'children'),\n Output('handover-table', 'children'),\n Output('silence-table', 'children'),\n Output('other-table', 'children'),\n Output('agree-table', 'children'),\n ],\n [\n Input('df-data', 'children')\n ],\n)\ndef update_output(df):\n if df is not None:\n df = pd.read_json(df, orient=\"split\")\n\n total, uc1, uc2 = get_number_of_each_uc(df[[\"conversation_id\", \"use_case\"]])\n outcome_uc1, outcome_uc2 = get_number_of_each_outcome_each_uc(df[[\"conversation_id\", \"use_case\", \"outcome\"]])\n\n first_pie = create_trace_uc_propotion_in_month(total, uc1, uc2)\n second_pie = create_trace_outcome_proportion_in_uc(outcome_uc1, outcome_uc2)\n third_pie = create_trace_outcome_uc1(outcome_uc1)\n forth_pie = create_trace_outcome_uc2(outcome_uc2)\n\n thank_df, shipping_order_df, handover_df, silence_df, other_df, agree_df = get_conversation_each_outcome(df[[\n \"conversation_id\", \"use_case\", \"outcome\", \"sender_id\", \"user_message\", \"bot_message\", \"created_time\",\n \"intent\", \"entities\"]])\n\n thank_df = generate_table(thank_df)\n shipping_order_df = generate_table(shipping_order_df)\n handover_df = generate_table(handover_df)\n silence_df = generate_table(silence_df)\n other_df = generate_table(other_df)\n agree_df = generate_table(agree_df)\n\n return first_pie, second_pie, third_pie, forth_pie, thank_df, shipping_order_df, handover_df, silence_df, other_df, agree_df\n else:\n return \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"\n\n\nif __name__ == '__main__':\n app.run_server(debug=False)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"330097634","text":"#\n# Copyright 2013 Simone Campagna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n__author__ = \"Simone Campagna\"\n\nfrom distutils.core import setup\nimport os\nimport sys\n\nscripts = [\n\t'bin/sheru',\n]\n\ntry:\n dirname = os.path.dirname(os.path.abspath(sys.argv[0]))\n py_dirname = os.path.join(dirname, 'packages')\n sys.path.insert(0, py_dirname)\n\n from sheru import conf\n version = conf.VERSION\nfinally:\n del sys.path[0]\n\nsetup(\n name = \"sheru\",\n version = str(version),\n requires = [],\n 
description = \"Tool to manage remote shell profiles\",\n author = \"Simone Campagna\",\n author_email = \"simone.campagna11@gmail.com\",\n url=\"https://github.com/simone-campagna/sheru\",\n download_url = 'https://github.com/simone-campagna/sheru/archive/{}.tar.gz'.format(version),\n packages = [\n 'sheru',\n ],\n package_dir = {\n '': 'packages',\n },\n scripts = scripts,\n package_data = {},\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"212146701","text":"import xn2v\nfrom xn2v import CSFGraph\nimport os\nimport unittest\nfrom unittest import TestCase\nfrom xn2v.word2vec import SkipGramWord2Vec\n\n\nclass TestSkipGramWord2Vec(TestCase):\n\n def test_embedding(self):\n training_file = os.path.join(\n os.path.dirname(__file__),\n 'data',\n 'karate.train'\n )\n output_file = os.path.join(\n os.path.dirname(__file__),\n 'data',\n 'disease.embedded'\n )\n training_graph = CSFGraph(training_file)\n training_graph.print_edge_type_distribution()\n\n p = 1\n q = 1\n gamma = 1\n useGamma = False\n hetgraph = xn2v.hetnode2vec.N2vGraph(\n training_graph, p, q, gamma, useGamma)\n\n walk_length = 80\n num_walks = 25\n walks = hetgraph.simulate_walks(num_walks, walk_length)\n\n worddictionary = training_graph.get_node_to_index_map()\n reverse_worddictionary = training_graph.get_index_to_node_map()\n\n numberwalks = []\n for w in walks:\n nwalk = []\n for node in w:\n i = worddictionary[node]\n nwalk.append(i)\n numberwalks.append(nwalk)\n\n model = SkipGramWord2Vec(numberwalks, worddictionary=worddictionary,\n reverse_worddictionary=reverse_worddictionary, num_steps=100)\n model.train(display_step=10)\n model.write_embeddings(output_file)\n","sub_path":"tests/test_skip_gram_word2vec.py","file_name":"test_skip_gram_word2vec.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"457875529","text":"from flask import Flask, render_template, redirect, Response\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField\nfrom wtforms.validators import DataRequired\nfrom music_parser import parser\n\n\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\nclass MyForm(FlaskForm):\n name = StringField('Логин', validators=[DataRequired()])\n\n\napp = Flask(__name__)\napp.secret_key = b'\\xb6V\\x07sx\\xbc8=^\\xd1-\\xa8\\x9c\\xd0\"H'\napp.register_error_handler(404, page_not_found)\nbootstrap = Bootstrap(app)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/music/')\ndef music():\n form = MyForm()\n return render_template('search.html', form=form)\n\n\n@app.route('/submit', methods=('GET', 'POST'))\ndef submit():\n form = MyForm()\n if form.validate_on_submit():\n return redirect(f'/show/{form.name.data}')\n return render_template('search.html', form=form)\n\n\n@app.route('/show/', methods=('GET', 'POST'))\ndef show():\n form = MyForm()\n return render_template('search.html', form=form)\n\n\n@app.route('/show/', methods=('GET', 'POST'))\ndef show_name(name):\n result = parser.main(name)\n return render_template('result.html', artists=result[0], genres=result[1])\n\n\n@app.route('/getInfo/', methods=('GET', 'POST'))\ndef get_info(artist_id):\n print(artist_id)\n ret = parser.sub_query(artist_id)\n return Response(response=ret, status=200, 
mimetype=\"application/json\")\n\n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"587259183","text":"import os\nfrom django.test import TestCase\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom project.apps.shop.models import Item, Sale, Employee, History\n\n\nclass ResponseTestCase(TestCase):\n\n fixtures = ['initial_data.json', 'initial_shop.json']\n\n def setUp(self):\n \"\"\"\n :return:\n \"\"\"\n # User as Byuer\n self.user = User.objects.create_user(username='testuser', password='12345')\n login = self.client.login(username='testuser', password='12345')\n\n def test_home_point_return_200(self):\n \"\"\"\n :return:\n \"\"\"\n print('\\nRunning test [test_home_point_return_200]')\n\n self.response = self.client.get('/')\n print('Response status code:', self.response.status_code)\n\n self.assertEqual(self.response.status_code, 200)\n\n def test_home_page_contains_items(self):\n \"\"\"\n :return:\n \"\"\"\n print('\\nRunning test [test_home_page_contains_items]')\n\n self.response = self.client.get('/')\n item_list = Item.objects.values_list('name', flat=True).all()[:9]\n items_names = ', '.join(list(item_list))\n print('Contains item names:', items_names)\n\n for item_name in list(item_list):\n self.assertContains(self.response, item_name)\n\n def test_detail_item(self):\n \"\"\"\n :return:\n \"\"\"\n print('\\nRunning test [test_detail_item]')\n\n image_path = os.path.join(settings.BASE_DIR, 'fixtures/test_upload_image.jpeg')\n\n item = Item(\n name=\"Test Item\",\n price=100.25,\n description='Item Description',\n image=SimpleUploadedFile(\n name='test_image.jpg',\n content=open(image_path, 'rb').read(),\n content_type='image/jpeg'\n )\n )\n\n item.save()\n\n saved_image_path = Item.objects.get(id=item.id).image.path\n print('Saved image path:', saved_image_path)\n self.failUnless(open(saved_image_path), 'Image not found')\n\n detail_url = '/item-detail/%d/' % item.id\n print('Detail page URL:', detail_url)\n\n self.response = self.client.get(detail_url)\n self.assertEqual(self.response.status_code, 200)\n print('Response status code:', self.response.status_code)\n\n def test_save_sale(self):\n \"\"\"\n :return:\n \"\"\"\n print('\\nRunning test [test_save_sale]')\n\n item_id = Item.objects.latest('id').id\n employee_id = Employee.objects.first().id\n\n item_buy_url = '/item-buy/%d/' % item_id\n print('Item buy AJAX URL:', item_buy_url)\n\n result = self.client.post(item_buy_url, {\n 'buyer': self.user,\n 'employee_id': employee_id,\n 'amount': 10,\n 'price': 100.25\n }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n\n print(result)\n self.assertEqual(result.status_code, 200)\n\n sale_count = Sale.objects.count()\n\n sale = Sale(\n item_id=item_id,\n buyer=self.user,\n employee_id=employee_id,\n amount=2,\n price=100.25\n )\n\n sale.save()\n print('Sale total count: %d, sale inserted id: %d' % (sale_count, sale.id))\n self.assertGreater(sale.id, sale_count)\n\n def test_employee_sales_ordering(self):\n \"\"\"\n :return:\n \"\"\"\n print('\\nRunning test [test_employee_sales_ordering]')\n\n employee = Employee(\n user=self.user,\n name='Test Employee'\n )\n\n employee.save()\n\n self.response = self.client.get('/sales')\n print('Response status code:', self.response.status_code)\n\n 
self.assertEqual(self.response.status_code, 200)\n\n def test_buyer_purchases_ordering(self):\n \"\"\"\n :return:\n \"\"\"\n print('\\nRunning test [test_buyer_purchases_ordering]')\n\n self.response = self.client.get('/purchases')\n print('Response status code:', self.response.status_code)\n\n self.assertEqual(self.response.status_code, 200)\n\n def test_history_price_triggered(self):\n \"\"\"\n :param mock:\n :return:\n \"\"\"\n print('\\nRunning test [test_post_save_signal_history_price_triggered]')\n\n # get last Item\n item = Item.objects.latest('id')\n print('Current Item ID %d price: %.2f' % (item.id, item.price))\n\n item.price += 10.10\n item.save()\n\n print('New Item price: %.2f' % item.price)\n\n history_price = History.objects.filter(item=item).latest('id')\n print('Changed in history Item ID %d price %.2f' % (history_price.item.id, history_price.price))\n","sub_path":"project/apps/shop/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"364642697","text":"# -*- coding = utf-8 -*-\n# @time:2022/9/28 13:56\n# Author:\n# @File:mmgpmedia.py\n# @Software:PyCharm\n\n\nfrom lxml import etree\nimport requests\nimport re\n\n\nclass Mmgb(object):\n\n def __init__(self):\n self.url = 'https://www.mmgpmedia.com/web/YW/index.html'\n\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}\n\n def get_index(self, pageNum):\n '''\n https://cms.offshoremedia.net/front/list/latest?pageNum=1&pageSize=10&siteId\n =752495108259188736&channelId=752840161313955840\n https://cms.offshoremedia.net/front/list/latest?pageNum=2&pageSize=10&siteId\n =752495108259188736&channelId=752840161313955840\n https://cms.offshoremedia.net/front/list/latest?pageNum=3&pageSize=10&siteId\n =752495108259188736&channelId=752840161313955840\n '''\n\n api_url = 'https://cms.offshoremedia.net/front/list/latest?pageNum={' \\\n '}&pageSize=10&siteId=752495108259188736&channelId=752840161313955840' \\\n ''.format(\n pageNum)\n\n response = requests.get(url=api_url, headers=self.headers).json()\n\n # print(response)\n\n page_total = response['info']['total']\n\n for i in response['info']['list']:\n contentTitle = i['contentTitle'] # 标题\n contentStaticPage = i['contentStaticPage'] # 详情页链接\n contentTitleImg = i['contentTitleImg'] # 图片链接\n contentPublishTime = i['contentPublishTime'] # 发布时间\n\n print(contentTitle)\n print(contentStaticPage)\n print(contentTitleImg)\n\n\nif __name__ == '__main__':\n mmgb = Mmgb()\n mmgb.get_index(pageNum=1)\n\n# cookies = {\n# 'zh_choose': 'n',\n# }\n#\n# headers = {\n# 'Connection': 'keep-alive',\n# 'Pragma': 'no-cache',\n# 'Cache-Control': 'no-cache',\n# 'Upgrade-Insecure-Requests': '1',\n# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (\n# KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36',\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,\n# image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n# 'Accept-Language': 'zh-CN,zh;q=0.9',\n# # 'Cookie': 'zh_choose=n',\n# }\n#\n# response = requests.get(\n# 'http://www.mmgpmedia.com/static/content/YW/2022-09-27/1024410136443228160.html',\n# cookies=cookies, headers=headers, verify=False)\n#\n# response.encoding = 'utf-8'\n# print(response.text)\n\n'''\n\nts: 1664413684706\nrs: Pg7NsoS8LTyWbzuT\nsignature: 
a09535026d63254fba6b280fee8f0dba3f9b7263fe4860bed4e8cb3a0f47120a\ncallback: jQuery111209405707990736758_1664413681228\ntype: 2\npage_size: 10\npage: 1\n_: 1664413681233\n\n\nts: 1664413969594\nrs: 8SO7CvoZ7D9EOcdj\nsignature: b8521efc052c718e23bae24050da9d3b581d43a70e575c673295081509c933a4\ncallback: jQuery111206853280760062974_1664413966090\ntype: 2\npage_size: 10\npage: 1\n_: 1664413966098\n\n\n\n'''\n","sub_path":"PoliceProject/mmgpmedia.py","file_name":"mmgpmedia.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"203701528","text":"from sys import stdin\n\ndef check(num):\n visited[num] = True\n stack = [num]\n\n while stack:\n node = stack.pop(0)\n\n for next_node in tree[node]:\n if tree[node][next_node] == 1:\n if not visited[next_node]:\n visited[next_node] = True\n stack.append(next_node)\n tree[node][next_node] = 0\n tree[next_node][node] = 0\n else:\n return False\n return True\n\ncase = 1\nwhile True:\n N, M = map(int, stdin.readline().split())\n if N + M == 0:\n break\n parent_cnt = [0] * (N+1)\n tree = [{} for _ in range(N + 1)]\n for _ in range(M):\n x, y = map(int, stdin.readline().split())\n tree[x][y] = 1\n tree[y][x] = 1\n cnt = 0\n visited = [False] * (N + 1)\n for num in range(1, N + 1):\n if not visited[num]:\n if check(num):\n cnt += 1\n if cnt == 0:\n print(f'Case {case}: No trees.')\n elif cnt == 1:\n print(f'Case {case}: There is one tree.')\n else:\n print(f'Case {case}: A forest of {cnt} trees.')\n\n case += 1\n","sub_path":"6주차/28_트리/4803_yonghee.py","file_name":"4803_yonghee.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"91875238","text":"n = int(input(\"Número: \"))\ntriangular = False\nnum = []\nx = 1\nwhile x * (x + 1) * (x + 2) <= n:\n if x * (x + 1) * (x + 2) == n:\n triangular = True\n x += 1\nif triangular:\n print(\"{} * {} * {} = {}\".format(x - 1, x, x + 1, n))\n print(\"Verdadeiro.\")\nelse:\n print(\"Falso.\")\n","sub_path":"p1lp1/Testes de Conhecimento Inicial/triangular.py","file_name":"triangular.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"220031082","text":"from __future__ import print_function\n\nimport random\n\n\ndef attack(args, sender, online_users):\n print(\"{0}: !attack {1}\".format(sender, repr(args)))\n usage = '!attack '\n if len(args) != 1:\n return usage\n\n if args[0].lower() not in online_users:\n return \"Sorry, user {0} is not online at the moment!\".format(args[0])\n\n if args[0].lower() == sender.lower():\n return \"RKN forbids me to do that!\"\n\n if args[0].lower() == \"arachnobot\":\n return \"Don't ya dare hurt ma bot!\"\n\n # print (\"{0} vs {1}\".format(sender, args[0]))\n result = [\"Battle: @{0} vs @{1}!\".format(sender, args[0])]\n\n attack_d = random.randint(1, 6)\n defence_d = random.randint(1, 6)\n\n result.append(\"@{0} rolls {1}, @{2} rolls {3}\".format(sender, attack_d, args[0], defence_d))\n\n if attack_d > defence_d:\n result.append(\"@{0} wins!\".format(sender))\n elif attack_d < defence_d:\n result.append(\"@{0} wins!\".format(args[0]))\n else:\n result.append(\"It's a draw!\")\n\n return result\n","sub_path":"src/lib/commands/attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} 
+{"seq_id":"80729989","text":"import tensorflow as tf\n\nimport sys\nsys.path.append('../../')\n\nfrom models.memorynet.model import MemoryNet\nfrom train.trainer import Trainer\nfrom tasks.babi.data import DataSource\n\n\nif __name__ == '__main__':\n\n batch_size = 32\n\n datasrc = DataSource(datadir='../../../datasets/babi/en-10k/', task_id=19,\n batch_size=batch_size)\n\n # get vocab size from data source\n vocab_size = datasrc.metadata['vocab_size']\n memsize = datasrc.metadata['memory_size']\n sentence_size = datasrc.metadata['sentence_size']\n\n # instantiate model\n model = MemoryNet(hdim=20, num_hops=3, memsize=memsize, \n sentence_size=sentence_size, vocab_size=vocab_size,\n lr = 0.001)\n\n # gpu config\n config = tf.ConfigProto()\n #config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess:\n # init session\n sess.run(tf.global_variables_initializer())\n\n # init trainer\n trainer = Trainer(sess, model, datasrc, batch_size)\n\n # fit model\n trainer.fit(epochs=10000, verbose=False)\n","sub_path":"tensorsoup/apps/babi/memnet.py","file_name":"memnet.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"380830220","text":"# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging\nfrom collections import namedtuple\n\nfrom pyhocon import ConfigTree # noqa: F401\nfrom typing import List, Any # noqa: F401\n\nfrom databuilder.extractor.base_bigquery_extractor import BaseBigQueryExtractor\nfrom databuilder.models.application import Application\n\n\nDatasetRef = namedtuple('DatasetRef', ['datasetId', 'projectId'])\nTableKey = namedtuple('TableKey', ['schema', 'table_name'])\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass BigQueryApplicationExtractor(BaseBigQueryExtractor):\n\n \"\"\" \n\n \"\"\"\n #PROJECT_ID_KEY = 'project_id'\n\n def init(self, conf):\n # type: (ConfigTree) -> None\n BaseBigQueryExtractor.init(self, conf)\n self.grouped_tables = set([])\n\n def _retrieve_tables(self, dataset):\n # type: () -> Any\n for page in self._page_table_list_results(dataset):\n if 'tables' not in page:\n continue\n\n for table in page['tables']:\n tableRef = table['tableReference']\n table_id = tableRef['tableId']\n\n # BigQuery tables that have 8 digits as last characters are\n # considered date range tables and are grouped together in the UI.\n # ( e.g. ga_sessions_20190101, ga_sessions_20190102, etc. )\n if self._is_sharded_table(table_id):\n # If the last eight characters are digits, we assume the table is of a table date range type\n # and then we only need one schema definition\n table_prefix = table_id[:-BigQueryApplicationExtractor.DATE_LENGTH]\n if table_prefix in self.grouped_tables:\n # If one table in the date range is processed, then ignore other ones\n # (it adds too much metadata)\n continue\n\n table_id = table_prefix\n self.grouped_tables.add(table_prefix)\n\n table = self.bigquery_service.tables().get(\n projectId=tableRef['projectId'],\n datasetId=tableRef['datasetId'],\n tableId=tableRef['tableId']).execute(num_retries=BigQueryApplicationExtractor.NUM_RETRIES)\n\n # BigQuery tables also have interesting metadata about partitioning\n # data location (EU/US), mod/create time, etc... 
Extract that some other time?\n #cols = []\n # Not all tables have schemas\n #if 'schema' in table:\n # schema = table['schema']\n # if 'fields' in schema:\n # total_cols = 0\n # for column in schema['fields']:\n # total_cols = self._iterate_over_cols('', column, cols, total_cols + 1)\n\n table_app = Application(\n task_id='la_task', # type: str\n dag_id='el_id', # type: str,\n application_url_template='el_template', # type: str\n db_name='bigquery', # type: str\n cluster=tableRef['projectId'], # type: str\n schema=tableRef['datasetId'], # type: str\n table_name=table_id, # type: str\n exec_date='20200811'\n )\n \n #database='bigquery',\n #cluster=tableRef['projectId'],\n #schema=tableRef['datasetId'],\n #name=table_id,\n #description=table.get('description', ''),\n #columns=cols,\n #is_view=table['type'] == 'VIEW')\n\n yield(table_app)\n\n \n def get_scope(self):\n # type: () -> str\n return 'extractor.bigquery_table_metadata'\n","sub_path":"databuilder/extractor/bigquery_application_extractor.py","file_name":"bigquery_application_extractor.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"379227993","text":"from configuration.client.models import AgentConfig\nfrom xilema.utils import get_no_abstract_subclasses\nfrom configuration.models import Configuration\n\n\nclass ConfigurationFilter(object):\n \"\"\"\n This class gets the configurations added after an\n specific configuration\n \"\"\"\n\n def __init__(self, instance):\n \"\"\"\n The instance param is an instance of\n CheckerConfig model\n \"\"\"\n self.instance = instance\n self.configs = []\n self.models = []\n\n def __delete_parent_config(self):\n \"\"\"\n This method deletes the instances of parent configuration\n \"\"\"\n numbers = []\n confs = []\n for conf in self.configs:\n if not conf.id in numbers:\n numbers.append(conf.id)\n confs.append(conf)\n self.configs = confs\n return self.configs\n\n def get_configs(self):\n \"\"\"\n This method gets the configurations made after an\n specific configuration\n \"\"\"\n models = get_no_abstract_subclasses(AgentConfig)\n models.reverse() # the models are reversed for not repeat the configs\n last = Configuration.objects.filter(id=self.instance.reported_config)\n if last:\n last = last[0]\n for mod in models:\n objects = mod.objects.filter(\n is_active=True, last_update__gte=last.create_time)\n self.configs.extend(objects)\n self.__delete_parent_config()\n return self.configs\n\n def get_model_name(self):\n \"\"\"\n This method gets the model names from a list of configurations\n \"\"\"\n models = []\n for c in self.configs:\n models.append(c._meta.object_name.lower())\n d = set(models)\n while d:\n self.models.append(d.pop())\n return self.models\n","sub_path":"comunication/config/config_filter.py","file_name":"config_filter.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"71684965","text":"#!/usr/bin/python\n\nimport cv2\nimport numpy\nfrom optparse import OptionParser\nimport os\nfrom PIL import Image\n\n\ndef ask_infile():\n\t# ask_infile allows the user to browse and select the infile\n\timport Tkinter, Tkconstants, tkFileDialog\n\troot = Tkinter.Tk()\n\tfile = tkFileDialog.askopenfilename(parent = root,\n\t\tinitialdir = \"/home/cbernard/clones/Single-cell_RNA-Seq_data_analysis\",\n\t\ttitle = 'Please, select a filtered heatmap in csv')\n\treturn file\n\ndef 
get_binary_mat(file):\n\t# get_binary_img returns a binary image of the heatmap where\n\t# every element > '0' is assigned 255 while every element == '0' or 'NA'\n\t# is assigned '0'\n\tdata = numpy.genfromtxt(file, skip_header = 1, delimiter = \",\",\n\t\tmissing_values = \"NA\", filling_values = 0)\n\t# Get rid of the first column\n\tdata = data[:,1:]\n\t# the mat is turned into binary mat\n\tdata[data != 0] = 255\n\t# while elements on diagonal are assigned '0'\n\tdi = numpy.diag_indices(data.shape[0])\n\tdata[di] = 0\n\treturn data.astype(numpy.uint8)\n\ndef dilation(bin_mat, height, width, n, shape):\n\tkernel = cv2.getStructuringElement(shape, (width, height))\n\tdil_mat = cv2.dilate(bin_mat, kernel, iterations = n)\n\treturn dil_mat\n\ndef erosion(bin_mat, height, width, n, shape):\n\tkernel = cv2.getStructuringElement(shape, (width, height))\n\tero_mat = cv2.erode(bin_mat, kernel, iterations = n)\n\treturn ero_mat\n\ndef __main__():\n\n\tparser = OptionParser()\n\tparser.add_option(\"--asso_score\", dest = \"assoScore\", action = \"store\", nargs = 1, metavar = \"method\", type = \"str\")\n\topt, arg = parser.parse_args()\n\n\tif 1 == 1:\n\t\tinfile = \"/home/cbernard/clones/filtered-heatmap_toto100.csv\"\n\t\tinfile=\"/home/cbernard/clones/Single-cell_RNA-Seq_data_analysis/Results/GeneExp_Associations_Matrices/1-Clustered_Matrices/MINE_matrices/Batch_A471_biopsy/1000_mostVariantGenes/Raw_Data/MIC-p^2_nonlinearity/Filtered-Heatmap_MIC-p2_(non-linearity)_mat.csv\"\n\telse:\n\t\tinfile = ask_infile()\n\n\toutdir = os.path.join(os.path.dirname(infile), 'Groups_of_Associated_Genes')\n\tif not os.path.exists(outdir):\n\t\tos.makedirs(outdir)\n\n\tbase = os.path.basename(infile.rsplit('.', 1)[0])\n\toutcsv = os.path.join(outdir, base + '_IJ_gene_groups.csv')\n\toutimg = os.path.join(outdir, base + '_only_gene_groups.png')\n\n\tmat = get_binary_mat(infile)\n\traw_img = Image.fromarray(mat)\n\n\tif opt.assoScore == \"DCOR\":\n\n\t\t# Closing\n\t\tdil_mat = dilation(mat, 3, 3, 4, cv2.MORPH_RECT)\n\t\tfinal_mat = erosion(dil_mat, 10, 10, 1, cv2.MORPH_RECT)\n\n\telif opt.assoScore == \"PEARSON\" or opt.assoScore == \"KENDALL\" or opt.assoScore == \"SPEARMAN\":\n\n\t\t# Opening (enhance mostly horizontal lines)\n\t\thor_ero_mat = erosion(mat, 1, 4, 1, cv2.MORPH_RECT)\n\t\thor_op_mat = dilation(hor_ero_mat, 3, 15, 1, cv2.MORPH_RECT)\n\n\t\t# Opening (enhance mostly vertical lines)\n\t\tver_ero_mat = erosion(mat, 4, 1, 1, cv2.MORPH_RECT)\n\t\tver_op_mat = dilation(ver_ero_mat, 15, 3, 1, cv2.MORPH_RECT)\n\n\t\t# Fill a matrix with the two opened matrices\n\t\tfinal_mat = hor_op_mat + ver_op_mat\n\t\tfinal_mat[final_mat < 0] = 255\n\n\telif opt.assoScore == \"MIC\":\n\n\t\t# Opening\n\t\tero_mat = erosion(mat, 2, 2, 1, cv2.MORPH_RECT)\n\t\tfinal_mat = dilation(ero_mat, 3, 3, 2, cv2.MORPH_RECT)\n\n\telif opt.assoScore == \"toto\":\n\t\t# Opening\n\t\tero_mat = erosion(mat, 3, 3, 1, cv2.MORPH_RECT)\n\t\tfinal_mat = dilation(ero_mat, 3, 3, 2, cv2.MORPH_RECT)\n\telse:\n\n\t\tfinal_mat = mat\n\n\t# Erase False Positives\n\tfinal_mat[mat == 0] = 0\n\tfinal_img = Image.fromarray(final_mat)\n\n\tif 1 == 1:\n\t\traw_img.show()\n\t\tfinal_img.show()\n\n\tfinal_img.save(outimg)\n\n\ti, j = numpy.nonzero(final_mat)\n\tindices_mat = numpy.vstack((i + 1, j + 1)).T\n\tnumpy.savetxt(outcsv, indices_mat, delimiter = \",\", header = 'I, J', fmt = '%d')\n\nif 
__name__==\"__main__\":__main__()","sub_path":"Scripts/1-Detect_Associations_Within_Pairs_of_Gene_Expressions/4-Isolate_&_Plot_Clusters_of_Genes_within_Heatmap/draft/1-isolate_groups_of_associated_genes.py","file_name":"1-isolate_groups_of_associated_genes.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"620374105","text":"import sys\nimport random\n\nf1 = sys.argv[1]\ndata = open(f1)\nfeatures=[]\ni=0\nl=data.readline()\n\n#reading data from data file and adding 1 to each row\nwhile(l!=''):\n split_data = l.split()\n split_data_len =len(split_data)\n l2=[]\n\n######adding 1 to each row for getting intercept\n for i in range(0,split_data_len,1):\n l2.append(float(split_data[i]))\n if i ==(split_data_len-1):\n l2.append(float(1))\n######adding split data and 1 to features \n features.append(l2)\n l = data.readline()\n\nrows =len(features)\ncols=len(features[0])\n\n#reading labels from label file and converting 0 to -1 \nf2 = sys.argv[2]\ntrainlabels=open(f2)\nlabels={}\ncount_labels=[0]*2\nl = trainlabels.readline()\n \nwhile(l!=''):\n split_labels= l.split()\n if int(split_labels[0])==0:\n labels[int(split_labels[1])] = -1\n else:\n labels[int(split_labels[1])] = int(split_labels[0])\n count_labels[int(split_labels[0])] +=1 \n l = trainlabels.readline()\n \n###### transpose function\ndef transpose(m):\n Trans = [[m[j][i] for j in range(len(m))] for i in range(len(m[0]))]\n return Trans\n\n###### dot product function\ndef dot_Prod(m,n,c):\n dp=0\n for j in range(0,c,1):\n dp += m[j]*n[j]\n return dp\n\n###### change in weights \ndef delf(features,labels,weights):\n \n rows =len(features)\n cols=len(features[0])\n #### update w\n delta_weights=[0]*cols\n for i in range(0, rows, 1):\n if (labels.get(i) != None):\n dp = dot_Prod(weights, features[i], cols)\n for j in range(0, cols, 1):\n delta_weights[j] += ((-labels.get(i) + dp)*features[i][j])\n \n return delta_weights\n\n###### error function\ndef squared_error(features,labels,weights):\n \n rows =len(features)\n cols=len(features[0])\n error = 0\n for i in range(0, rows, 1):\n if (labels.get(i) != None):\n error += (-labels.get(i) + dot_Prod(weights, features[i], cols))**2\n \n return error \n\n \n######### best learning rate \ndef bestEta(features,labels,weights,eta_list):\n \n eta_error ={}\n error = 0\n new_weights = [0]*(len(weights))\n \n for eta in eta_list:\n del_f = delf(features,labels,weights)\n \n for j in range(0, cols, 1):\n new_weights[j] = weights[j] - eta * del_f[j]\n \n error= squared_error(features,labels,new_weights)\n \n eta_error[error]= eta\n \n return eta_error[min(eta_error.keys())]\n\n\n######### MODEL TRAINING\n\n\ncond =1\nerror = 0\ntheta =0.001\ncounter = 0\nweights = [0]* cols\neta_list = [1, .1, .01, .001, .0001, .00001, .000001,\n .0000001, .00000001, .000000001, .0000000001,\n .00000000001 ]\n#### Initailizing weights to random \nfor j in range(cols):\n weights[j] = random.uniform(-0.001,0.001)\n\n########## error initialization \n \nerror = squared_error(features,labels,weights)\n\nwhile(True):\n counter += 1\n \n del_f = delf(features,labels,weights)\n eta = bestEta(features,labels,weights,eta_list)\n for i,df in enumerate(del_f):\n weights[i] -= eta*df\n \n new_error = squared_error(features,labels,weights)\n \n if error - new_error < theta:\n break\n error = new_error\n \n \n'''print(\"Loop count = \",counter)\nprint(\"Final Weights = \",weights)\nprint(\"Final Error=\",error)\nprint(\"Predicted Row 
ID\")\nprint(\"Output\")'''\n# Prediction\nfor i in range(0, rows, 1):\n\tif (labels.get(i) == None):\n\t\tdp = dot_Prod(weights, features[i], cols)\n\t\tif(dp>0):\n\t\t\tprint(\"1\",i)\n\t\telse:\n\t\t\tprint(\"0\",i)\n#distance from origin\n\ndef List_Transpose(m):\n Trans = [m[i] for i in range(len(m))]\n return Trans\n\n#w0 for calculation of distance from origin as it is weight for intercept\nw0=weights[cols-1] \nw=[0]*(cols-1)\nfor j in range(0,cols-1,1):\n w[j]= weights[j]\n\nw_t=List_Transpose(w)\nw_p =dot_Prod(w_t,w,cols-1) \ndist_org=0\ndist_org =w0/(w_p**0.5)\n#print(\"Distance from Origin=\",dist_org)\n \n \n \n \n \n \n\n\n\n \n\n\n\n\n\n\n\n\n","sub_path":"Assignment5/least_squares_adaptive_eta.py","file_name":"least_squares_adaptive_eta.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"283816048","text":"import pandas as pd\n\n\nclass GeneFilter:\n \"\"\"\n Takes gene x sample matrix, creates the subset of matrix by discarding gene which\n are not in the gene_list_file.\n\n \"\"\"\n\n def __init__(self, matrix_file, gene_list_file, outfile):\n self.matrix_file = matrix_file\n self.gene_list_file = gene_list_file\n self.outfile = outfile\n\n def filter(self):\n matrix = pd.read_csv(self.matrix_file, sep='\\t', index_col='gene_id')\n genes = pd.read_csv(self.gene_list_file, sep='\\t')\n matrix = matrix.reindex(genes['gene_id']).dropna(how='all')\n print(matrix.info())\n matrix.to_csv(self.outfile, sep='\\t')\n return matrix\n\n\nclass SampleFilter:\n \"\"\"\n create the subset of the matrix by sample names\n \"\"\"\n\n def __init__(self, matrix_file, sample_list_file, outfile):\n self.matrix_file = matrix_file\n self.sample_list_file = sample_list_file\n self.outfile = outfile\n\n def filter(self):\n matrix = pd.read_csv(self.matrix_file, sep='\\t', index_col='gene_id')\n samples = [line.rstrip() for line in open(self.sample_list_file, 'r')]\n matrix = matrix[samples]\n matrix.to_csv(self.outfile, sep='\\t')\n print(matrix.info())\n return matrix\n\n\nclass MatrixFilter:\n\n def __init__(self, matrix_file, name_list_file, outfile):\n self.matrix_file = matrix_file\n self.name_list_file = name_list_file\n self.outfile = outfile\n\n def get_name_list(self):\n return [line.rstrip() for line in open(self.name_list_file, 'r')]\n\n\nclass RowFilter(MatrixFilter):\n\n def apply_filter(self):\n matrix = pd.read_csv(self.matrix_file, sep='\\t', index_col=0)\n\n if isinstance(self.name_list_file, list):\n row_ids = self.name_list_file\n else:\n row_ids = self.get_name_list()\n\n matrix = matrix.reindex(row_ids).dropna(how='all')\n print(matrix.info())\n matrix.to_csv(self.outfile, sep='\\t')\n return matrix\n\n\nclass ColumnFilter(MatrixFilter):\n\n def apply_filter(self):\n matrix = pd.read_csv(self.matrix_file, sep='\\t', index_col=0)\n if isinstance(self.name_list_file, list):\n column_ids = self.name_list_file\n else:\n column_ids = self.get_name_list()\n\n matrix = matrix[column_ids]\n print(matrix.info())\n matrix.to_csv(self.outfile, sep='\\t')\n return matrix\n","sub_path":"Matrix/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"206906678","text":"import pandas as pd\nno_of_movies=9740\ndata=pd.read_csv('./ml-latest-small/movies.csv',usecols=[0,1])\nmovies={}\nlength=data.shape[0]\nmax_movieID=data.iloc[length-1,0]\nfor i in range(0,length):\n 
row=data.iloc[i,[0,1]]\n movies[row[0]]=row[1]\n\nratings={}\nrating_length=100800\ndata=pd.read_csv('./ml-latest-small/ratings.csv',usecols=[0,1,2])\nlen2=data.shape[0]\nmax_userID=data.iloc[len2-1,0]\nratings={}\nfor i in range(0,len2):\n row=data.iloc[i,[0,1,2]]\n try:\n if(row[1]<=max_movieID):\n ratings[int(row[0])][row[1]]=row[2]\n except KeyError:\n ratings[int(row[0])]={}\n# print(ratings)\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"628904451","text":"import discord\nfrom discord.ext import commands\nfrom discord import DMChannel\nimport random\nimport time\nimport keep_alive\nimport webbrowser\nimport json\nimport os\nos.system('pip install pyfiglet')\nos.system('pip install rsap')\n\nimport pyfiglet\nfrom discord.utils import get\nimport praw\nfrom discord.voice_client import VoiceClient\nimport time, datetime\nfrom discord.ext.commands import Bot\nfrom multiprocessing.connection import Client\nfrom discord.ext.commands.cooldowns import BucketType\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom urllib import request\nfrom datetime import datetime\nimport platform\nimport base64\nimport asyncio\nfrom datetime import datetime\nfrom rsap import AsyncRSAP\nfrom rsap import RSAP\n\nbru_lib = ['https://cdn.discordapp.com/attachments/800269388372508692/802405673761636383/really-bruh-picture-2.jpg', \n'https://cdn.discordapp.com/attachments/800269388372508692/802405633789919272/artworks-000533739219-8qcl2j-t500x500.jpg', \n'https://cdn.discordapp.com/attachments/800269388372508692/802405627770961940/bruh_4x.png', \n'https://cdn.discordapp.com/attachments/800269388372508692/802405585043456050/11d913c39a0aff5067d7ce5b28144eb2.jpg', \n'https://cdn.discordapp.com/attachments/800269388372508692/802405582895316992/9DzrfV60_400x400.jpg',\n'https://cdn.discordapp.com/attachments/800269388372508692/802405582102462484/5dd60cfe2a9f024f6595cb91d8a33e77.jpg',\n'https://cdn.discordapp.com/attachments/800269388372508692/802405655646306344/i_luff_u.jpg']\nclass fun(commands.Cog):\n def __init__(self, client):\n self.client = client\n @commands.is_owner()\n @commands.command()\n async def figlet(self, ctx, *, text):\n bner = pyfiglet.figlet_format(text)\n await ctx.send(bner)\n \n\n @commands.command()\n async def slap(self, ctx,*,name):\n await ctx.send(f'{ctx.author.mention} just slapped :wave: {name}, lol :laughing:')\n\n @commands.command()\n async def kill(self, ctx, *, name):\n responses1 = [f'{ctx.author.mention} killed {name} with a sniper',\n f'{ctx.author.mention} shot {name} on the head',\n f'{name} was stabbed by {ctx.author.mention}',\n f\"{name} drank tea made up of an elephant's sh*t and died\",\n f\"{ctx.author.mention} slit {name}'s head using Thor's Stormbreaker.\",\n f\"{ctx.author.mention} stole Captain's shield and smacked it on {name}'s jaw.\",\n '\\n'f'Unfortunately {name} died :sad:',\n f'{ctx.author.mention} killed {name} with a hulk smash.',\n f'An african elephant just sat on {name}.',\n f\"{name}'s teacher caught him cheating in his exams and killed him with a metal scale.\",]\n await ctx.send(random.choice(responses1))\n\n @commands.command()\n async def roast(self, ctx, *, name):\n responses = [\n \n f'I would slap you {name}, but that would be animal abuse :laughing:',\n f'You are the reason that god created middle finger ',\n f'Is your ass jealous of the amount of shit that comes out of your mouth? 
:poop:',\n f\"I don't want to waste time roasting asses like you\",\n f\"You're so ugly, when you were born, your Mom said 'What a treasure', but your dad said let's throw it away in the bin :laughing:\",\n f\"You must have been born on a highway, because thats where most accidents happen\",\n f\"How this animal is able to use discord???\",\n f\"You are the God's only mistake.\",\n f\"You are the God's biggest mistake\",\n f\"I would refer to die rather than seeing your face.\",\n f\"Hey {ctx.author.mention} you better see yourself before roasting others :laughing:\",\n ]\n await ctx.send(f'Roasting {name} \"\\n\" {random.choice(responses)}')\n\n @commands.command()\n async def pw(self, ctx):\n s = \"abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%&\"\n passlen = 8\n p = \"\".join(random.sample(s,passlen ))\n msg = p.format(ctx)\n\n await ctx.send(f'Hey {ctx.author.mention}, your password has been sent in the dms to you.')\n await ctx.author.send(f'Here is your generated 8 digit password|| {msg} ||')\n\n @commands.command()\n async def dice(self, ctx):\n die = ['1', '2', '3', '4', '5', '6']\n dice = discord.Embed(title=f':game_die: Dice Game',\n colour = discord.Colour.blue())\n dice.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)\n dice.set_thumbnail(url='https://cdn.discordapp.com/attachments/782647287494082611/792315877994266654/tenor.gif')\n dice.add_field(name=f'Dice game', value=f'You got **{random.choice(die)}** on your dice')\n dice.set_footer(text='Astro 999 v1.2.0', icon_url='https://cdn.discordapp.com/attachments/800269388372508692/802434552063590420/Slide1.jpg')\n await ctx.send(embed = dice)\n\n @commands.command()\n async def toss(self, ctx):\n op = ['Heads', 'Tails']\n toss = discord.Embed(\n title = f\"{ctx.author}'s toss game :coin:\",\n description = f'{ctx.author} flipped the coin!!'\n )\n toss.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)\n toss.set_thumbnail(url = 'https://cdn.discordapp.com/attachments/800269388372508692/800357830355714088/tenor.gif')\n toss.add_field(name = f'--------', value=f'You got **{random.choice(op)}** on your coin.')\n toss.set_footer(text='Astro 999 v1.2.0', icon_url='https://cdn.discordapp.com/attachments/800269388372508692/802434552063590420/Slide1.jpg')\n await ctx.send(embed = toss)\n\n\n @commands.command()\n async def fsociety(self, ctx):\n fs = discord.Embed(\n title = 'F*** SOCIETY!!!!!!!', \n colour = discord.Colour.red()\n )\n fs.set_image(url = 'https://media.discordapp.net/attachments/762886017253638164/794601784383504394/tenor.gif')\n await ctx.send(embed = fs)\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def bruh(self, ctx):\n bruh = discord.Embed(title = 'BRUHHHHH!!!!!!!', colour=discord.Color.teal())\n bruh.set_image(url = random.choice(bru_lib))\n await ctx.send(embed = bruh)\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def hack(self, ctx, *, name):\n message = await ctx.send(f\"Hacking {name}\")\n await asyncio.sleep(1)\n msg_sec = await message.edit(content=f\"Colleting IP Address...\")\n await asyncio.sleep(1)\n msg_third = await message.edit(content = f'Collecting Email.....')\n await asyncio.sleep(1)\n msg_fourth = await message.edit(content = f'Selling on Dark Web....')\n await asyncio.sleep(1)\n await message.edit(content = f'Email Grabbed = \"nibbaredtube.gmail.com\"')\n await asyncio.sleep(1)\n await message.edit(content = f'Getting Address...')\n await asyncio.sleep(1)\n await 
message.edit(content = f'Sending FBI to house....')\n await asyncio.sleep(1)\n await message.edit(content = f'Hacked {name} successfully...')\n\n @commands.command()\n async def bitcoin(self, ctx):\n page = urllib.request.urlopen('https://www.coindesk.com/price/bitcoin').read()\n html = BeautifulSoup(page, 'html.parser')\n price_large = html.find(class_ = 'price-large')\n\n price_large1 = str(price_large)\n\n price_large2 = price_large1[54:63]\n bit_embed = discord.Embed(\n title = 'Current Bitcoin price', \n colour = discord.Colour.red()\n )\n bit_embed.add_field(name = \":coin:\", value=f'${price_large2}')\n #await ctx.send(f'Current Bitcoin Price {price_large2}')\n await ctx.send(embed = bit_embed)\n\n \n @commands.command()\n async def secmsg(self, ctx, member : discord.Member, *, text):\n if member is None:\n await ctx.send('You gotta mention a valid user to send the message dumb.')\n\n \n await member.send(f\"Someone from {ctx.guild} send you this \\n-------------------------------\\n{text}\")\n await ctx.channel.purge(limit = 10)\n\n \n\n \n\n \n ''''\n @commands.command()\n async def gay(self, ctx, member: discord.Member):\n await ctx.send(\"Gay Rate:\"\n '\\n :flag_lesbian: ')'''\n'''\n @commands.command()\n async def join(self, ctx):\n channel = ctx.author.voice.channel\n await channel.connect()\n\n @commands.command()\n async def leave(self, ctx):\n server = ctx.message.server\n voice_client = client.voice_client_in(server)\n await voice_client.disconnect()\n \n\n \n\n'''\n'''\n @commands.command()\n @commands.is_owner()\n async def new_sports(self, ctx):\n client = gnewsclient.NewsClient(language='english', \n location='world', \n topic='Sports', \n max_results=5)\n\n news_list = client.get_news()\n for item in news_list:\n new_link = item['link']\n\n new_em = discord.Embed(title='Latest news in Sports all around the world', colour=discord.Colour.red())\n new_em.add_field(name=item['title'], value=item['link'])\n new_em.add_field(name='--', value='----')\n await ctx.send(embed = new_em)\n \n for item in news_list:\n await ctx.send(item['title'])\n print(item['title'])''' ''' \n print(\"Title : \", item['title'])\n print(\"Link : \", item['link'])\n print(\"\")'''\n\n \n \n\n \n\n\n \n\n\n\n \n\n\n\ndef setup(client):\n client.add_cog(fun(client))\n","sub_path":"astro-bot/cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"390345721","text":"#!/usr/bin/env python3\n\n\"\"\"\n pharmacy_sales_summary.py\n \n The scripts reads the pharmacy raw datafile and gives the summary\n aggregates on druglevel. The script creates the output file that \n has the summary :- \n - No. 
of unique drug subcriber\n - Total sales amount of drug\n \n\"\"\"\n\nimport csv\nfrom collections import defaultdict,Counter,OrderedDict\nfrom operator import itemgetter\nfrom timer import timeit\nimport sys\n\n#-----------------------------------------------------------------------\n\n#=======================================================================\n# Writing functions\n#=======================================================================\n\n@timeit\ndef read_pharmacy_input(infile):\n\t\"\"\"\n\tThis function reads the pharamcy file and returns\n\tthe counter and dictionary for the total sale and\n\tunique subscribers.\n\t\"\"\"\n\trecords = []\n\tdrug_count= defaultdict(set)\n\tdrug_cost= Counter()\n\ttry:\n\t\twith open(infile) as rows:\n\t\t\treader = csv.reader(rows)\n\t\t\theaders=next(reader) # skip the header row\n\t\t\tfor rowno,row in enumerate(reader,start=1):\n\t\t\t\ttry:\n\t\t\t\t\tdrug_count[row[3]].add(row[1]+row[2])\n\t\t\t\t\tdrug_cost[row[3]]+=float(row[4])\n\t\t\t\texcept ValueError as err:\n\t\t\t\t\tprint ('Row: ', rowno, 'bad row: ', row)\n\t\t\t\t\tprint ('Row: ', rowno, 'Reason: ', err)\n\t\t\t\t\tcontinue # skips to the next row\n\texcept IOError:\n\t\tprint (\" File not found \")\n\t\tprint (\"File name: \", infile)\n\treturn drug_count,drug_cost\n\n@timeit\ndef create_summary(targetfile,drug_count,drug_cost):\n\t\"\"\"\n\tThis function creates takes the output file name and creates the output\n\tsummary file.\n\t\"\"\"\n\twith open(targetfile, 'a') as tfile:\n\t\tl=0\n\t\tfor key,val in drug_cost.items():\n\t\t\tl+=1\n\t\t\ttry:\n\t\t\t\tif l == 1:\n\t\t\t\t\theader = 'drug_name,num_prescriber,total_cost\\n'\n\t\t\t\t\ttfile.write(header)\n\t\t\t\twval = key+','+str(len(drug_count[key]))+','+str(int(round(val)))+'\\n'\n#\t\t\t\twval = '\"'+key+'\"'+','+str(len(drug_count[key]))+','+str(val)+'\\n'\n\t\t\t\ttfile.write(wval)\n\t\t\texcept Exception as error:\n\t\t\t\tprint(error)\n\t\t\t\traise \n\n@timeit\ndef main(infile,targetfile):\n\t\"\"\"\n\tthis function executes and create output file\n\t\"\"\"\n\tdrug_count,drug_cost = read_pharmacy_input(infile)\n\tdrug_cost = OrderedDict(sorted(drug_cost.items(), key=itemgetter(1), reverse=True))\n\tcreate_summary(targetfile,drug_count,drug_cost)\n\nif __name__ == '__main__':\n\tinfile = sys.argv[1]\n\ttargetfile = sys.argv[2]\n\tmain(infile,targetfile)\n","sub_path":"src/pharmacy_sales_summary.py","file_name":"pharmacy_sales_summary.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"29682408","text":"## A module to read data from a DSS, this specifically implements the Remote ID standard as released on Oct-2020\n## For more information review: https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/uastech/standards/astm_rid_1.0/remoteid/canonical.yaml \n## and this diagram https://github.com/interuss/dss/blob/master/assets/generated/rid_display.png\n\nfrom functools import wraps\nimport json\nfrom flask_uuid import FlaskUUID\nfrom six.moves.urllib.request import urlopen\n\nimport redis\nfrom datetime import datetime, timedelta\nimport uuid, os\nimport requests\nfrom flask import request\nfrom flask import current_app\nfrom os import environ as env\n# from flask import Blueprint\n# dss_rid_blueprint = Blueprint('rid_dss_operations_bp', __name__)\n\nREDIS_HOST = os.getenv('REDIS_HOST',\"redis\")\nREDIS_PORT = 6379\n\nclass AuthorityCredentialsGetter():\n ''' All calls to the DSS require 
credentials from a authority, usually the CAA since they can provide access to the system '''\n def __init__(self):\n pass\n \n def get_cached_credentials(self, audience): \n r = redis.Redis()\n \n now = datetime.now()\n cache_key = audience + '_auth_dss_token'\n token_details = r.get(cache_key)\n if token_details: \n token_details = json.loads(token_details)\n created_at = token_details['created_at']\n set_date = datetime.strptime(created_at,\"%Y-%m-%dT%H:%M:%S.%f\")\n if now < (set_date - timedelta(minutes=58)):\n credentials = self.get_read_credentials(audience)\n r.set(cache_key, json.dumps({'credentials': credentials, 'created_at':now.isoformat()}))\n else: \n credentials = token_details['credentials']\n else: \n credentials = self.get_read_credentials(audience)\n access_token = credentials.get('access_token')\n if access_token: # there is no error in the token\n r.set(cache_key, json.dumps({'credentials': credentials, 'created_at':now.isoformat()})) \n r.expire(cache_key, timedelta(minutes=58))\n \n return credentials\n \n \n def get_read_credentials(self, audience): \n payload = {\"grant_type\":\"client_credentials\",\"client_id\": env.get('AUTH_DSS_CLIENT_ID'),\"client_secret\": env.get('AUTH_DSS_CLIENT_SECRET'),\"audience\":audience,\"scope\": 'dss.read_identification_service_areas'} \n url = env.get('DSS_AUTH_URL') + env.get('DSS_AUTH_TOKEN_ENDPOINT') \n token_data = requests.post(url, data = payload)\n t_data = token_data.json() \n return t_data\n\nclass RemoteIDOperations():\n\n def __init__(self):\n self.dss_base_url = env.get('DSS_BASE_URL')\n\n def create_dss_subscription(self, vertex_list, view_port):\n ''' This method PUTS /dss/subscriptions ''' \n \n subscription_response = {\"created\": 0, \"subscription_id\": 0, \"notification_index\": 0}\n \n my_authorization_helper = AuthorityCredentialsGetter()\n audience = env.get(\"DSS_SELF_AUDIENCE\", 0)\n error = True\n\n try: \n assert audience\n except AssertionError as ae:\n current_app.logger.error(\"Error in getting Authority Access Token DSS_SELF_AUDIENCE is not set in the environment\")\n return subscription_response\n\n try:\n auth_token = my_authorization_helper.get_cached_credentials(audience)\n except Exception as e:\n current_app.logger.error(\"Error in getting Authority Access Token %s \" % e)\n return subscription_response \n else:\n error = auth_token.get(\"error\") \n\n\n try: \n assert error is None\n except AssertionError as ae: \n return subscription_response\n else: \n \n current_app.logger.info(\"Successfully received Token\")\n # A token from authority was received, \n new_subscription_id = str(uuid.uuid4())\n dss_subscription_url = self.dss_base_url + '/dss/subscriptions/' + new_subscription_id\n\n callback_url = env.get(\"SUBSCRIPTION_CALLBACK_URL\",\"/isa_callback\") \n now = datetime.now()\n \n current_time = now.isoformat()\n one_hour_from_now = (now + timedelta(hours=1)).isoformat()\n\n headers = {'content-type': 'application/json', 'Authorization': 'Bearer ' + auth_token}\n\n volume_object = {\"spatial_volume\":{\"footprint\":{\"vertices\":vertex_list},\"altitude_lo\":0.5,\"altitude_hi\":400},\"time_start\":current_time,\"time_end\":one_hour_from_now }\n \n payload = {\"extents\": volume_object, \"callbacks\":{\"identification_service_area_url\":callback_url}}\n try:\n dss_r = requests.post(dss_subscription_url, data= json.dumps(payload), headers=headers) \n except Exception as re: \n current_app.logger.error(\"Error in posting to subscription URL %s \" % re)\n return subscription_response\n\n else: 
\n try: \n assert dss_r.status_code == 200\n subscription_response[\"created\"] = 1\n except AssertionError as ae: \n current_app.logger.error(\"Error in creating subscription in the DSS %s\" % dss_r.text)\n return subscription_response\n else: \t\n dss_response = dss_r.json()\n service_areas = dss_response['service_areas']\n subscription = dss_response['subscription']\n subscription_id = subscription['id']\n notification_index = subscription['notification_index']\n subscription_response['notification_index'] = notification_index\n subscription_response['subscription_id'] = subscription_id\n # iterate over the service areas to get flights URL to poll \n \n flights_url_list = []\n for service_area in service_areas: \n flights_url = service_area['flights_url']\n flights_url_list.append(flights_url)\n\n flights_dict= {'subscription_id': subscription_id,'all_flights_url':flights_url_list, 'notification_index': notification_index, 'view':view_port, 'expire_at':one_hour_from_now}\n\n redis = redis.Redis()\n hash_name = \"all_uss_flights\"\n redis.hmset(hash_name, flights_dict)\n # expire keys in one hour\n redis.expire(name=hash_name, time=timedelta(minutes=60))\n return subscription_response\n\n\n def delete_dss_subscription(self,subscription_id):\n ''' This module calls the DSS to delete a subscription''' \n\n # TODO: Subscriptions expire after a hour but we may need to delete one \n pass\n","sub_path":"dss_ops/rid_dss_operations.py","file_name":"rid_dss_operations.py","file_ext":"py","file_size_in_byte":7049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"466032404","text":"# coding:utf-8\nimport urllib2\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom robobrowser import RoboBrowser\nfrom requests import Session\n\n\n# -----------------------------------------------------------------------------------------------------------------\n# 这个框抓取从一级页面获得的url爬取到的二级页面的内容\n#  主要是人员信息,公司基本信息和公司异常情况三大类\ndef beianxinxi(soup):\n # 备案信息(人员)\n renyuan = {} # {职位:姓名}\n beian = soup.find(attrs={\"class\": \"cont-r-b\"}).find(attrs={\"rel\": \"layout-01_02\"})\n if beian:\n beianxinxi_num = len(beian.find_all(attrs={\"class\": \"center\"})) # 备案信息人员数目\n beianxinxi_content = beian.find_all('td')\n for i in range(beianxinxi_num):\n xingming = beianxinxi_content[i * 3 + 1].string\n zhiwu = beianxinxi_content[i * 3 + 2].string\n if zhiwu in renyuan.keys():\n renyuan[zhiwu][0].append(xingming)\n else:\n renyuan[zhiwu] = [[xingming]]\n return renyuan\n\n\ndef dengjixinxi(soup):\n # 登记信息\n b_info = {} # 基本信息\n targets = [u\"统一社会信用代码\", u\"名称\", u\"类型\", u\"法定代表人\", u\"负责人\", u\"注册资本\", u\"成立日期\", u\"住所\", u\"经营期限自\", \\\n u\"经营期限至\", u\"经营范围\", u\"登记机关\", u\"核准日期\", u\"登记状态\"]\n dengjixinxi_content = soup.find(attrs={\"class\": \"cont-r-b\"}).find(attrs={\"rel\": \"layout-01_01\"})\n if dengjixinxi_content:\n for target in targets:\n if dengjixinxi_content.find(text=target):\n content = dengjixinxi_content.find(text=target).parent.next_sibling.next_sibling.string\n b_info[target] = content.rstrip() if content else \"\"\n else:\n b_info[target] = \"\"\n return b_info\n\n\ndef yichangxinxi(soup):\n # 经营异常信息\n yichang_info = []\n yichang = soup.find(attrs={\"class\": \"cont-r-b\"}).find(attrs={\"rel\": \"layout-01_05\"})\n if yichang:\n yichang_content = yichang.find_all('tr')\n yichang_content.pop(0)\n yichang_content.pop(0)\n yichang_content.pop()\n for yichang in yichang_content:\n yichang_info.append({u\"列入经营异常名录原因\": yichang.find_all('td')[1].string,\n 
u\"列入日期\": yichang.find_all('td')[2].string,\n u\"移出经营异常名录原因\": yichang.find_all('td')[3].string,\n u\"移出日期\": yichang.find_all('td')[4].string,\n u\"作出决定机关\": yichang.find_all('td')[5].string})\n return yichang_info\n\n\ndef get_detail(url=None):\n content = urllib2.urlopen(url)\n soup = BeautifulSoup(content)\n if u\"该市场主体不在公示范围\" in soup.get_text():\n return None\n beianxinxi_dict = beianxinxi(soup)\n dengjixinxi_dict = dengjixinxi(soup)\n yichangxinxi_dict = {\"yichangxinxi\": [yichangxinxi(soup)]}\n\n return pd.DataFrame({k: v for d in [beianxinxi_dict, dengjixinxi_dict, yichangxinxi_dict] for k, v in d.items()})\n\n\n# -----------------------------------------------------------------------------------------------------------------\n# 这个框获取基本的一级数据,公司名称 ,ID,异常日期和详细信息的url\ndef parse_table(browser):\n tables = browser.find(attrs={\"class\": \"list-table\"}).select('tr')\n tables.pop(0) # pop table title\n name = []\n url = []\n ID = []\n date = []\n for line in tables:\n name += [line.select('td')[0].select('a')[0].string]\n url += [line.select('td')[0].select('a')[0]['href']]\n ID += [line.select('td')[1].string]\n date += [line.select('td')[2].string]\n return pd.DataFrame({'name': name, 'url': url, 'ID': ID, 'date': date})\n\n\n# -----------------------------------------------------------------------------------------------------------------\ndef start_crawl(pages=2):\n session = Session()\n session.verify = False\n url = 'https://www.sgs.gov.cn/notice/search/ent_except_list'\n b = RoboBrowser(session=session)\n b.open(url)\n\n basic_info = pd.DataFrame(columns=['name', 'url', 'ID', 'date'])\n detail_info = pd.DataFrame()\n for i in range(pages): # 改变这个数字控制爬取页数, 网页限制最大50页\n form = b.get_form(id='formInfo')\n if not form:\n continue\n form['condition.pageNo'].value = str(i + 1) # 修改表单控制页数\n form['condition.keyword'].value = ''\n try: # dirty fix...\n b.submit_form(form)\n basic_info = basic_info.append(parse_table(b), ignore_index=True)\n except AttributeError:\n pass\n\n for url in basic_info['url']:\n detail = get_detail(url)\n if isinstance(detail, pd.DataFrame):\n detail_info = detail_info.append(detail, ignore_index=True)\n\n return basic_info, detail_info\n\nif __name__ == \"__main__\":\n basic_info, detail_info = start_crawl(50)\n basic_info.merge(detail_info, left_on='name', right_on=u\"名称\", how='outer').to_excel('SAIC.xlsx', index=None)\n\n","sub_path":"spiders/SAIC_Abnormal/SAIC_spider.py","file_name":"SAIC_spider.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"186727351","text":"# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing permissions and limitations under\n# the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport json\nimport mock\nimport unittest\n\n# import Python so we can mock the parts we need to here.\nimport IPython.core.display\nimport IPython.core.magic\n\nimport datalab.utils.commands\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n @mock.patch('datalab.utils.get_item')\n def test_get_chart_data(self, mock_get_item):\n IPython.get_ipython().user_ns = {}\n t = [\n {'country': 'US', 'quantity': 100},\n {'country': 'ZA', 'quantity': 50},\n {'country': 'UK', 'quantity': 75},\n {'country': 'AU', 'quantity': 25}\n ]\n mock_get_item.return_value = t\n ds = datalab.utils.commands.get_data_source_index('t')\n data = datalab.utils.commands._chart_data._get_chart_data('', json.dumps({\n 'source_index': ds,\n 'fields': 'country',\n 'first': 1,\n 'count': 1\n }))\n self.assertEquals({\"data\": {\"rows\": [{\"c\": [{\"v\": \"ZA\"}]}],\n \"cols\": [{\"type\": \"string\", \"id\": \"country\", \"label\": \"country\"}]},\n \"refresh_interval\": 0, \"options\": {}}, data)\n\n data = datalab.utils.commands._chart_data._get_chart_data('', json.dumps({\n 'source_index': ds,\n 'fields': 'country',\n 'first': 6,\n 'count': 1\n }))\n self.assertEquals({\"data\": {\"rows\": [],\n \"cols\": [{\"type\": \"string\", \"id\": \"country\", \"label\": \"country\"}]},\n \"refresh_interval\": 0, \"options\": {}}, data)\n\n data = datalab.utils.commands._chart_data._get_chart_data('', json.dumps({\n 'source_index': ds,\n 'fields': 'country',\n 'first': 2,\n 'count': 0\n }))\n self.assertEquals({\"data\": {\"rows\": [],\n \"cols\": [{\"type\": \"string\", \"id\": \"country\", \"label\": \"country\"}]},\n \"refresh_interval\": 0, \"options\": {}}, data)\n","sub_path":"legacy_tests/kernel/chart_data_tests.py","file_name":"chart_data_tests.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"481885965","text":"import numpy as np\nfrom scipy.special import expit as sigmoid\nfrom sklearn.metrics import accuracy_score as accuracy\n# since \nclass MLP():\n \"\"\"\n For the moment we will deal with \n identity activation function on the \n output and sigmoid activation function.\n What will be the difference other \n perhaps enabling us to interpret it as a \n probability since monotinc?\n No regularization parameter\n Learning rate - train\n Online learning\n It should work also for regression tasks as we are \n not transforming the output units in the last level\n All the bias neurond take the value 1\n \"\"\"\n\n def __init__(self, n_neurons):\n \"\"\"\n Instantiating the NeuralNetwork structure\n\n Arguments:\n n_neurons - list describing the number of 'neurons' at each level without considering bias\n [n_inputs, n_hidden_units1, n_hidden_units_2, ..., n_outputs/class]\n\n layers_weights - list containing matrix of weights for each layer \n \"\"\"\n self.n_neurons = n_neurons # without counting the bias\n self.n_layers = len(self.n_neurons) - 1\n self.layers_weights = self.initialize_weights()\n\n def train(self, X, Y, n_iterations=300, 
learning_rate=0.1):\n \"\"\"\n Online gradient descent method.\n Next try batch, mini-batch \n\n Arguments:\n\n Y is a matrix if multiple classes\n \"\"\"\n # numb of features - inputs on the first layer - BIAS - Y values\n # create a fnction called instantiate network with weights\n # is a list ordered?\n # account for bias\n X = np.append(np.ones((X.shape[0], 1)), X, axis=1)\n #Y = self.encode(y)\n # perhaps not most efficient\n #n_inputs = [X.shape[1]]\n # true training - ONLINE LEARNING\n for _ in range(n_iterations):\n #A, Z = self.feedForward(X)\n gradients = self.backpropagate(X, Y)\n for index_layer in range(self.n_layers):\n self.layers_weights[index_layer] -= learning_rate * gradients[index_layer].transpose()\n #deltas = [None]*self.n_layers\n\n return self\n\n def predict(self, X):\n \n X = np.append(np.ones((X.shape[0], 1)), X, axis=1)\n A, Z = self.feedForward(X)\n predictions = np.argmax(A[-1], axis=1)\n\n return predictions\n\n def backpropagate(self, X, Y):\n\n A, Z = self.feedForward(X)\n\n # backpropagate\n deltas = [None]*self.n_layers\n deltas[-1] = A[-1] - Y\n for index_layer in np.arange(self.n_layers - 2, -1, -1):\n # the * operator on matrices performs element by element multiplication\n #print(Z[index_layer + 1])\n #print(deltas[index_layer + 1])\n deltas[index_layer] = self.deriv_sigmoid(Z[index_layer + 1]) * np.dot(deltas[index_layer + 1], self.layers_weights[index_layer + 1].transpose()[:, 1:])\n\n # gradients per layer\n gradients = [None]*self.n_layers\n for index_layer in range(self.n_layers):\n gradients[index_layer] = np.dot(deltas[index_layer].transpose(), A[index_layer])\n # do we average the gradients or not? it should not matter as the learning rate is present\n # gradients[index_layer] = gradients[index_layer]/X.shape[0]\n\n return gradients\n\n def feedForward(self, X):\n\n # non-activated neuron outputs\n Z = [None]*(self.n_layers + 1)\n # tranformed/activated neurons outputs\n A = [None]*(self.n_layers + 1)\n\n input_neurons = X\n\n # we are not transforming the output layer\n for index_layer in range(self.n_layers - 1):\n\n A[index_layer] = input_neurons\n # input neurons - neurons from the left\n Z[index_layer + 1] = np.dot(input_neurons, self.layers_weights[index_layer])\n # output neurons without bias\n temp = self.sigmoid(Z[index_layer + 1])\n # add bias term\n output_neurons = np.append(\n np.ones((temp.shape[0], 1)), temp, axis=1)\n #np.concatenate((np.ones([temp.shape[0] ,1]) ,temp), axis=1)\n input_neurons = output_neurons\n\n # pointless in transforming last outputs as sigmoid is a monotonic function\n A[-2] = input_neurons\n Z[-1] = np.dot(input_neurons, self.layers_weights[-1])\n A[-1] = Z[-1]\n\n return A, Z\n\n def initialize_weights(self):\n layers_weights = []\n\n for n_inputs, n_outputs in zip(self.n_neurons, self.n_neurons[1:]):\n # the neuron in the next level does not receive weights\n layers_weights.append(np.random.rand(n_inputs + 1, n_outputs))\n\n return layers_weights\n\n def sigmoid(self, x):\n #return 1 / (1 + np.exp(-1 * x))\n return sigmoid(x)\n\n def deriv_sigmoid(self, x):\n return self.sigmoid(x) * (1 - self.sigmoid(x))\n\nif __name__ == \"__main__\":\n\n import pandas as pd \n DATA_FN = 'MachineLearning/supervised-learning/classification/datasets/gaussian_synthetic_data.csv'\n df = pd.read_csv(DATA_FN, index_col=None)\n df = df.sample(frac=1)\n y = df.pop('target').reset_index(drop=True)\n targets_names = np.unique(y.values)\n n_targets = len(targets_names)\n n_obs = y.shape[0]\n Y = np.zeros((n_obs, n_targets))\n 
for i in range(n_obs):\n value = y[i]\n index = np.where(targets_names == value)\n Y[i, index] = 1\n X = df.values\n n_neurons = [X.shape[1], 2, Y.shape[1]]\n clf = MLP(n_neurons)\n clf.train(X, Y, n_iterations=300 ,learning_rate=0.001)\n predictions = clf.predict(X)\n print(predictions)\n accuracy\n #print(y)\n #print(predictions)\n #print(accuracy())\n #X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1], [0, 0, 0]])\n #y = np.array([[0, 1, 1, 1, 1, 0, 0]]).T\n #n_inputs = [X.shape[1], 4, 1]\n","sub_path":"supervised-learning/classification/multiLayerPerceptron.py","file_name":"multiLayerPerceptron.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"281468456","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\nimport arcade\nimport arcade.gui\n\nimport newtrim_3\nfrom newtrim_3 import *\n\n\nfrom silicium_7 import Silicium\n\n\n#sourcecode_file = \"test/9stars.asm\" ORIGINAL\nsourcecode_file = \"test/_9stars.asm\" # EAX EBX ECX EDX become rax rbx rcx rdx\n\n#--------------------------------------------------------------------------------------- CST\n\n# Screen and colors ----------------------\n\nREG_R_color = (30,110,250)\nREG_E_color = (215,90,200)\nREG_X_color = (250,180,25)\n\nREG_H_color = arcade.csscolor.YELLOW\nREG_L_color = arcade.csscolor.LAWNGREEN\n\nSCREEN_WIDTH = 1800\nSCREEN_HEIGHT = 1000\n\n\n# Regs ------------------------------------------\n\nREG_R_width = 300 #400\n\nREG_height = 50\n\nREG_H_height = REG_E_height = REG_R_height = REG_height\n\nREG_E_width = REG_R_width // 2\n\nREG_L_width = REG_H_width = REG_E_width // 2\n\nREG_TOTAL_WIDTH = REG_R_width + REG_E_width + REG_H_width + REG_L_width\n\n\nREG_R_x = 250\nREG_R_y = 800\n\nREG_L_y = REG_H_y = REG_E_y = REG_R_y\n\nRegs_y_decal = 10\nRegs_x_decal = 20\n\n# REG_R_color , REG_E_color\n\nANGLE_TL = tuple((REG_R_x, REG_R_y))\nANGLE_X_REG = REG_R_x\nANGLE_Y_REG = REG_R_y\n\nSTACK_WIDTH = 100\nSTACK_HEIGHT = 700\n\nY_INTERLINE = 70\n\nDECAL_Y_CARTOUCHE_REG = 55\n\n\nDECAL_X_REG_names = -50\n\n\n\n#X_DECALS = [0, REG_R_width, REG_R_width + REG_E_width - 6 * Regs_x_decal,REG_R_width + REG_E_width + REG_H_width - 6 * Regs_x_decal]\nX_DECALS = [0, REG_R_width- 4 * Regs_x_decal, REG_R_width + REG_E_width - 6 * Regs_x_decal, REG_R_width + REG_E_width + REG_H_width - 6 * Regs_x_decal]\n\nX_WIDTH = [REG_R_width, REG_E_width, REG_H_width, REG_L_width]\n\nREGlist = ['A','B','C','D','BP','SP','SI','DI','IP']\narchis = ['R','E','H','L']\n\nLINES = len(REGlist)\nCOLUMNS = len(archis)\n\n\n# Stacks -------\n\nDECAL_X_STACK = 50\n\nANGLE_Y_STACK = ANGLE_Y_REG\n\n#ANGLE_STACK = tuple((ANGLE_X_REG + REG_TOTAL_WIDTH + DECAL_X_STACK, ANGLE_Y_REG))\nANGLE_STACK = tuple((ANGLE_X_REG + REG_TOTAL_WIDTH , ANGLE_Y_STACK-200))\n\nANGLE_LOW = tuple((ANGLE_STACK[0],ANGLE_STACK[1]+30))\nANGLE_HIGH = tuple((ANGLE_STACK[0],ANGLE_STACK[1]-30-STACK_HEIGHT)) # ?\n\nDECAL_X_RAM = 400\nANGLE_RAM = tuple((ANGLE_LOW[0]+ 300, ANGLE_LOW[1] -100))\n\nRAM_WIDTH = 300\nRAM_HEIGHT = 700\n\n\nANGLE_CODE = tuple((ANGLE_RAM[0]+ 400, ANGLE_RAM[1]))\nCODE_WIDTH = 400\nCODE_HEIGHT = 700\n\n\nY_SOURCECODE_DECAL = 20\nX_OPERAND_DECAL = 70\n#---------------------------------------------------------------------------------------\n\n\nmySilicium = Silicium()\n\nprint(dir(mySilicium))\n\nprint(\"mySilicium.rax\",mySilicium.rax,\" \",type(mySilicium.rax))\nprint(\"mySilicium.rax.bin\",mySilicium.rax.bin,\" \",type(mySilicium.rax.bin),\" len 
\", len(mySilicium.rax.bin))\n\n\nprint(\"mySilicium.rbx\",mySilicium.rbx)\nprint(\"mySilicium.rbx.bin\",mySilicium.rbx.bin)\n\nprint(\"______________________\")\nmySilicium.setup()\n\n\nmySilicium.__dict__[\"rdx\"] = 57\n\n\nprint(\"\\n mySilicium attributes = \", mySilicium.__dict__.keys())\nprint(\"\\n mySilicium attributes = \", mySilicium.__dict__.values())\n\nnot_magic_methods = [f for f in dir(mySilicium) if callable(getattr(mySilicium,f)) and not f.startswith('__')]\nprint(\"\\n not_magic_methods = \", not_magic_methods)\n\nw = input(\"ok\")\n\nclass MyWindow(arcade.Window):\n def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, \"GUI Widgets Example\", resizable=True)\n\n self.sourcecode_lines = list()\n\n \n\n \n\n arcade.set_background_color(arcade.color.MAROON)\n\n def setup(self):\n\n self.sourcecode_lines = trimit(sourcecode_file)\n\n print(\"***************\")\n print(type(self.sourcecode_lines))\n print(self.sourcecode_lines)\n print(len(self.sourcecode_lines))\n print(\"***------------------*****\")\n\n \n\n \n\n def on_draw(self):\n arcade.start_render()\n \n\n \n\n \n\n for i in range(0,LINES):\n for j in range(0,COLUMNS):\n\n \n\n arcade.draw_rectangle_filled(ANGLE_X_REG+X_DECALS[j] , ANGLE_Y_REG -i*Y_INTERLINE, X_WIDTH[j], REG_height, arcade.color.GOLD)\n\n\n for i in range(0,LINES):\n arcade.draw_text(REGlist[i],REG_R_x + DECAL_X_REG_names - REG_E_width, ANGLE_Y_REG -i*Y_INTERLINE,arcade.csscolor.WHITE,18,)\n\n #arcade.draw_text(mySilicium.regs_idx[i], 123+ REG_R_x + DECAL_X_REG_names - REG_E_width, ANGLE_Y_REG -i*Y_INTERLINE,arcade.csscolor.CYAN,18,)\n arcade.draw_text(str(mySilicium.regs_idx[i]), 123+ REG_R_x + DECAL_X_REG_names - REG_E_width, ANGLE_Y_REG -i*Y_INTERLINE,arcade.csscolor.CYAN,18,)\n\n\n\n\n\n\n\n # -------------------------------------------------------------------------------------- REGs\n\n\n arcade.draw_rectangle_filled(REG_R_x, REG_R_y+DECAL_Y_CARTOUCHE_REG, REG_R_width, REG_R_height, arcade.color.BLUE) # R\n arcade.draw_rectangle_filled(REG_R_x + REG_R_width - 4 * Regs_x_decal, REG_E_y+DECAL_Y_CARTOUCHE_REG, REG_E_width, REG_E_height, arcade.color.RED) # E\n arcade.draw_rectangle_filled(REG_R_x + REG_R_width + REG_E_width - 6 * Regs_x_decal, REG_H_y+DECAL_Y_CARTOUCHE_REG, REG_H_width, REG_H_height, arcade.color.ORANGE) # H\n arcade.draw_rectangle_filled(REG_R_x + REG_R_width + REG_E_width + REG_H_width - 6 * Regs_x_decal, REG_H_y+DECAL_Y_CARTOUCHE_REG, REG_H_width, REG_H_height, arcade.color.GREEN) # L\n\n\n\n\n \n\n\n arcade.draw_text('R',REG_R_x, REG_R_y+DECAL_Y_CARTOUCHE_REG,arcade.csscolor.WHITE,18,)\n arcade.draw_text('E',REG_R_x + REG_R_width - 4 * Regs_x_decal, REG_E_y+DECAL_Y_CARTOUCHE_REG,arcade.csscolor.WHITE,18,)\n arcade.draw_text('H',REG_R_x + REG_R_width + REG_E_width - 6 * Regs_x_decal, REG_H_y+DECAL_Y_CARTOUCHE_REG,arcade.csscolor.WHITE,18,)\n arcade.draw_text('L',REG_R_x + REG_R_width + REG_E_width + REG_H_width - 6 * Regs_x_decal, REG_H_y+DECAL_Y_CARTOUCHE_REG,arcade.csscolor.WHITE,18,)\n arcade.draw_text('X',10,10,arcade.csscolor.WHITE,18,)\n\n\n arcade.draw_rectangle_filled(ANGLE_STACK[0] , ANGLE_STACK[1], STACK_WIDTH, STACK_HEIGHT, arcade.color.BLACK)\n arcade.draw_rectangle_outline(ANGLE_STACK[0] , ANGLE_STACK[1], STACK_WIDTH, STACK_HEIGHT, arcade.color.RED)\n\n \n\n arcade.draw_text(\"LOW 0x00\", ANGLE_LOW[0]-DECAL_X_STACK, ANGLE_LOW[1] + 350, arcade.csscolor.BLACK, 18,)\n arcade.draw_text(\"HIGH 0xFF\", ANGLE_LOW[0]-DECAL_X_STACK, ANGLE_LOW[1] -400,arcade.csscolor.BLACK,18,)\n\n\n arcade.draw_text(\"RAM 
Adresses and values\", ANGLE_RAM[0] -150, ANGLE_LOW[1] + 350, arcade.color.CADET, 18,) \n arcade.draw_rectangle_filled(ANGLE_RAM[0] , ANGLE_RAM[1], RAM_WIDTH, RAM_HEIGHT, arcade.color.CADET)\n\n\n arcade.draw_text(\"CODE ASM\", ANGLE_CODE[0] -150, ANGLE_LOW[1] + 350, arcade.color.BLACK, 18,)\n arcade.draw_rectangle_filled(ANGLE_CODE[0] , ANGLE_CODE[1], CODE_WIDTH, CODE_HEIGHT, arcade.color.BLACK)\n\n\n # --------------------------------------------------------------------------------------\n\n \n\n for line_number in range(0, len(self.sourcecode_lines)):\n \n arcade.draw_text(self.sourcecode_lines[line_number][0], ANGLE_CODE[0] -180, ANGLE_CODE[1] + CODE_HEIGHT//2 - 15 - line_number * Y_SOURCECODE_DECAL, arcade.csscolor.WHITE,14,)\n\n if len(self.sourcecode_lines[line_number]) >1:\n arcade.draw_text(self.sourcecode_lines[line_number][1], ANGLE_CODE[0] -180 + X_OPERAND_DECAL, ANGLE_CODE[1] + CODE_HEIGHT//2 - 15 - line_number * Y_SOURCECODE_DECAL, arcade.csscolor.WHITE,14,)\n\n if len(self.sourcecode_lines[line_number]) >2:\n arcade.draw_text(self.sourcecode_lines[line_number][2], ANGLE_CODE[0] -180 + 2*X_OPERAND_DECAL, ANGLE_CODE[1] + CODE_HEIGHT//2 - 15 - line_number * Y_SOURCECODE_DECAL, arcade.csscolor.WHITE,14,)\n\n\n\n\ndef main():\n window = MyWindow()\n window.setup()\n\n mySilicium.work()\n\n mySilicium.show_status()\n \n arcade.run()\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":7911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"27838438","text":"'''Given a string of words, you need to find the highest scoring word.\n\nEach letter of a word scores points according to it's position in the alphabet: a = 1, b = 2, c = 3 etc.\n\nYou need to return the highest scoring word as a string.\n\nIf two words score the same, return the word that appears earliest in the original string.\n\nAll letters will be lowercase and all inputs will be valid.'''\n\ndef high(x):\n arr1 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]\n arr2 = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n dictionary = dict(zip(arr2, arr1))\n string = \"\"\n answer = \"\"\n count = 0\n max = 0\n \n for letra in x:\n \n if letra != \" \":\n count += dictionary[letra]\n string += letra\n if count > max:\n max = count\n answer = string \n else:\n count=0\n string = \"\"\n \n return answer\n\nif __name__ == \"__main__\":\n \n\n # TEST CASES #\n\n\n assert high('man i need a taxi up to ubud') == 'taxi'\n assert high('what time are we climbing up the volcano') == 'volcano'\n assert high('take me to semynak') == 'semynak'\n assert high('massage yes massage yes massage') == 'massage'\n assert high('take two bintang and a dance please') == 'bintang'\n","sub_path":"Python_katas/katas_6kyu/highest_scoring_word.py","file_name":"highest_scoring_word.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"228361882","text":"# DESAFIO 086 - Crie um programa que crie uma MATRIZ de DIMENSÃO 3X3 e a preencha com valores lidos pelo teclado.\n#No final, mostre a MATRIZ na tela, com a formatação correta.\n\nmatriz = [[], [], []]\n#matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\nfor i in range(0, 3):\n for j in range(0, 3):\n matriz[i].append(int(input(f'Digite um valor para [{i}, {j}]: 
')))\n #matriz[i][j] = int(input(f'Digite um valor para [{i}, {j}]: '))\n\nfor i in range(0, 3):\n\n for j in range(0, 3):\n print(f'[ {matriz[i][j]:^5} ]', end='') #formatando com 5 espaços, centralizado, pra tentar manter a simetria\n print()","sub_path":"PythonExercícios/ex086 - Matriz em Python.py","file_name":"ex086 - Matriz em Python.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"239156776","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@created on: 2/27/20,\n@author: Shreesha N,\n@version: v0.0.1\n@system name: badgod\nDescription:\n\n..todo::\n\n\"\"\"\nimport argparse\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom alcoaudio.datagen.audio_feature_extractors import preprocess_data, preprocess_data_images, \\\n remove_silent_parts_from_audio\nfrom alcoaudio.utils.class_utils import AttributeDict\nfrom alcoaudio.utils.data_utils import save_h5py, save_npy, save_csv\n\n\ndef parse():\n parser = argparse.ArgumentParser(description=\"alcoaudio_data_processor\")\n parser.add_argument('--configs_file', type=str)\n args = parser.parse_args()\n return args\n\n\nclass DataProcessor:\n def __init__(self, args):\n self.base_path = args.audio_basepath\n self.train_data_file = args.train_data_file\n self.dev_data_file = args.dev_data_file\n self.test_data_file = args.test_data_file\n self.normalise = args.normalise_while_creating\n self.sample_size_in_seconds = args.sample_size_in_seconds\n self.sampling_rate = args.sampling_rate\n self.overlap = args.overlap\n self.data_save_path = args.data_save_path\n self.image_save_path = args.image_data_save_path\n self.method = args.data_processing_method\n\n def process_audio_and_save_h5py(self, data_file, filename_to_save, shuffle=True):\n df = pd.read_csv(data_file)\n if shuffle:\n df = df.sample(frac=1)\n data, labels = preprocess_data(self.base_path, df['WAV_PATH'].values, df['label'].values,\n self.normalise,\n self.sample_size_in_seconds, self.sampling_rate, self.overlap)\n save_h5py(data, self.data_save_path + '/' + filename_to_save + '_data.h5')\n save_h5py(labels, self.data_save_path + '/' + filename_to_save + '_labels.h5')\n\n def process_audio_and_save_npy(self, data_file, filename_to_save, shuffle=True):\n df = pd.read_csv(data_file)\n print('Read a ')\n if shuffle:\n df = df.sample(frac=1)\n data, labels = preprocess_data(self.base_path, df['WAV_PATH'].values, df['label'].values,\n self.normalise,\n self.sample_size_in_seconds, self.sampling_rate, self.overlap)\n save_npy(data, self.data_save_path + '/' + filename_to_save + '_data.npy')\n save_npy(labels, self.data_save_path + '/' + filename_to_save + '_labels.npy')\n\n def process_audio_and_save_npy_challenge(self, data_file, filename_to_save, shuffle=True):\n df = pd.read_csv(data_file, header=None, delimiter='\\t')\n print('Number of audio files ', len(df))\n if shuffle:\n df = df.sample(frac=1)\n\n # Converting 'A' to 1 and 'N' to 0 - according to Challenge's binary decision.\n # Refer Challenge's Readme for further information\n df[1] = df[1].apply(lambda x: 1 if x == 'A' else 0)\n\n # Irregular use of extensions in data, so handling it here\n df[0] = df[0].apply(lambda x: x.replace('WAV', 'wav'))\n data, labels, raw = preprocess_data(self.base_path, df[0].values, df[1].values,\n self.normalise,\n self.sample_size_in_seconds, self.sampling_rate, self.overlap, self.method)\n print('Number of audio files after processing ', len(data))\n save_npy(data, self.data_save_path + '/' + 
filename_to_save + '_data.npy')\n save_npy(labels, self.data_save_path + '/' + filename_to_save + '_labels.npy')\n save_npy(raw, self.data_save_path + '/' + filename_to_save + '_inline_raw.npy')\n del data\n del labels\n\n def process_audio_and_save_csv(self, data_file, filename_to_save, shuffle=True):\n df = pd.read_csv(data_file, header=None, delimiter='\\t')\n if shuffle:\n df = df.sample(frac=1)\n\n # Converting 'A' to 1 and 'N' to 0 - according to Challenge's binary decision.\n # Refer Challenge's Readme for further information\n df[1] = df[1].apply(lambda x: 1 if x == 'A' else 0)\n\n # Irregular use of extensions in data, so handling it here\n df[0] = df[0].apply(lambda x: x.replace('WAV', 'wav'))\n\n data, labels = preprocess_data_images(self.base_path, self.image_save_path, df[0].values,\n df[1].values,\n self.normalise,\n self.sample_size_in_seconds, self.sampling_rate, self.overlap)\n concat_data = np.concatenate((np.array([data]).T, np.array([labels]).T), axis=1)\n save_csv(concat_data, columns=[\"spectrogram_path\", \"labels\"], filename=\n self.data_save_path + '/' + filename_to_save + '_data_melfilter_specs.csv')\n\n def silent_parts_removal(self, data_file):\n df = pd.read_csv(data_file, header=None, delimiter='\\t')\n print('Number of audio files ', len(df))\n\n # Irregular use of extensions in data, so handling it here\n df[0] = df[0].apply(lambda x: x.replace('WAV', 'wav'))\n remove_silent_parts_from_audio(self.base_path, df[0].values, self.sampling_rate)\n\n def run(self):\n print('Started processing train data . . .')\n self.process_audio_and_save_npy_challenge(self.train_data_file,\n # train_challenge_with_d1_mel_power_to_db_fnot_zr_crossing_opensmile\n filename_to_save='train_challenge_with_d1_mel')\n print('Started processing dev data . . .')\n self.process_audio_and_save_npy_challenge(self.dev_data_file,\n filename_to_save='dev_challenge_with_d1_mel')\n print('Started processing test data . . .')\n self.process_audio_and_save_npy_challenge(self.test_data_file,\n filename_to_save='test_challenge_with_d1_mel')\n\n # print('Started processing train data . . .')\n # self.silent_parts_removal(self.train_data_file)\n # print('Started processing dev data . . .')\n # self.silent_parts_removal(self.dev_data_file)\n # print('Started processing test data . . .')\n # self.silent_parts_removal(self.test_data_file)\n\n def run_images(self):\n print('Started processing train data . . .')\n self.process_audio_and_save_csv(self.train_data_file,\n filename_to_save='train_challenge_with_d1_mel_images')\n print('Started processing dev data . . .')\n self.process_audio_and_save_csv(self.dev_data_file,\n filename_to_save='dev_challenge_with_d1_mel_images')\n print('Started processing test data . . 
.')\n self.process_audio_and_save_csv(self.test_data_file,\n filename_to_save='test_challenge_with_d1_mel_images')\n\n\nif __name__ == '__main__':\n args = parse().__dict__\n configs = json.load(open(args['configs_file']))\n configs = {**configs, **args}\n configs = AttributeDict(configs)\n\n processor = DataProcessor(configs)\n print(configs)\n processor.run()\n","sub_path":"alcoaudio/datagen/data_processor.py","file_name":"data_processor.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"618292758","text":"from datetime import datetime, timedelta\n\nfrom opentech.apply.funds.tests.factories import ApplicationSubmissionFactory, ApplicationRevisionFactory\nfrom opentech.apply.users.tests.factories import UserFactory, StaffFactory\nfrom opentech.apply.utils.tests import BaseViewTestCase\n\nfrom ..models import ApplicationRevision\n\n\nclass BaseSubmissionViewTestCase(BaseViewTestCase):\n url_name = 'funds:submissions:{}'\n base_view_name = 'detail'\n\n def get_kwargs(self, instance):\n return {'pk': instance.id}\n\n\nclass TestStaffSubmissionView(BaseSubmissionViewTestCase):\n user_factory = StaffFactory\n\n def test_can_view_a_submission(self):\n submission = ApplicationSubmissionFactory()\n response = self.get_page(submission)\n self.assertContains(response, submission.title)\n\n def test_can_progress_stage(self):\n submission = ApplicationSubmissionFactory(status='concept_review_discussion', workflow_stages=2, lead=self.user)\n response = self.post_page(submission, {'form-submitted-progress_form': '', 'action': 'invited_to_proposal'})\n\n # Invited for proposal is a a determination, so this will redirect to the determination form.\n url = self.url_from_pattern('funds:submissions:determinations:form', kwargs={'submission_pk': submission.id})\n self.assertRedirects(response, f\"{url}?action=invited_to_proposal\")\n\n def test_cant_progress_stage_if_not_lead(self):\n submission = ApplicationSubmissionFactory(status='concept_review_discussion', workflow_stages=2)\n self.post_page(submission, {'form-submitted-progress_form': '', 'action': 'invited_to_proposal'})\n\n submission = self.refresh(submission)\n\n self.assertEqual(submission.status, 'concept_review_discussion')\n self.assertIsNone(submission.next)\n\n def test_cant_access_edit_button_on_applicant_submission(self):\n submission = ApplicationSubmissionFactory(status='more_info')\n response = self.get_page(submission)\n self.assertNotContains(response, self.url(submission, 'edit', absolute=False))\n\n\nclass TestApplicantSubmissionView(BaseSubmissionViewTestCase):\n user_factory = UserFactory\n\n def test_can_view_own_submission(self):\n submission = ApplicationSubmissionFactory(user=self.user)\n response = self.get_page(submission)\n self.assertContains(response, submission.title)\n\n def test_sees_latest_draft_if_it_exists(self):\n submission = ApplicationSubmissionFactory(user=self.user)\n draft_revision = ApplicationRevisionFactory(submission=submission)\n submission.draft_revision = draft_revision\n submission.save()\n\n draft_submission = submission.from_draft()\n response = self.get_page(submission)\n\n self.assertContains(response, draft_submission.title)\n\n def test_cant_view_others_submission(self):\n submission = ApplicationSubmissionFactory()\n response = self.get_page(submission)\n self.assertEqual(response.status_code, 403)\n\n def test_get_edit_link_when_editable(self):\n submission = 
ApplicationSubmissionFactory(user=self.user, status='more_info')\n response = self.get_page(submission)\n self.assertContains(response, 'Edit')\n self.assertContains(response, self.url(submission, 'edit', absolute=False))\n self.assertNotContains(response, 'Congratulations')\n\n def test_get_congratulations_draft_proposal(self):\n submission = ApplicationSubmissionFactory(user=self.user, draft_proposal=True)\n response = self.get_page(submission)\n self.assertContains(response, 'Congratulations')\n\n def test_can_edit_own_submission(self):\n submission = ApplicationSubmissionFactory(user=self.user, draft_proposal=True)\n response = self.get_page(submission, 'edit')\n self.assertContains(response, submission.title)\n\n def test_gets_draft_on_edit_submission(self):\n submission = ApplicationSubmissionFactory(user=self.user, draft_proposal=True)\n draft_revision = ApplicationRevisionFactory(submission=submission)\n submission.draft_revision = draft_revision\n submission.save()\n\n response = self.get_page(submission, 'edit')\n self.assertDictEqual(response.context['object'].form_data, draft_revision.form_data)\n\n def test_cant_edit_submission_incorrect_state(self):\n submission = ApplicationSubmissionFactory(user=self.user, workflow_stages=2)\n response = self.get_page(submission, 'edit')\n self.assertEqual(response.status_code, 403)\n\n def test_cant_edit_other_submission(self):\n submission = ApplicationSubmissionFactory(draft_proposal=True)\n response = self.get_page(submission, 'edit')\n self.assertEqual(response.status_code, 403)\n\n\nclass TestRevisionsView(BaseSubmissionViewTestCase):\n user_factory = UserFactory\n\n def test_create_revisions_on_submit(self):\n submission = ApplicationSubmissionFactory(status='draft_proposal', workflow_stages=2, user=self.user)\n old_data = submission.form_data.copy()\n new_data = submission.raw_data\n new_title = 'New title'\n new_data[submission.must_include['title']] = new_title\n\n self.post_page(submission, {'submit': True, **new_data}, 'edit')\n\n submission = self.refresh(submission)\n\n self.assertEqual(submission.status, 'proposal_discussion')\n self.assertEqual(submission.revisions.count(), 2)\n self.assertDictEqual(submission.revisions.last().form_data, old_data)\n self.assertDictEqual(submission.live_revision.form_data, submission.form_data)\n self.assertEqual(submission.live_revision.author, self.user)\n self.assertEqual(submission.title, new_title)\n\n def test_dont_update_live_revision_on_save(self):\n submission = ApplicationSubmissionFactory(status='draft_proposal', workflow_stages=2, user=self.user)\n old_data = submission.form_data.copy()\n new_data = submission.raw_data\n new_data[submission.must_include['title']] = 'New title'\n self.post_page(submission, {'save': True, **new_data}, 'edit')\n\n submission = self.refresh(submission)\n\n self.assertEqual(submission.status, 'draft_proposal')\n self.assertEqual(submission.revisions.count(), 2)\n self.assertDictEqual(submission.draft_revision.form_data, submission.from_draft().form_data)\n self.assertEqual(submission.draft_revision.author, self.user)\n self.assertDictEqual(submission.live_revision.form_data, old_data)\n\n def test_existing_draft_edit_and_submit(self):\n submission = ApplicationSubmissionFactory(status='draft_proposal', workflow_stages=2, user=self.user)\n draft_data = submission.raw_data.copy()\n draft_data[submission.must_include['title']] = 'New title'\n self.post_page(submission, {'save': True, **draft_data}, 'edit')\n\n submission = self.refresh(submission)\n\n 
new_title = 'Newer title'\n draft_data[submission.must_include['title']] = new_title\n self.post_page(submission, {'submit': True, **draft_data}, 'edit')\n\n submission = self.refresh(submission)\n\n self.maxDiff = None\n self.assertEqual(submission.revisions.count(), 2)\n self.assertDictEqual(submission.draft_revision.form_data, submission.from_draft().form_data)\n self.assertDictEqual(submission.live_revision.form_data, submission.form_data)\n\n self.assertEqual(submission.title, new_title)\n\n\nclass TestRevisionList(BaseSubmissionViewTestCase):\n base_view_name = 'revisions:list'\n user_factory = StaffFactory\n\n def get_kwargs(self, instance):\n return {'submission_pk': instance.pk}\n\n def test_list_doesnt_include_draft(self):\n submission = ApplicationSubmissionFactory()\n draft_revision = ApplicationRevisionFactory(submission=submission)\n submission.draft_revision = draft_revision\n submission.save()\n\n response = self.get_page(submission)\n\n self.assertNotIn(draft_revision, response.context['object_list'])\n\n def test_get_in_correct_order(self):\n submission = ApplicationSubmissionFactory()\n\n revision = ApplicationRevisionFactory(submission=submission)\n ApplicationRevision.objects.filter(id=revision.id).update(timestamp=datetime.now() - timedelta(days=1))\n\n revision_older = ApplicationRevisionFactory(submission=submission)\n ApplicationRevision.objects.filter(id=revision_older.id).update(timestamp=datetime.now() - timedelta(days=2))\n\n response = self.get_page(submission)\n\n self.assertSequenceEqual(\n response.context['object_list'],\n [submission.live_revision, revision, revision_older],\n )\n","sub_path":"opentech/apply/funds/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":8740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"500363172","text":"'''\nYour function should take in a signle parameter (a string `word`)\nYour function should return a count of how many occurences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. 
It cannot contain any loops.\n'''\n\n\ndef count_th(word):\n # made count a list because I can't remember how to make it not re-start at 0 with recursion\n count = []\n # starting point for letter search\n x = 0\n\n def helper(x, word):\n # base cases, if it's less than or equal to the len(word), then there can't be any th's\n if x == len(word) - 1:\n return\n elif len(word) <= 1:\n return\n # not base case, then compare the neighboring letters\n elif word[x] + word[x+1] == 'th':\n # if they are th, then add a 1 to the count list\n count.append(1)\n return helper(x+1, word)\n helper(x, word)\n # add up all the nums in count for the answer\n answer = sum(count)\n return answer\n\n\nprint(count_th('the'))\n","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"35869079","text":"from typing import Type,TypeVar\n\nclass User: ...\n\nclass BasicUser(User): ...\n\nclass ProUser(BasicUser): ...\n\nclass TeamUser(ProUser): ...\n\nU = TypeVar('U',bound=User)\n\ndef create_new_user(user_class= Type[U]) -> User:\n user = user_class()\n\n return user\n\nozgur = create_new_user(ProUser)\neren = create_new_user(BasicUser)\nmehmet = create_new_user(TeamUser)\n\nprint(type(ozgur))\nprint(type(eren))\nprint(type(mehmet))\n","sub_path":"src/Random/TypeEx.py","file_name":"TypeEx.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"43426993","text":"# coding:utf-8\n__author__ = \"BrianYuan\"\n\nimport csv\n\nfrom collections import Counter\n\nreader = csv.reader(file('data/bug_data_selected.csv', 'rb'), delimiter='#')\n\nassignee_list = []\nassignee_set = set()\n\ntarget_default_list = [\"nova\", \"neutron\", \"keystone\", \"cinder\", \"glance\", \"ceilometer\", \"heat\", \"trove\", \"swift\",\n \"sahara\", \"horizon\"]\n\nindex = 0\nfor row in reader:\n if index == 0:\n index += 1\n continue\n assignee = row[9]\n status = row[4]\n target = row[8]\n if status != \"Fix Released\" and status != \"Fix Committed\":\n continue\n if target not in target_default_list:\n continue\n if assignee:\n assignee_list.append(assignee)\n assignee_set.add(assignee)\n\nc = Counter(assignee_list)\n\noutput = open('data/most_frequent_assignee.txt', 'wb')\nlines = []\nfor assignee in assignee_set:\n if c[assignee] > 10:\n lines.append(assignee + ' '+ str(c[assignee]) + '\\n')\n\noutput.writelines(lines)","sub_path":"find_most_frequent_assignee.py","file_name":"find_most_frequent_assignee.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"320513403","text":"from urllib.parse import urljoin\r\nfrom functools import reduce\r\nimport operator\r\nimport re\r\n\r\nfrom ..core import *\r\nfrom ..vparsers import *\r\nfrom ..utils import *\r\n\r\n\r\nclass OsiedleOliwaParser(SingleWebpageParser):\r\n url = \"http://www.jbmdevelopment.pl/rezerwacje/osiedle-oliwia\"\r\n method = \"GET\"\r\n\r\n schema = [\r\n DataUnit(label=\"Mieszkanie\", parser=StringParser(), id=\"number\"),\r\n DataUnit(label=\"Il. 
pokoi\", parser=IntParser(), id=\"rooms\"),\r\n DataUnit(label=\"Pow.\", parser=AreaParser(), id=\"area\"),\r\n DataUnit(label=\"Piętro\", parser=FloorParser(), id=\"floor\"),\r\n DataUnit(label=\"alkon/Ogródek\", parser=NoneParser(), id=\"none\"),\r\n DataUnit(label=\"Strony świata\", parser=StringParser(), id=\"location\"),\r\n DataUnit(label=\"Status\", parser=StatusParser(), id=\"status\"),\r\n DataUnit(label=\"Cena\", parser=PriceParser(), id=\"price\"),\r\n DataUnit(label=\"Budyenk\", parser=StringParser(), id=\"building\")\r\n ]\r\n\r\n @tryexcept_wrapper((AttributeError, IndexError), return_value=[])\r\n def find_records(self, soup):\r\n titles = soup.find_all(\"h1\")\r\n tables = soup.find_all(\"table\", {\"class\": \"uk-table\"})\r\n return [\r\n (record, titles[i].text)\r\n for i, table in enumerate(tables)\r\n for record in table.find_all(\"tr\")[1:]\r\n if record.find(\"td\").find(\"h2\") is None\r\n ]\r\n \r\n def split_record(self, data):\r\n record, title = data\r\n return [ td.text for td in record.find_all(\"td\") ] + [title]\r\n \r\n def modify_record(self, record, soup=None):\r\n record[\"fid\"] = \"{building}/{floor}/{number}\".format(**record)\r\n return record\r\n","sub_path":"parsers/jbm/osiedleoliwa.py","file_name":"osiedleoliwa.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"530314510","text":"#2016.4.25,nandtotetris, project6, homework.\n#\n#\n#print('Please input file name:')\n#filename = input()\n#print(filename)\nfilename = '/Users/wzh/Downloads/nand2tetris/projects/06/add/Add.asm'\n\ntry:\n f = open(filename, 'r')\n for line in f:\n print(f.readline())\nfinally:\n if f:\n f.close()\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"357244373","text":"\nimport random\nimport time\n\nclass Stash(object):\n def __init__(self, cache, key_prefix=None):\n self.cache = cache\n self.key_prefix = key_prefix or \"\"\n\n def _gen_key(self, key, namespace=None):\n return \"%s%s:%s\" % (self.key_prefix,\n namespace and self._get_namespace_key(namespace) or \"\", key)\n\n def _get_namespace_key(self, name):\n \"\"\"\n Returns the key for the given namespace generating it if necessary.\n The namespace key is used to mass invalidate a group of keys that\n belong to the namespace.\n \"\"\"\n key = \"%snamespace:%s\" % (self.key_prefix, name)\n ns_key = self.cache.get(key)\n if not ns_key:\n ns_key = str(random.randint(0, 2000000000))\n self.cache.set(key, ns_key, 60*60*24*10)\n ns_key = \"ns:%s:%s\" % (name, ns_key)\n return ns_key\n\n def clear_namespace(self, name):\n \"\"\"\n Delete the key for the given namespace thus invalidating all\n keys in that namespace.\n \"\"\"\n key = \"%snamespace:%s\" % (self.key_prefix, name)\n if hasattr(getattr(self.cache, '_cache', None), 'incr'):\n try:\n self.cache._cache.incr(key)\n except ValueError:\n pass\n else:\n self.cache.delete(key)\n\n def __contains__(self, key):\n return key in self.cache\n\n def __call__(self, key, func, timeout=None, namespace=None, raw_key=False):\n if not isinstance(key, basestring):\n return self.__call_many(key, func, timeout, namespace, raw_key)\n\n if not raw_key:\n key = self._gen_key(key, namespace)\n\n val = self.get(key, raw_key=True)\n if val is None:\n if hasattr(timeout, '__call__'):\n start_time = time.time()\n val = func()\n end_time = time.time()\n 
timeout = timeout(end_time - start_time)\n else:\n val = func()\n self.set(key, val, timeout, raw_key=True)\n\n return val\n\n def __call_many(self, keys, func, timeout=None, namespace=None, raw_key=False):\n okeys = keys\n if not raw_key:\n keys = dict((self._gen_key(k, namespace), k) for k in set(keys))\n\n vals = self.get_many(keys, raw_key=True)\n if keys:\n if hasattr(timeout, '__call__'):\n start_time = time.time()\n newvals = func(keys.values())\n end_time = time.time()\n timeout = timeout(end_time - start_time)\n else:\n newvals = func(keys.values())\n for k, v in newvals.iteritems():\n inv = dict((v,k) for k,v in keys.iteritems())\n self.set(inv[k], v, timeout, raw_key=True)\n vals.update(newvals)\n\n return vals\n\n def get(self, key, namespace=None, raw_key=False):\n if not raw_key:\n key = self._gen_key(key, namespace)\n return self.cache.get(key)\n\n def set(self, key, value, timeout=None, namespace=None, raw_key=False):\n if not raw_key:\n key = self._gen_key(key, namespace)\n self.cache.set(key, value, timeout)\n\n def delete(self, key, namespace=None, raw_key=False):\n if not raw_key:\n key = self._gen_key(key, namespace)\n self.cache.delete(key)\n\n def get_many(self, keys, namespace=None, raw_key=False):\n \"\"\"\n If raw_key is True, keys must be a dict and it WILL BE modified, and\n upon return keys will include only the keys that were not in the cache.\n \"\"\"\n if not raw_key:\n keys = dict((self._gen_key(k, namespace), k) for k in set(keys))\n\n d = {}\n\n if keys:\n for k, v in self.cache.get_many(keys.keys()).iteritems():\n d[keys[k]] = v\n del keys[k]\n\n return d\n","sub_path":"gypsy/utils/stash.py","file_name":"stash.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"566741801","text":"from .Utils import getBedDetails, isPatientExist\nfrom django.db import models\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms.forms import Form\n\n\n# Create your models here.\n\n\nclass BedTypes(models.Model):\n \n bed_id = models.IntegerField(primary_key=True)\n bed_available = models.IntegerField()\n bed_name = models.CharField(max_length=20)\n\n\nclass BedSystem(models.Model):\n bed_no = models.PositiveIntegerField()\n bed_type = models.ForeignKey(BedTypes, on_delete=models.CASCADE, null=True)\n free_or_occupy = models.BooleanField(default=False)\n patient_name = models.CharField(max_length=20)\n patient_mobile = models.CharField(max_length=10)\n checkout = models.BooleanField(default=False)\n\n \nclass BedSystemForm(forms.ModelForm):\n b = (\n (1, \"General\"),\n (2, 'Semi Private'),\n (3, 'Private'))\n type = forms.ChoiceField(choices=b, widget=forms.Select(attrs={\"class\": \"form-control\"}))\n\n def clean_patient_mobile(self):\n data = self.cleaned_data[\"patient_mobile\"] \n if len(data) < 10:\n raise forms.ValidationError(\"Please enter 10 number of digit\") \n elif data.isnumeric() is False:\n raise forms.ValidationError(\"Please enter only digit\")\n return data\n\n def clean(self):\n cleaned_data = super().clean()\n data = cleaned_data.get(\"type\")\n bedNo = cleaned_data.get(\"bed_no\")\n p_name = cleaned_data.get(\"patient_name\")\n p_number = cleaned_data.get(\"patient_mobile\")\n bed_details = getBedDetails(data)\n print(bed_details)\n if isPatientExist(p_name, p_number):\n print(isPatientExist(p_name, p_number))\n self.add_error('patient_mobile', 'This patient name and mobile number is already exist in Bed 
System')\n \n if bed_details is None or bedNo not in bed_details.keys():\n self.add_error('bed_no', 'Bed number is not t Associated with Bed Type')\n elif bedNo in bed_details.keys() and bed_details.get(bedNo) == True:\n self.add_error('bed_no', 'This bed no is already occupy Please choose other one')\n \n class Meta:\n model = BedSystem\n fields = ('type', 'bed_no', 'patient_name', 'patient_mobile')\n widgets = {\n 'bed_no': forms.TextInput(attrs={'class': 'form-control'}),\n 'patient_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'patient_mobile': forms.TextInput(attrs={'class': 'form-control'})\n \n }\n\n","sub_path":"covidApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"288713533","text":"from gevent.select import select\nfrom gevent.server import StreamServer\nfrom gevent import socket\n\n\ndef handler(socket, address):\n while True:\n if not socket.recv(1000):\n break\n\n\nserver = StreamServer(('127.0.0.1', 0), handler)\nserver.start()\n\ns = socket.create_connection(('127.0.0.1', server.server_port))\nwhile True:\n select([], [s.fileno()] * 10, [])\n","sub_path":"lib/gevent/greentest/xtest__issue91.py","file_name":"xtest__issue91.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"576123979","text":"import configparser\nimport logging\n\nfrom kodijson import Kodi, PLAYER_VIDEO\n\nfrom aiy.voice import tts\n\n# KodiRemote: Send command to Kodi\n# ================================\n#\n\nclass KodiRemote(object):\n\n \"\"\"Sends a command to a kodi client machine\"\"\"\n\n def __init__(self, configPath):\n self.configPath = configPath\n self.kodi = None\n self.action = None\n self.request = None\n\n def run(self, voice_command):\n config = configparser.ConfigParser()\n config.read(self.configPath)\n settings = config['kodi']\n\n number_mapping = [ ('9 ', 'nine ') ]\n\n if self.kodi is None:\n logging.info('No current connection to a Kodi client')\n\n for key in settings:\n if key not in ['username','password']:\n if voice_command.startswith(key):\n voice_command = voice_command[(len(key)+1):]\n self.kodi = Kodi('http://' + settings[key] + '/jsonrpc', kodiUsername, kodiPassword)\n elif self.kodi is None:\n self.kodi = Kodi('http://' + settings[key] + '/jsonrpc', kodiUsername, kodiPassword)\n\n try:\n self.kodi.JSONRPC.Ping()\n except:\n tts.say('Unable to connect to client')\n return\n\n if voice_command.startswith('tv '):\n result = self.kodi.PVR.GetChannels(channelgroupid='alltv')\n channels = result['result']['channels']\n if len(channels) == 0:\n tts.say('No channels found')\n\n elif voice_command == 'tv channels':\n tts.say('Available channels are')\n for channel in channels:\n tts.say(channel['label'])\n\n else:\n for k, v in number_mapping:\n voice_command = voice_command.replace(k, v)\n\n channel = [item for item in channels if (str(item['label']).lower() == voice_command[3:])]\n if len(channel) == 1:\n self.kodi.Player.Open(item={'channelid':int(channel[0]['channelid'])})\n\n else:\n logging.info('No channel match found for ' + voice_command[3:] + '(' + str(len(channel)) + ')')\n tts.say('No channel match found for ' + voice_command[3:])\n tts.say('Say Kodi t v channels for a list of available channels')\n\n elif voice_command.startswith('play unwatched ') or voice_command.startswith('play tv series '):\n voice_command = 
voice_command[15:]\n result = self.kodi.VideoLibrary.GetTVShows(sort={'method':'dateadded','order':'descending'},filter={'field':'title','operator': 'contains', 'value': voice_command}, properties=['playcount','sorttitle','dateadded','episode','watchedepisodes'])\n if 'tvshows' in result['result']:\n if len(result['result']['tvshows']) > 0:\n result = self.kodi.VideoLibrary.GetEpisodes(tvshowid=result['result']['tvshows'][0]['tvshowid'], sort={'method':'episode','order':'ascending'},filter={'field':'playcount','operator': 'lessthan', 'value': '1'},properties=['episode','playcount'],limits={'end': 1})\n if 'episodes' in result['result']:\n if len(result['result']['episodes']) > 0:\n self.kodi.Player.Open(item={'episodeid':result['result']['episodes'][0]['episodeid']})\n\n else:\n tts.say('No new episodes of ' + voice_command + ' available')\n logging.info('No new episodes of ' + voice_command + ' available')\n\n else:\n tts.say('No new episodes of ' + voice_command + ' available')\n logging.info('No new episodes of ' + voice_command + ' available')\n\n else:\n tts.say('No tv show found titled ' + voice_command)\n logging.info('No tv show found')\n\n elif voice_command.startswith('play recording '):\n voice_command = voice_command[15:]\n result = self.kodi.PVR.GetRecordings(properties=[\"starttime\"])\n if 'recordings' in result['result']:\n if len(result['result']['recordings']) > 0:\n recordings = sorted([recording for recording in result[\"result\"][\"recordings\"] if recording[\"label\"].lower() == voice_command], key = lambda x : x[\"starttime\"], reverse=True)\n if len(recordings) > 0:\n self.kodi.Player.Open(item={'recordingid':int(recordings[0][\"recordingid\"])})\n else:\n tts.say('No recording titled ' + voice_command)\n logging.info('No recording found')\n else:\n tts.say('No recordings found')\n logging.info('No PVR recordings found')\n\n elif voice_command == 'stop':\n result = self.kodi.Player.Stop(playerid=1)\n logging.info('Kodi response: ' + str(result))\n\n elif voice_command == 'play' or voice_command == 'pause' or voice_command == 'paws' or voice_command == 'resume':\n result = self.kodi.Player.PlayPause(playerid=1)\n logging.info('Kodi response: ' + str(result))\n\n elif voice_command == 'update tv shows':\n self.kodi.VideoLibrary.Scan()\n\n elif voice_command == 'shutdown' or voice_command == 'shut down':\n self.kodi.System.Shutdown()\n\n else:\n tts.say('Unrecognised Kodi command')\n logging.warning('Unrecognised Kodi request: ' + voice_command)\n return\n","sub_path":"src/modules/kodi.py","file_name":"kodi.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"465795429","text":"\n\"\"\"\n1.数据输入\n2.构建计算图\n3.训练数据\n\"\"\"\nimport tensorflow as tf\n\nfrom PIL import Image\nfrom tensorflow import gfile\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\nlearning_rate = 0.002\nbeta1 = 0.5\n\n\nclass DCGAN(object):\n def __init__(self, batch_size, z_dim=100, img_size=32,):\n self.z_dim = z_dim\n self.img_size = img_size\n self.indicator = 0\n self.example_num = 55000\n self.z_data = np.random.standard_normal([55000, 4])\n self.load_data()\n self.batch_size = batch_size\n self.resize_img()\n self.random_shuffle()\n\n def load_data(self):\n self.img_data = mnist.train.images\n\n def random_shuffle(self):\n 
p = np.random.permutation(55000)\n self.img_data = self.img_data[p]\n self.z_data = self.z_data[p]\n\n def resize_img(self):\n data = self.img_data * 255\n img_data_array = []\n for i in range(55000): # 为了计算的快捷,还需要修改。\n img_data = np.reshape(data[i], [28, 28])\n img_data = Image.fromarray(img_data)\n img_data = img_data.resize([32, 32])\n img_data = np.reshape(np.asarray(\n img_data) / 127.5 - 1, [32, 32, 1])\n img_data_array.append(img_data)\n self.img_data_array = np.asarray(img_data_array)\n\n def next_batch(self): # 很有意思的一个data_shuffle\n end_indicater = self.indicator + self.batch_size\n if end_indicater > 55000:\n self.random_shuffle()\n self.indicator = 0\n end_indicater = self.indicator + self.batch_size\n assert end_indicater < 55000\n self.batch_data = self.img_data_array[self.indicator:end_indicater]\n self.batch_z_data = self.z_data[self.indicator:end_indicater]\n self.indicator = end_indicater\n return np.asarray(self.batch_z_data, np.float32),\\\n np.asarray(self.batch_data, np.float32)\n# 以上的部分用于获取数据\n# 一个卷积和一个反卷积。\n\n\ndef conv2d_tranpose(\n input,\n out_channel,\n name,\n training=True,\n with_bn_relu=False):\n with tf.name_scope(name):\n conv2d_trans = tf.layers.conv2d_transpose(\n input, out_channel, [\n 5, 5], strides=[\n 2, 2], padding='SAME')\n if with_bn_relu:\n bn = tf.layers.batch_normalization(conv2d_trans, training=training)\n return bn\n else:\n return conv2d_trans\n\n\ndef conv2d(input, out_channel, name, training=True):\n def leak_relu(x, rate=0.2, name=''):\n return tf.maximum(x, rate * x, name=name)\n with tf.variable_scope(name):\n conv2d_out = tf.layers.conv2d(input, out_channel, [5, 5], [2, 2],\n padding='SAME')\n bn = tf.layers.batch_normalization(\n conv2d_out, training=training) # 暂时没有理解training的意义。\n conv2d_data = leak_relu(bn, name='output')\n tf.summary.histogram('conv2d', conv2d_out)\n return conv2d_data\n\n# 一个生成器和一个判别器。\n\n\nclass generator(object): # 这里就将类变成了一个可调用对象。和函数的性质差不多。\n def __init__(self, init_conv_size, training=True):\n self.init_conv_size = init_conv_size\n self.g_conv_channel = [128, 64, 32, 1]\n self.d_conv_channel = [32, 64, 125, 256]\n self.training = training\n self.reuse = False\n\n def __call__(self, inputs):\n \"\"\"\"\n 生成器,一开始生成的是一个长度为4的向量。通过一个全连接层,转为初始卷积大小:【4,4,1】.\n #然后将初始的[4,4]的一堆卷积通过反卷积放大。其通道数的变化是g_conv_channel=[128,64,32,1]。\n 但是同时每经历一次反卷积,其size加倍。所以4*4-->8*8-->16*16-->32*32。所以最后输出的size是【32,32,1】的结构的。\n 卷积,每次处理完都会跟着一个批归一化。对于卷积是每一层后面都会跟着一个批归一化。\n \"\"\"\"\"\n input = tf.convert_to_tensor(inputs)\n with tf.variable_scope('generators', reuse=self.reuse):\n with tf.variable_scope('input_conv-1', reuse=self.reuse):\n fc = tf.layers.dense(input, self.g_conv_channel[0] *\n self.init_conv_size * self.init_conv_size)\n conv0 = tf.reshape(fc, [-1, self.init_conv_size,\n self.init_conv_size,\n self.g_conv_channel[0]])\n bn0 = tf.layers.batch_normalization(conv0)\n relu0 = tf.nn.relu(bn0)\n deconv = relu0\n for i in range(1, len(self.g_conv_channel)):\n with_bn_relu = (i != len(self.g_conv_channel) - 1)\n deconv = conv2d_tranpose(\n deconv, self.g_conv_channel[i], 'deconv%d' %\n i, self.training, with_bn_relu)\n img_input = deconv\n with tf.variable_scope('img'):\n img = tf.nn.tanh(img_input, 'img')\n self.reuse = True\n self.variable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='generators')\n return img_input\n\n\nclass discrim(object):\n def __init__(self):\n self.d_conv_channel = [32, 64, 125, 256]\n self.reuse = False\n\n def __call__(self, input):\n img_tensor = tf.convert_to_tensor(input)\n # 
32*32的图片-->16*16-->8*8-->4*4-->2*2。展开之后是一行4列。\n with tf.variable_scope('discrim', reuse=self.reuse):\n for i in range(len(self.d_conv_channel)):\n img_tensor = conv2d(\n img_tensor,\n self.d_conv_channel[i],\n name='conv2d_data%d' %\n i)\n img_tensor = img_tensor\n with tf.variable_scope('fc', reuse=self.reuse):\n flat = tf.layers.flatten(img_tensor)\n logit = tf.layers.dense(flat, 2, name='logit')\n self.reuse = True\n self.variable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='discrim')\n return logit\n\n\n\"\"\"#将生成的数据和原始的数据开始做误差分析\"\"\"\n\n\nclass DCGAN_TM(object):\n def __init__(self):\n self.batch_size = 128\n self.z_size = 4\n self.img_size = 32\n self.init_conv2_size = 4\n self.generator = generator(self.init_conv2_size)\n self.discrim = discrim()\n pass\n\n def build(self):\n # build函数是用于建立一个误差的。输入是生成的【128,4】的一个向量,以及从数据集中取出来的【128,32,32,1】的图像。\n # 输出的是三组loss.\n self.z_placeholder = tf.placeholder(\n tf.float32, [self.batch_size, self.z_size])\n self.img_placeholder = tf.placeholder(tf.float32, [self.batch_size,\n self.img_size,\n self.img_size, 1])\n generator_img = self.generator(\n self.z_placeholder) # 就是这个占位符中的数据,拿去生成的数据。\n # 最后会生成一个[batch_size,32,32,1]的数据。然后把生成的假图像放到判别器中进行判别。\n fake_img_logit = self.discrim(generator_img)\n # 生成出来的图片进行判别之后会生成【batch_size,2的结构】\n real_img_logit = self.discrim(self.img_placeholder)\n # #损失函数的考虑:\n # #对生成器来说,生成的图片因该能让判别器判别为真。\n # #对判别器来说,对生成的图片应该判别为假。\n # #对于原来的图片应该判别为真。\n # #判别器的第一个loss,对真的图像应该判别为真。\n loss_on_real_to_real = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.ones(\n self.batch_size,\n dtype=tf.int64),\n logits=real_img_logit))\n # #判别器的第二个loss,对假的图片判别为假。\n loss_on_fake_to_fake = tf.cast(\n tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.zeros(\n self.batch_size,\n dtype=tf.int64),\n logits=fake_img_logit)),\n tf.float32)\n # 生成器的loss,让判别器判断出生成的图片为真。\n loss_on_fake_to_real = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.ones(\n self.batch_size,\n tf.int64),\n logits=fake_img_logit))\n # 然后把这些loss进行打包,按照训练的步骤进行打包。\n tf.add_to_collection('g_loss', loss_on_fake_to_real)\n tf.add_to_collection('d_loss', loss_on_real_to_real)\n tf.add_to_collection('d_loss', loss_on_fake_to_fake)\n loss = {\n 'g': tf.add_n(\n tf.get_collection('g_loss'),\n name='g_total_loss'),\n 'd': tf.add_n(\n tf.get_collection('d_loss'),\n name='d_total_loss')}\n tf.summary.scalar('loss', loss['g'])\n\n return loss, generator_img, self.z_placeholder, self.img_placeholder\n\n def build_op(self, losses, learning_rate, beta1):\n g_opt = tf.train.AdamOptimizer(\n learning_rate=learning_rate, beta1=beta1)\n d_opt = tf.train.AdamOptimizer(\n learning_rate=learning_rate, beta1=beta1)\n\n g_opt_op = g_opt.minimize(\n losses['g'], var_list=self.generator.variable)\n d_opt_op = d_opt.minimize(losses['d'], var_list=self.discrim.variable)\n with tf.control_dependencies([g_opt_op, d_opt_op]):\n return tf.no_op(name='train')\n\n\ndef combie_show_imgs(img, img_size, rows=8, col=16):\n \"\"\"\"#把一个batch的小图片生成一张大图片。一共有8*16,128张图也就是batch_size\"\"\"\"\"\n big_img = []\n for i in range(rows):\n img_row = []\n for l in range(col):\n data = img[i * 8 + l]\n data = np.reshape(data, [img_size, img_size])\n data = (data + 1) * 127.5\n img_row.append(data)\n img_row = np.hstack(img_row)\n big_img = big_img.append(img_row)\n big_img = np.vstack(big_img)\n big_img = np.asarray(big_img, np.uint8) # np.uint8表示2^8.取0-255之间的数值。\n big_img = Image.fromarray(big_img)\n return big_img\n\n\n# 
获取到了数据。z_data的数据用于生成图像,img——data的数据用于判断。\nDC = DCGAN(128)\n\nge = generator(4)\nsess = tf.Session()\n# ge(z_data)\nsess.run(tf.global_variables_initializer())\n# fake_img=sess.run(ge(z_data))\n# print(fake_img)\ndcgan = DCGAN_TM()\nlosses, img_gen, z_placeholder, img_placeholder = dcgan.build()\ntf.summary.image('gen_img', img_gen)\nsess.run(tf.global_variables_initializer())\ntrain_op = dcgan.build_op(losses, learning_rate=learning_rate, beta1=beta1)\nwrite_data = tf.summary.FileWriter('log_office', sess.graph)\n\nsess.run(tf.global_variables_initializer())\nfor i in range(10000):\n z_data, img_data = DC.next_batch()\n merged = tf.summary.merge_all()\n fetches = [train_op, losses['g'], losses['d']]\n should_sample = (i + 1) % 10 == 0\n if should_sample:\n fetches += [img_gen, merged]\n out_value, all = sess.run(\n fetches, {\n z_placeholder: z_data, img_placeholder: img_data})\n if should_sample:\n write_data.add_summary(all[-1], i + 1)\n print(all[1])\n if should_sample:\n plt.imshow((np.reshape(out_value[3][0], [32, 32]) + 1) * 127.5)\n plt.show()\n","sub_path":"cv/对抗DCGAN/对抗神经网络处理手写数字集.py","file_name":"对抗神经网络处理手写数字集.py","file_ext":"py","file_size_in_byte":12187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"406178151","text":"product = 1 # Init product as 1\nsumm = 0 # Init sum as 0\n\nfor x in xrange(100,0,-1): # Numbers from 100-1 inclusive, multiply product (get factorial)\n\tproduct *= x\n\nproduct = str(product) # Convert product to string\n\nfor x in xrange(0, len(product)): # For each digit in string, add to sum\n\tsumm += int(product[x:x+1])\n\nprint(summ) # Print the sum of the digits of 100 factorial","sub_path":"Python/Problem20.py","file_name":"Problem20.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"43592150","text":"#!/usr/bin/env python3\n\n#\n# Copyright 2022 Maciej Grela \n# SPDX-License-Identifier: WTFPL\n#\n\n## Category: Making the Internet of Things work for you\n## Shortdesc: Query a Fatek FBs PLC on Port 0 and publish input (X) as well as output (Y) state via MQTT\n\n# Reference: https://github.com/elshaka/fatek-serial\n# Reference: https://github.com/mh-mansouri/FatekPLC_for_LabView/blob/main/FATEK%20Communication%20Protocol.pdf\n\nimport structlog\nimport logging\nimport sys\nimport re\nimport argparse\nimport serial\nimport datetime as dt\nimport serial.tools.list_ports\nfrom types import SimpleNamespace\nimport json\nfrom urllib.parse import urlparse\nimport time\nimport socket\n\n# Reference: https://stackoverflow.com/a/49724281\nLOG_LEVEL_NAMES = [logging.getLevelName(v) for v in\n sorted(getattr(logging, '_levelToName', None) or logging._levelNames) if getattr(v, \"real\", 0)]\n\nlog = structlog.get_logger()\n\nconfig = SimpleNamespace(\n loglevel=\"INFO\",\n\n # Serial port device\n port=None,\n\n # Serial protocol retransmit parameters\n timeout=5,\n retransmit_count=5,\n\n # Station number (address)\n station_no='01',\n\n # Query period\n query_period=5, # Time between queries\n\n # MQTT broker hostname\n mqtt_broker=None,\n\n # MQTT topic\n mqtt_topic=\"fatek\",\n\n)\n\n\n# The Fatek FBs Port 0 protocol uses an LRC like the Modbus ASCII protocol\ndef _lrc(string):\n lrc = 0\n for char in string:\n lrc += ord(char)\n return hex(lrc & 0xff)[2:].upper()\n\n\ndef transaction(ser, command, **kwargs):\n data = kwargs.get('data', '')\n config = kwargs['config']\n\n data = data.replace(' ','')\n string 
= '\\x02' + config.station_no + command + data\n\n n = config.retransmit_count\n while n > 0:\n try:\n command = string + _lrc(string) + '\\x03'\n log.debug('sent to port0', command=command)\n ser.write(command.encode('ascii'))\n\n response = ser.read_until('\\x03').decode('ascii')[1:-3]\n log.debug('received from port0', response=response)\n if len(response) > 0:\n # TODO: Verify LRC and station number on received packets\n d = dict(\n station = response[0:2],\n command = response[2:4],\n error = response[4:5],\n data = response[5:]\n )\n\n if d['error'] != '0':\n log.error('error response', command=command, response=response)\n return None\n\n log.debug('parsed response', parsed=d)\n return d\n else:\n n -= 1\n log.warn('empty response', command=command, attempts_left=n)\n\n except serial.SerialTimeoutException as e:\n n -= 1\n log.warn('timeout', pkt=string, attempts_left=n)\n\n return None\n\n\ndef unpack_states(names, data):\n return zip(names, list(data))\n\n\ndef main_loop(config):\n \n log.info('using serial port', port=config.port)\n\n ser = serial.serial_for_url(config.port)\n ser.baudrate = 9600\n ser.bytesize = serial.SEVENBITS\n ser.parity = serial.PARITY_EVEN\n ser.stopbits = 1\n ser.timeout = config.timeout\n\n while True:\n\n input_count = 12\n input_names = [ f'X{i}' for i in range(0, input_count) ]\n\n # Read 0x0C inputs starting from X0000\n response = transaction(ser, command='44', config=config, data='0CX0000')\n if response:\n inputs = dict(unpack_states(input_names, response['data']))\n log.debug('input states', inputs=inputs)\n else:\n log.error('error reading inputs', response=response)\n\n output_count = 8\n output_names = [ f'Y{i}' for i in range(0, input_count) ]\n\n # Read 0x08 outputs starting from Y0000\n response = transaction(ser, command='44', config=config, data='08Y0000')\n if response:\n outputs = dict(unpack_states(output_names, response['data']))\n log.debug('output states', outputs=outputs)\n else:\n log.error('error reading outputs', response=response)\n\n payload = dict(inputs=dict(inputs), outputs=dict(outputs))\n if mqtt_client:\n for (name,v) in inputs.items():\n topic = f\"{config.mqtt_topic}/inputs/{name}\"\n log.debug('mqtt publish', topic=topic, payload=v)\n mqtt_client.publish(topic, qos=1, payload=str(v))\n for (name,v) in outputs.items():\n topic = f\"{config.mqtt_topic}/outputs/{name}\"\n log.debug('mqtt publish', topic=topic, payload=v)\n mqtt_client.publish(topic, qos=1, payload=str(v))\n\n\n time.sleep(config.query_period)\n\n return 1\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Query a Fatek FBs PLC on Port 0 and publish input (X) as well as output (Y) state via MQTT\")\n parser.add_argument(\"--loglevel\", default=config.loglevel, help=\"Log level\")\n parser.add_argument(\"-p\", \"--port\", metavar=\"URL\", help=\"The serial port URL\")\n parser.add_argument(\"--mqtt-broker\", metavar=\"NAME\", help=\"Send data to specified MQTT broker URL\")\n parser.add_argument(\"--mqtt-topic\", metavar=\"TOPIC\", default=config.mqtt_topic, help=\"Set MQTT topic\")\n parser.add_argument(\"--mqtt-reconnect-delay\", metavar=\"MIN MAX\", nargs=2, type=int, help=\"Set MQTT client reconnect behaviour\")\n\n args = parser.parse_args()\n config.__dict__.update(vars(args))\n\n # Restrict log message to be above selected level\n structlog.configure( wrapper_class=structlog.make_filtering_bound_logger(getattr(logging, args.loglevel)) )\n\n if config.mqtt_broker:\n broker_url = urlparse(config.mqtt_broker)\n 
log.debug(\"MQTT URL {}\".format(broker_url))\n\n import paho.mqtt.client as mqtt\n import ssl\n\n mqtt_client = mqtt.Client()\n\n # TODO: How to attach structlog to paho?\n # mqtt_client.enable_logger(logger=log)\n\n if broker_url.scheme == 'mqtts':\n log.debug(\"Initializing MQTT TLS\")\n mqtt_client.tls_set(cert_reqs=ssl.CERT_NONE)\n mqtt_port = 8883\n else:\n mqtt_port = 1883\n\n if config.mqtt_reconnect_delay is not None:\n (_min_delay, _max_delay) = config.mqtt_reconnect_delay\n mqtt_client.reconnect_delay_set(min_delay=_min_delay, max_delay=_max_delay)\n\n try:\n log.info(\"Connecting to MQTT broker URL '{}'\".format(config.mqtt_broker))\n mqtt_client.connect(broker_url.netloc, port=mqtt_port)\n mqtt_client.loop_start()\n except:\n # Connection to broker failed\n log.error(\"Cannot connect to MQTT broker\", exc_info=True)\n sys.exit(1)\n\n else:\n mqtt_client = None\n\n log.debug('configuration dump', config=config)\n\n main_loop(config)\n","sub_path":"bin/fatek-flight-recorder.py","file_name":"fatek-flight-recorder.py","file_ext":"py","file_size_in_byte":6904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"310178513","text":"# -*- coding: utf-8 -*-\r\n#python 3有xgboots\r\n\r\n# 引入模块\r\n\r\nimport pandas as pd\r\n\r\nfrom xgboost import XGBRegressor\r\n# 读取数据\r\ntrain = pd.read_csv(\"data/train1.csv\")\r\ntest = pd.read_csv(\"data/test1.csv\")\r\nsubmit = pd.read_csv(\"data/sample_submit.csv\")\r\n\r\n# 删除id\r\ntrain.drop('id', axis=1, inplace=True)\r\ntrain.drop('city', axis=1, inplace=True)\r\ntest.drop('id', axis=1, inplace=True)\r\n\r\n# 取出训练集的y\r\ny_train = train.pop('y')\r\n\r\n\r\n\r\n# 建立一个默认的xgboost回归模型\r\nreg = XGBRegressor()\r\nreg.fit(train, y_train)\r\n\r\n#y_pred = reg.predict(test)\r\n\r\n\r\n# 输出预测结果至my_XGB_prediction.csv\r\n#submit['y'] = y_pred\r\n#submit.to_csv('data/my_XGB_prediction22.csv', index=False)\r\n\r\nfrom sklearn import metrics\r\nimport numpy as np\r\nrmse=np.sqrt(metrics.mean_squared_error(y_train, reg.predict(train)))\r\nprint (rmse)\r\n#18.5718185229\r\n","sub_path":"D A/python/回归预测/回归预测xgboots - dropcity.py","file_name":"回归预测xgboots - dropcity.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"544560061","text":"#!/usr/bin/python3\nimport unittest\nfrom python.common.baseunittest import BaseUnitTest\nfrom python.eapi.methods.users.user import User\nfrom python.eapi.methods.roles.role import Role\nfrom python.eapi.methods.teams.team import Team\nfrom python.eapi.methods.contacts.contact import Contact\nfrom python.eapi.methods.contactgroups.contactgroup import ContactGroup\nfrom python.eapi.methods.contactnotes.contactnote import ContactNote\nfrom python.eapi.methods.loans.loan import Loan\nfrom python.eapi.methods.loantypes.loantype import LoanType\nfrom python.eapi.methods.loanstatuses.loanstatus import LoanStatus\nfrom python.eapi.methods.loanprograms.loanprogram import LoanProgram\nfrom python.eapi.methods.loanpurposes.loanpurpose import LoanPurpose\n\n\nclass RunAllGets(BaseUnitTest):\n \"\"\"Performs tests on all GET endpoints.\"\"\"\n\n @BaseUnitTest.log_try_except\n def test_01_get_users(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_user = User()\n the_user.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_02_get_one_user(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. 
Get only that one.\n \"\"\"\n the_user = User()\n random_user = the_user.choose_random()\n the_user.get_one(random_user)\n\n @BaseUnitTest.log_try_except\n def test_03_get_roles(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_role = Role()\n the_role.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_04_get_one_role(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_role = Role()\n random_role = the_role.choose_random()\n the_role.get_one(random_role)\n\n @BaseUnitTest.log_try_except\n def test_05_get_teams(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_team = Team()\n the_team.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_06_get_one_team(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_team = Team()\n random_team = the_team.choose_random()\n the_team.get_one(random_team)\n\n @BaseUnitTest.log_try_except\n def test_07_get_contacts(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_contact = Contact()\n the_contact.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_08_get_one_contact(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_contact = Contact()\n random_contact = the_contact.choose_random()\n the_contact.get_one(random_contact)\n\n @BaseUnitTest.log_try_except\n def test_09_get_contact_groups(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_cgroup = ContactGroup()\n the_cgroup.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_10_get_one_contact_group(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_cgroup = ContactGroup()\n random_cgroup = the_cgroup.choose_random()\n the_cgroup.get_one(random_cgroup)\n\n @BaseUnitTest.log_try_except\n def test_11_get_contact_notes(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_cnote = ContactNote()\n the_cnote.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_12_get_one_contact_note(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_cnote = ContactNote()\n random_cnote = the_cnote.choose_random()\n the_cnote.get_one(random_cnote)\n\n @BaseUnitTest.log_try_except\n def test_13_get_loans(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_loan = Loan()\n the_loan.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_14_get_one_loan(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_loan = Loan()\n random_loan = the_loan.choose_random()\n the_loan.get_one(random_loan)\n\n @BaseUnitTest.log_try_except\n def test_15_get_loan_types(self):\n \"\"\"\n 1. Get multiple..\n \"\"\"\n the_loantype = LoanType()\n the_loantype.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_16_get_one_loan_type(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_ltype = LoanType()\n random_loantype = the_ltype.choose_random()\n the_ltype.get_one(random_loantype)\n\n @BaseUnitTest.log_try_except\n def test_17_get_loan_statuses(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_loanstat = LoanStatus()\n the_loanstat.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_18_get_one_loan_status(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_lstat = LoanStatus()\n random_loanstat = the_lstat.choose_random()\n the_lstat.get_one(random_loanstat)\n\n @BaseUnitTest.log_try_except\n def test_19_get_loan_programs(self):\n \"\"\"\n 1. 
Get multiple.\n \"\"\"\n the_loanprog = LoanProgram()\n the_loanprog.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_20_get_one_loan_program(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_lprog = LoanProgram()\n random_loanprog = the_lprog.choose_random()\n the_lprog.get_one(random_loanprog)\n\n @BaseUnitTest.log_try_except\n def test_21_get_loan_purposes(self):\n \"\"\"\n 1. Get multiple.\n \"\"\"\n the_loanpurp = LoanPurpose()\n the_loanpurp.get_multi()\n\n @BaseUnitTest.log_try_except\n def test_22_get_one_loan_purpose(self):\n \"\"\"\n 1. Choose a random, existing.\n 2. Get only that one.\n \"\"\"\n the_lpurp = LoanPurpose()\n random_loanpurp = the_lpurp.choose_random()\n the_lpurp.get_one(random_loanpurp)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/eapi/tests/test_all_gets.py","file_name":"test_all_gets.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"653819563","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport boto,psycopg2\nfrom boto.s3.key import Key\nimport log,ctrl,util\n\nlogger = log.module_logger()\n\ndef s3conn():\n conn = boto.connect_s3()\n return conn\n\ndef get_bucket(conn,bucketname):\n bucket = conn.get_bucket(bucketname)\n return bucket\n\ndef set_contents(bucket,localfile):\n key = Key(bucket,localfile.name)\n res = key.set_contents_from_file(localfile)\n return res\n\ndef get_contents(bucket,filename,localpath):\n key = bucket.get_key(filename)\n if key is None:\n raise SystemError('{0} in s3 bucket {1} not found.'.format(filename,str(bucket)))\n filepath = os.path.join(localpath,filename)\n key.get_contents_to_filename(filepath)\n return os.path.isfile(filepath)\n \ndef rdsconn(config):\n conn = psycopg2.connect(\n host = config.get('db','host'),\n dbname = config.get('db','dbname'),\n database = config.get('db','database'),\n user = config.get('db','user'),\n password = config.get('db','password'),\n )\n commit_sql(conn,'set search_path to {0};'.format(config.get('db','schema')))\n return conn\n\ndef close_rdsconn(conn):\n conn.close()\n return conn\n\ndef fetch_sql(conn,sql):\n with conn:\n with conn.cursor() as cur:\n cur.execute(sql)\n return cur.fetchall()\n\ndef commit_sql(conn,sql):\n with conn:\n with conn.cursor() as cur:\n cur.execute(sql)\n return conn\n\ndef commit_ctrl_insert(conn,ctrl):\n basesql = \"insert into data_ctrl_mst(maker_id,data_type,data_date) values(%s,%s,%s);\"\n with conn:\n with conn.cursor() as cur:\n cur.execute(basesql,(ctrl.maker_id,ctrl.data_type,util.get_date(ctrl.data_date)))\n return conn\n\ndef commit_ctrl_update(conn,ctrl):\n basesql = \"update data_ctrl_mst set update_stamp = current_timestamp , status = %s where maker_id = %s and data_type = %s and data_date = %s;\"\n with conn:\n with conn.cursor() as cur:\n cur.execute(basesql,(ctrl.status, ctrl.maker_id,ctrl.data_type,util.get_date(ctrl.data_date)))\n return conn\n\ndef commit_ctrl_update_with_msg(conn,ctrl):\n basesql = \"update data_ctrl_mst set update_stamp = current_timestamp , status = %s, msg = %s where maker_id = %s and data_type = %s and data_date = %s;\"\n with conn:\n with conn.cursor() as cur:\n cur.execute(basesql,(ctrl.status,ctrl.msg, ctrl.maker_id,ctrl.data_type,util.get_date(ctrl.data_date)))\n return 
conn\n\n","sub_path":"tools/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"438705303","text":"import logging\nimport falcon\n\nfrom userAds.models.comments import Advert\nfrom userAds.utils import deserializers, serializers\n\nlogger = logging.getLogger(__name__)\n\n\nclass userAdsModel(object):\n def __init__(self):\n pass\n\n @falcon.before(deserializers)\n @falcon.after(serializers)\n def on_post(self, request, result):\n data = request.params.get('data')\n # save to DB\n advert = Advert(**data)\n advert.save()\n result.body = {'status': 'Saved'}\n\n @falcon.before(deserializers)\n @falcon.after(serializers)\n def on_get(self, request, result):\n adverts = Advert.objects()\n result.body = {'items': adverts, 'count': len(adverts)}","sub_path":"userAds/controllers/mainCtrl.py","file_name":"mainCtrl.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"211714235","text":"#! /usr/bin/env python3\n\nfrom random import shuffle, randint\nfrom attack import *\n\nphases = [\"Pre-Battle\", \"Main\", \"Battle\"]\n\nclass Battle(object):\n def __init__(self, friendly, enemy, pvp=False):\n self.friendly = friendly # Friendly team\n self.enemy = enemy # Enemy team\n self.combatants = self.friendly + self.enemy # All combatants\n self.pvp = pvp # Player-versus-player mode\n # Friendly team wins\n def friendly_win(self):\n win = True\n for enemy in self.enemy:\n if enemy.hp > 0:\n win = False\n break\n return win\n # Enemy team wins\n def enemy_win(self):\n win = True\n for friendly in self.friendly:\n if friendly.hp > 0:\n win = False\n break\n return win\n # Choose attack and target\n def choose_attack_target(self, character):\n # Determine current character's team for convenience.\n if character in self.friendly:\n friendly_team = self.friendly\n enemy_team = self.enemy\n else:\n friendly_team = self.enemy\n enemy_team = self.friendly\n # Crappy AI for non PvP.\n if character in self.enemy and self.pvp == False:\n chosen_attack = all_attacks[\"Impossible Waste\"]\n while chosen_attack.cost > character.mp:\n chosen_attack = chosen_attack = character.attacks[randint(0, len(character.attacks)-1)]\n if chosen_attack.target == 2:\n chosen_targets = enemy_team\n elif chosen_attack.target == 3:\n chosen_targets = self.combatants\n elif chosen_attack.target == 4:\n chosen_targets = friendly_team\n elif chosen_attack.target == 0:\n chosen_targets = []\n else:\n chosen_target = None\n while chosen_target == None:\n chosen_target = enemy_team[randint(0, len(enemy_team)-1)]\n while chosen_target.hp < 1:\n chosen_target = enemy_team[randint(0, len(enemy_team)-1)]\n chosen_targets = [chosen_target]\n character.chosen_attack = chosen_attack\n character.chosen_targets = chosen_targets\n # Player's turn.\n else:\n # Print all the character's attacks.\n for attack in range(0, len(character.attacks)):\n the_attack = character.attacks[attack]\n print(attack, the_attack.name, \" \" * (20 - len(the_attack.name)), \"(\" + the_attack.element + \",\", str(the_attack.power), \"POW,\", str(the_attack.cost) + \" MP,\", str(character.critical_rate(the_attack)), \"CRI,\", (\"magical\" if (\"magic\" in str(the_attack.magical).lower() or the_attack.magical == True) else \"physical\") + (\" nuke\" if \"nuke\" in str(the_attack.magical).lower() else \"\") + \")\")\n\n # Set chosen_attack to None for a loop.\n 
chosen_attack = None\n\n # If the character can't attack, pick Hopeless Struggle.\n if character.can_attack() == False:\n chosen_attack = all_attacks[\"Hopeless Struggle\"]\n # Select an attack.\n while chosen_attack == None:\n try: index = int(input(\"Enter the number of your attack: \"))\n except: print(\"Error!\")\n else:\n if not len(character.attacks) < index:\n chosen_attack = character.attacks[index]\n if chosen_attack.cost > character.mp:\n print(\"Not enough MP!\")\n chosen_attack = None\n\n character.chosen_attack = chosen_attack\n # Print an extra line.\n print()\n\n # Assess target.\n if chosen_attack.target == 2: # Target = enemy team\n chosen_targets = enemy_team\n elif chosen_attack.target == 3: # Target = everyone\n chosen_targets = (self.combatants)\n elif chosen_attack.target == 4: # Target = friendly team\n chosen_targets = friendly_team\n elif chosen_attack.target == 0: # Target = self\n chosen_targets = []\n else: # Target = individual character\n for character2 in range(0, len(self.combatants)):\n character22 = self.combatants[character2]\n if character22.hp > 0:\n print(character2, character22.name, \" \" * (20 - len(character22.name)), \"(\" + str(character22.hp) + \"/\" + str(character22.maxHp) + \" HP,\", str(character22.mp) + \"/\" + str(character22.maxMp) + \" MP) (\" + (\"friendly\" if character22 in friendly_team else \"enemy\") + \")\")\n chosen_targets = None\n # Select a target\n while chosen_targets == None:\n try: index = int(input(\"Enter the number of your target: \"))\n except: print(\"Error!\")\n else:\n if not len(self.combatants) < index:\n chosen_targets = [self.combatants[index]]\n if [character] == chosen_targets:\n print(\"You cannot attack yourself!\")\n chosen_targets = None\n elif chosen_targets[0].hp < 1:\n print(chosen_targets[0].name, \"is already dead!\")\n chosen_targets = None\n character.chosen_targets = chosen_targets\n if chosen_attack.target == 1:\n print()\n\n # Start battle.\n def start(self):\n print(\"Combatants:\")\n for combatant in self.combatants:\n print(combatant.name)\n input()\n while not self.friendly_win() and not self.enemy_win(): # While battle isn't over\n for phase in range(0, 3): # Individually run through each character.\n shuffle(self.combatants)\n self.combatants.sort(key=lambda x: (x.get_priority(), x.speed()), reverse=True)\n print(\"It is now the\", phases[phase], \"Phase.\\n\")\n for character in self.combatants:\n if character.hp < 1: # KO'ed character can't fight.\n continue\n # Sort combatants by priority, then Speed stat\n # Determine current character's team for convenience.\n if character in self.friendly:\n friendly_team = self.friendly\n enemy_team = self.enemy\n else:\n friendly_team = self.enemy\n enemy_team = self.friendly\n if phase == 0:\n # Renewal skill restores 30% HP.\n if \"renewal\" in character.skill_properties():\n character.add_hp(character.maxHp * 0.3)\n if \"renewal-\" in character.skill_properties():\n character.add_hp(character.maxHp * 0.15)\n if \"regenerator\" in character.skill_properties():\n character.add_mp(character.maxMp * 0.3)\n if \"regenerator-\" in character.skill_properties():\n character.add_mp(character.maxMp * 0.15)\n elif phase == 1:\n # Print whose turn it is and stats.\n print(character.name, \"must make a move.\", character.name, \"is a\", character.element1 + \"/\" + character.element2 + \"-elemental with\", str(character.hp) + \"/\" + str(character.maxHp), \"HP and\", str(character.mp) + \"/\" + str(character.maxMp), \"MP remaining.\")\n print(\"Skills: \", 
end=\"\")\n skill_names = []\n for skill in character.skills:\n skill_names.append(skill.name)\n print(\", \".join(skill_names))\n print(\"Stats: \", end=\"\")\n for stat, value in character.stats.items():\n print(stat.title() + \":\", eval(\"character.\" + stat + \"()\"), end=\", \")\n print(\"Hit rate:\", character.hit_rate(), end=\", \")\n print(\"Avoid rate:\", character.avoid_rate(), end=\"\\n\\n\")\n self.choose_attack_target(character)\n elif phase == 2:\n # If the character can't attack, mention it.\n if character.can_attack() == False:\n print(character.name, \"has no viable moves left!\")\n\n # Names of chosen targets with HP left.\n chosen_targets_names = []\n for target in character.chosen_targets:\n if target.hp > 0:\n chosen_targets_names.append(target.name)\n\n # Print names of chosen targets.\n print(character.name, \"targeted\", \", \".join(chosen_targets_names), \"using\", character.chosen_attack.name + \"!\")\n\n # Declare your attack.\n character.declare_attack(character.chosen_attack, character.chosen_targets)\n\n # Print results.\n for ct in range(0, len(character.chosen_targets)):\n chosen_target = character.chosen_targets[ct]\n damage = chosen_target.last_damage\n if damage == None or damage == 0:\n continue\n elif damage == False and type(damage) == bool:\n print(character.name + \"'s attack missed\", chosen_target.name + \"!\")\n elif damage == -1:\n continue\n else:\n print(character.name, \"dealt\", damage, \"damage to\", chosen_target.name + \".\", chosen_target.name, \"is now at\", str(chosen_target.hp) + \"/\" + str(chosen_target.maxHp), \"HP.\")\n # Mention that the target was finished off.\n if chosen_target.hp < 1:\n print(chosen_target.name, \"can no longer battle and must retreat.\")\n input()\n # If someone won, break loop.\n if self.friendly_win() or self.enemy_win():\n break\n if self.friendly_win() or self.enemy_win():\n break\n if phase == 0:\n print()\n input(\"End of Phase.\\n\")\n if self.friendly_win() or self.enemy_win():\n break\n # Loop is over; print who won.\n print(\"You won:\", self.friendly_win())\n print(\"You lost:\", self.enemy_win())\n","sub_path":"battle.py","file_name":"battle.py","file_ext":"py","file_size_in_byte":10998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"647503956","text":"#!/usr/bin/env python3\n\"\"\"Yolo (You only look once)\"\"\"\nimport tensorflow.keras as K\nimport tensorflow as tf\nimport numpy as np\nimport glob\nimport cv2\n\n\nclass Yolo:\n \"\"\"Uses the Yolo v3 algorithm to perform object detection\"\"\"\n def __init__(self, model_path, classes_path, class_t, nms_t, anchors):\n \"\"\"Constructor method\n model: the Darknet Keras model\n class_names: a list of the class names for the model\n class_t: the box score threshold for the initial filtering step\n nms_t: the IOU threshold for non-max suppression\n anchors: the anchor boxes\n \"\"\"\n self.model = K.models.load_model(model_path)\n with open(classes_path, 'r') as f:\n self.class_names = f.read().splitlines()\n self.class_t = class_t\n self.nms_t = nms_t\n self.anchors = anchors\n\n def sigmoid(self, x):\n \"\"\"Returns sigmoid function\"\"\"\n return(1/(1 + np.exp(-x)))\n\n def process_outputs(self, outputs, image_size):\n \"\"\"Returns the processed boundary boxes for each output\"\"\"\n boxes, box_confidences, box_class_probs = [], [], []\n img_h, img_w = image_size\n for i in range(len(outputs)):\n # input sizes\n input_w = self.model.input_shape[1]\n input_h = self.model.input_shape[2]\n\n 
# Grid height, grid width and anchors boxes\n grid_h = outputs[i].shape[0]\n grid_w = outputs[i].shape[1]\n anchor_boxes = outputs[i].shape[2]\n\n # Predicted coordinates\n tx = outputs[i][..., 0]\n ty = outputs[i][..., 1]\n tw = outputs[i][..., 2]\n th = outputs[i][..., 3]\n\n # corner\n c = np.zeros((grid_h, grid_w, anchor_boxes))\n # indexes and top-left corner\n idx_y = np.arange(grid_h)\n idx_y = idx_y.reshape(grid_h, 1, 1)\n idx_x = np.arange(grid_w)\n idx_x = idx_x.reshape(1, grid_w, 1)\n cx = c + idx_x\n cy = c + idx_y\n\n # Anchors width and height\n # [116 156 373] [ 90 198 326]\n # [30 62 59] [ 61 45 119]\n # [10 16 33] [13 30 23]\n pw = self.anchors[i, :, 0]\n ph = self.anchors[i, :, 1]\n\n # Bounding box prediction\n bx = self.sigmoid(tx) + cx\n by = self.sigmoid(ty) + cy\n bw = pw * np.exp(tw)\n bh = ph * np.exp(th)\n\n # normalize bx and by values to the grid\n bx = bx / grid_w\n by = by / grid_h\n\n # normalize bw and bh values to the input sizes\n bw = bw / input_w\n bh = bh / input_h\n\n # get the corner coordinates\n bx1 = bx - bw / 2\n by1 = by - bh / 2\n bx2 = bx + bw / 2\n by2 = by + bh / 2\n\n # to image size scale\n outputs[i][..., 0] = bx1 * img_w\n outputs[i][..., 1] = by1 * img_h\n outputs[i][..., 2] = bx2 * img_w\n outputs[i][..., 3] = by2 * img_h\n\n # filtered bounding boxes\n boxes.append(outputs[i][..., 0:4])\n # objectiveness score between 0 and 1\n box_confidences.append(self.sigmoid(outputs[i][..., 4:5]))\n # probability of classes\n box_class_probs.append(self.sigmoid(outputs[i][..., 5:]))\n return(boxes, box_confidences, box_class_probs)\n\n def filter_boxes(self, boxes, box_confidences, box_class_probs):\n \"\"\"filter out boxes with low object score\"\"\"\n scores = []\n\n for i in range(len(boxes)):\n # Computing box scores\n scores.append(box_confidences[i] * box_class_probs[i])\n\n # filtering boxes\n filter_boxes = [box.reshape(-1, 4) for box in boxes]\n filter_boxes = np.concatenate(filter_boxes)\n\n # Finding the index of the class with maximum box score\n classes = [np.argmax(box, -1) for box in scores]\n classes = [box.reshape(-1) for box in classes]\n classes = np.concatenate(classes)\n\n # Getting the corresponding box score\n class_scores = [np.max(box, -1) for box in scores]\n class_scores = [box.reshape(-1) for box in class_scores]\n class_scores = np.concatenate(class_scores)\n\n filtering_mask = np.where(class_scores >= self.class_t)\n # Applying the mask to boxes, classes and scores\n filtered_boxes = filter_boxes[filtering_mask]\n box_classes = classes[filtering_mask]\n box_scores = class_scores[filtering_mask]\n\n return(filtered_boxes, box_classes, box_scores)\n\n def iou(self, filtered_boxes, scores):\n \"\"\"Returns the intersection over union result\"\"\"\n # grab the coordinates of the bounding boxes\n x1 = filtered_boxes[:, 0]\n y1 = filtered_boxes[:, 1]\n x2 = filtered_boxes[:, 2]\n y2 = filtered_boxes[:, 3]\n\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = scores.argsort()[::-1]\n\n # initialize the list of picked indexes\n pick = []\n # keep looping while some indexes still remain in the indexes\n # list\n while idxs.size > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n i = idxs[0]\n pick.append(i)\n\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the 
bounding box\n xx1 = np.maximum(x1[i], x1[idxs[1:]])\n yy1 = np.maximum(y1[i], y1[idxs[1:]])\n xx2 = np.minimum(x2[i], x2[idxs[1:]])\n yy2 = np.minimum(y2[i], y2[idxs[1:]])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n inter = w * h\n union = area[i] + area[idxs[1:]] - inter\n overlap = inter / union\n\n # delete all indexes from the index list that have\n ind = np.where(overlap <= self.nms_t)[0]\n idxs = idxs[ind + 1]\n\n # return only the bounding boxes that were picked using the\n # integer data type\n return pick\n\n def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n \"\"\"non max suppression\"\"\"\n box_predictions = []\n predicted_box_classes, predicted_box_score = [], []\n u_classes = np.unique(box_classes)\n for cls in u_classes:\n idx = np.where(box_classes == cls)\n\n filters = filtered_boxes[idx]\n scores = box_scores[idx]\n classes = box_classes[idx]\n\n pick = self.iou(filters, scores)\n\n filters1 = filters[pick]\n scores1 = scores[pick]\n classes1 = classes[pick]\n\n box_predictions.append(filters1)\n predicted_box_classes.append(classes1)\n predicted_box_score.append(scores1)\n box_predictions = np.concatenate(box_predictions, axis=0)\n predicted_box_classes = np.concatenate(predicted_box_classes, axis=0)\n predicted_box_score = np.concatenate(predicted_box_score, axis=0)\n\n return (box_predictions, predicted_box_classes, predicted_box_score)\n\n @staticmethod\n def load_images(folder_path):\n \"\"\"load images\"\"\"\n images = []\n images_paths = []\n for filename in glob.glob(folder_path + '/*.jpg'):\n images.append(cv2.imread(filename))\n images_paths.append(filename)\n return(images, images_paths)\n\n def preprocess_images(self, images):\n \"\"\"Preprocess images\"\"\"\n rescaled, image_shapes = [], []\n input_w = self.model.input_shape[1]\n input_h = self.model.input_shape[2]\n\n for image in images:\n resized = cv2.resize(image, (input_w, input_h),\n interpolation=cv2.INTER_CUBIC)\n rescaled.append(resized / 255)\n image_shapes.append(image.shape[:2])\n\n pimages = np.array(rescaled)\n image_shapes = np.array(image_shapes)\n return(pimages, image_shapes)\n","sub_path":"supervised_learning/0x0A-object_detection/5-yolo.py","file_name":"5-yolo.py","file_ext":"py","file_size_in_byte":8393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"350597977","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'priming_leonie'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('start', views.start, name='start'),\n path('end', views.end, name='end'),\n path('end_form', views.end_form, name='end_form'),\n path('questions', views.questions, name='questions'), # add a / ???\n path('store_results', views.store_results, name=\"store_results\"),\n path('store_form', views.store_form, name='store_form')\n]\n","sub_path":"MuCogni_App/priming_leonie/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"189102110","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 30 16:23:54 2020\n\n@author: Giusy Falcone (gfalcon2@illinois.edu)\n@copyright University of illinois at Urbana Champaign\n\"\"\"\n\nimport subprocess\nimport time\nimport config\nimport os\n\ndef MarsGram_online(model):\n # Change input file\n keyword_month = ' MONTH ='\n keyword_day = ' MDAY ='\n keyword_year = ' MYEAR ='\n keyword_hour = ' IHR ='\n keyword_min = ' IMIN ='\n keyword_sec = ' SEC ='\n keyword_points = ' NPOS ='\n keyword_inlat = ' FLAT ='\n keyword_inlon = ' FLON ='\n keyword_inhgt = ' FHGT ='\n keyword_deltime = ' DELTIME ='\n keyword_dellat = ' DELLAT ='\n keyword_dellon = ' DELLON ='\n keyword_delhgt = ' DELHGT ='\n keyword_numberMC = ' NMONTE ='\n keyword_seeds = ' NR1 ='\n\n file_object = model['Directory']+'Code/inputstd0.txt'\n\n lines = []\n altitude = str(model['Initial Altitude'])\n latitude = str(model['Initial Latitude'])\n longitude = str(model['Initial Longitude'])\n\n timereal = model['Time Real']\n year = str(timereal.year)\n month = str(timereal.month)\n day = str(timereal.day)\n\n hour = str(timereal.hour)\n minute = str(timereal.minute)\n sec = str(timereal.second)\n\n point_number = str(model['Number of Points'])\n delalt = str(model['Delta Altitude'])\n dellon = str(model['Delta Longitude'])\n dellat = str(model['Delta Latitude'])\n deltime = str(model['Delta t'])\n mc_number = str(model['Monte Carlo'])\n mc_seeds = str(int(config.index_MonteCarlo))\n\n with open(file_object) as infile:\n for line in infile:\n if keyword_month in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + month)\n #print(line)\n elif keyword_day in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + day)\n #print(line)\n elif keyword_year in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + year)\n #print(line)\n elif keyword_hour in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + hour)\n #print(line)\n elif keyword_min in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + minute)\n #print(line)\n elif keyword_sec in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + sec)\n #print(line)\n elif keyword_points in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + str(point_number))\n #print(line)\n elif keyword_inlat in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + latitude)\n #print(line)\n elif keyword_inlon in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + longitude)\n #print(line)\n elif keyword_inhgt in line:\n index = 
line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + altitude)\n #print(line)\n elif keyword_delhgt in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + delalt)\n #print(line)\n elif keyword_dellon in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + dellon)\n #print(line)\n elif keyword_dellat in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + dellat)\n #print(line)\n elif keyword_deltime in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + deltime)\n #print(line)\n elif keyword_numberMC in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + mc_number)\n #print(line)\n elif keyword_seeds in line:\n index = line.find('=')\n old = line[index + 1:-1]\n line = line.replace(str(old), ' ' + mc_seeds)\n lines.append(line)\n\n outfile = open(file_object, 'w')\n outfile.writelines(lines)\n\n infile.close()\n outfile.close()\n\n start_time = time.time()\n marsgram_app = model['Directory']+'Code/marsgram_M10.x'\n output_dir = model['Directory']+'OUTPUT.txt'\n args = (marsgram_app , '-d' ,output_dir) # , \"-c\", \"somefile.xml\", \"-d\", \"text.txt\", \"-r\", \"aString\", \"-f\", \"anotherString\")\n\n\n popen = subprocess.Popen(args, stdout=subprocess.PIPE)\n output = popen.stdout.read()\n #print(\"--- MARSGram execution %s seconds ---\" % (time.time() - start_time))\n\n # Read Output File\n file_object = model['Directory']+'Code/OUTPUT.txt'\n\n\n data_list = []\n\n # Split Values\n with open(file_object) as fileobj:\n for line in fileobj:\n if line[5] == 'T':\n keys = line.split()\n else:\n data = line.split()\n data_list.append(data)\n\n # List of interesting values\n data_interesting = [[], [], [], [], [], [], [], [], [], [], []]\n\n # Creates lists\n for i in range(len(data_list)):\n for j in range(len(data_list[i])):\n if keys[j] == 'HgtMOLA' or keys[j] == 'HgtSFCM':\n data_interesting[0].append(float(data_list[i][j]))\n elif keys[j] == 'LatPC':\n data_interesting[1].append(float(data_list[i][j]))\n elif keys[j] == 'LonW':\n data_interesting[2].append(float(data_list[i][j]))\n elif keys[j] == 'Denkgm3':\n data_interesting[3].append(float(data_list[i][j]))\n elif keys[j] == 'Temp':\n data_interesting[4].append(float(data_list[i][j]))\n elif keys[j] == 'EWind':\n data_interesting[5].append(float(data_list[i][j]))\n elif keys[j] == 'EWTot':\n data_interesting[6].append(float(data_list[i][j]))\n elif keys[j] == 'NWind':\n data_interesting[7].append(float(data_list[i][j]))\n elif keys[j] == 'NWTot':\n data_interesting[8].append(float(data_list[i][j]))\n elif keys[j] == 'VWind':\n data_interesting[9].append(float(data_list[i][j]))\n elif keys[j] == 'DensP':\n data_interesting[10].append(float(data_list[i][j]))\n\n\n list_to_remove = ['Time', 'sigD', 'Ls', 'Dust', 'LTST', 'CO2%m', 'N2%m', 'Ar%m', 'O2%m', 'CO%m', 'O%m', 'He%m',\n 'H2%m', 'H%m', 'H2O%m', 'DensP']\n for key in list_to_remove:\n keys.remove(key)\n\n\n ## MONTECARLO\n if model['Monte Carlo'] == 1:\n density = [a*b for a,b in zip(data_interesting[3],data_interesting[10])]\n data_interesting[3] = density\n\n\n config.atmospheric_data = {keys[i]: data_interesting[i] for i in range(len(keys))}\n\n dirpath = os.getcwd()\n test = os.listdir(dirpath)\n\n for item in test:\n if item.endswith(\".txt\"):\n os.remove(os.path.join(dirpath, 
item))\n\n","sub_path":"physical_models/MARSGram.py","file_name":"MARSGram.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"114015070","text":"import tensorflow as tf\nimport numpy as np\n\na_=np.inf\nprint(a_)\na=tf.constant([2,np.inf,9,np.nan],dtype=tf.float16)\nb=tf.is_inf(a)\nc=tf.is_nan(a)\n\nwith tf.Session() as sess:\n print(sess.run(b))\n print(sess.run(c))\n","sub_path":"TF_fun/TF_IsNan_IsInf.py","file_name":"TF_IsNan_IsInf.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"150555827","text":"import platform\n\nfrom setuptools import setup\n\nversion = \"0.21.0a0\"\ndescription = \"A modern Python 3 test framework for finding and fixing flaws faster.\"\nwith open(\"README.md\", \"r\") as fh:\n if platform.system() != \"Windows\":\n long_description = fh.read()\n else:\n long_description = description\n\nsetup(\n name=\"ward\",\n version=version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"http://github.com/darrenburns/ward\",\n author=\"Darren Burns\",\n author_email=\"darrenb900@gmail.com\",\n license=\"MIT\",\n packages=[\"ward\"],\n python_requires=\">=3.6\",\n entry_points={\"console_scripts\": [\"ward=ward.run:run\"]},\n install_requires=[\n \"colorama==0.4.1\",\n \"termcolor==1.1.0\",\n \"dataclasses==0.6\",\n \"click==7.0\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"222984944","text":"# This module contains functions, which allow to read an xml file, and write in a text file\n\nfrom lxml import etree\nimport os.path\n\n\ndef read(file): # give the path of the file\n x = []\n y = []\n demand = [0]\n tree = etree.parse(\"\" + file)\n for abs in tree.xpath(\"/instance/network/nodes/node/cx\"):\n x.append((float(abs.text)))\n for ord in tree.xpath(\"/instance/network/nodes/node/cy\"):\n y.append((float(ord.text)))\n inst = [(x[i], y[i]) for i in range(len(x))]\n for dem in tree.xpath(\"/instance/requests/request/quantity\"):\n demand.append((float(dem.text)))\n for c in tree.xpath(\"/instance/fleet/vehicle_profile/capacity\"):\n C = float(c.text)\n return inst, demand, C\n\n\ndef writef(namefile, text):\n if not os.path.isfile(namefile):\n f = open(namefile, 'w')\n f.write(text + '\\n')\n f.close()\n else:\n f = open(namefile, 'a')\n f.write(text + '\\n')\n f.close()\n","sub_path":"Projet/Code/module_final/cvrp/ReadWrite.py","file_name":"ReadWrite.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"167211542","text":"# 123. 
买卖股票的最佳时机 III\r\n# 给定一个数组,它的第 i 个元素是一支给定的股票在第 i 天的价格。\r\n#\r\n# 设计一个算法来计算你所能获取的最大利润。你最多可以完成 两笔 交易。\r\n#\r\n# 注意: 你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。\r\nclass Solution:\r\n # 57.74 61.85\r\n def maxProfit(self, prices) -> int:\r\n n = len(prices)\r\n if n < 2:\r\n return 0\r\n # dp[i][j] i代表买入次数1,2,j=0表示不持股,j=1表示持股\r\n dp = [[0]*2 for _ in range(3)]\r\n dp[1][1] = -prices[0]\r\n dp[2][1] = float('-inf')\r\n for i in range(n):\r\n dp[1][1] = max(dp[1][1],-prices[i])\r\n dp[1][0] = max(dp[1][0],dp[1][1]+prices[i])\r\n dp[2][1] = max(dp[2][1],dp[1][0]-prices[i])\r\n dp[2][0] = max(dp[2][0],dp[2][1]+prices[i])\r\n return dp[2][0]\r\n\r\n\r\n\r\n# 优秀解答\r\nclass Solution:\r\n def maxProfit(self, prices) -> int:\r\n fb, fs = -float(\"INF\"), 0\r\n sb, ss = -float(\"INF\"), 0\r\n for p in prices:\r\n if fb < -p:\r\n fb = -p\r\n if fs < fb + p:\r\n fs = fb + p\r\n if sb < fs - p:\r\n sb = fs - p\r\n if ss < sb + p:\r\n ss = sb + p\r\n return ss\r\n\r\ns=Solution()\r\nprint(s.maxProfit([3,3,5,0,0,3,1,4]))","sub_path":"pySrc/BestTimeToBuyAndSellStockIII.py","file_name":"BestTimeToBuyAndSellStockIII.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"573428361","text":"from sklearn.datasets import load_iris\n\niris = load_iris()\nprint(iris.data)\nprint(iris.target)\n\na = iris.target.copy()\nfor i in range(len(a)):\n if(a[i]!=0):\n a[i]=5\nprint(a)\n\nb = iris.target.copy()\nfor i in range(len(b)):\n if(b[i]!=1):\n b[i]=5\nprint(b)\n\nc = iris.target.copy()\nfor i in range(len(c)):\n if(c[i]!=2):\n c[i]=5\nprint(c)\n\n\nfrom sklearn.linear_model import LogisticRegression \nmodel1 = LogisticRegression(solver='liblinear', random_state=0) \nmodel2 = LogisticRegression(solver='liblinear', random_state=0) \nmodel3 = LogisticRegression(solver='liblinear', random_state=0) \n\nfrom sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(iris.data, a, test_size=0.20, random_state=42)\nmodel1.fit(X_train,y_train)\n\nfrom sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(iris.data, b, test_size=0.20, random_state=42)\nmodel2.fit(X_train,y_train)\n\nfrom sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(iris.data, c, test_size=0.20, random_state=42)\nmodel3.fit(X_train,y_train)\n\ny_result1 = model1.predict(X_test)\ny_result2 = model2.predict(X_test)\ny_result3 = model3.predict(X_test)\ny_result = []\n\nfor i in range(len(X_test)):\n if(y_result1[i]!=5):\n y_result.append(y_result1[i])\n elif(y_result2[i]!=5):\n y_result.append(y_result2[i])\n else:\n y_result.append(y_result3[i]) \n\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.20, random_state=42)\n\nprint(y_test)\nprint(y_result)\n\nTotal_Predictions = len(y_test)\nTrue_Predictions = 0\nfor i in range(len(y_test)):\n if(y_test[i]== y_result[i]):\n True_Predictions +=1\nprint(\"Total Predictions:\", Total_Predictions)\nprint(\"True Predictions:\", True_Predictions)\n\nimport matplotlib.pyplot as 
plt\nplt.scatter(X_test[:,0],y_test,color='r')\nplt.scatter(X_test[:,0],y_result,color='g')\nplt.show()\nplt.scatter(X_test[:,1],y_test,color='r')\nplt.scatter(X_test[:,1],y_result,color='g')\nplt.show()\nplt.scatter(X_test[:,2],y_test,color='r')\nplt.scatter(X_test[:,2],y_result,color='g')\nplt.show()\nplt.scatter(X_test[:,3],y_test,color='r')\nplt.scatter(X_test[:,3],y_result,color='g')\nplt.show()\n\nmodel = LogisticRegression(multi_class='multinomial', solver='lbfgs', max_iter=1000)\nmodel.fit(X_train,y_train)\n\ny_res = model.predict(X_test)\nprint(y_res)\nprint(y_test)\n\nTrue_Predictions1 = 0\nfor i in range(len(y_test)):\n if(y_test[i]== y_res[i]):\n True_Predictions1 +=1\n\nprint(\"Total Predictions:\", Total_Predictions)\nprint(\"True Predictions:\", True_Predictions1)\n\nimport matplotlib.pyplot as plt\nplt.scatter(X_test[:,0],y_test,color='r')\nplt.scatter(X_test[:,0],y_res,color='g')\nplt.show()\nplt.scatter(X_test[:,1],y_test,color='r')\nplt.scatter(X_test[:,1],y_res,color='g')\nplt.show()\nplt.scatter(X_test[:,2],y_test,color='r')\nplt.scatter(X_test[:,2],y_res,color='g')\nplt.show()\nplt.scatter(X_test[:,3],y_test,color='r')\nplt.scatter(X_test[:,3],y_res,color='g')\nplt.show()\n\nimport numpy as np\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\ncv = KFold(n_splits=4, random_state=1, shuffle=True)\nscores = cross_val_score(model, X_test, y_test, scoring='accuracy', cv=cv, n_jobs=-1)\nprint('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))\n","sub_path":"ML/Assignment6.py","file_name":"Assignment6.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"213121852","text":"from itertools import chain, islice, tee, izip\n\n\ndef previous_and_next(some_iterable):\n \"\"\"Provide previous and next values while iterating a list.\n\n This is from: http://stackoverflow.com/a/1012089/64911\n\n This will allow you to lazily iterate a list such that as you iterate, you\n get a tuple containing the previous, current, and next value.\n \"\"\"\n prevs, items, nexts = tee(some_iterable, 3)\n prevs = chain([None], prevs)\n nexts = chain(islice(nexts, 1, None), [None])\n return izip(prevs, items, nexts)\n","sub_path":"juriscraper/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"338353950","text":"import DBOperation\nimport common as cmm\n\nclass dateFilter():\n @classmethod\n def getAllRawData(self):\n cleanedTimeList = []\n dbo = DBOperation.DBOperation()\n try :\n dbo.fetchAllData('cost')\n (gridData,gridLabel) = cmm.getAndConvertCostData((\"\",\"\",\"\",))\n except :\n gridLabel = \"\"\n\n for item in gridLabel:\n #print \"item : >%s<\"%item\n cleanedTimeList.append(item[:item.rfind(' ')])\n\n cleanedTimeList = list(set(cleanedTimeList))\n #print \"cleanedTimeList : \",cleanedTimeList\n return cleanedTimeList\n\n @classmethod\n def getTimeList(self):\n timeList = self.getAllRawData()\n if 0 == len(timeList):\n return 0\n mainTimeDict = {}\n\n for item in timeList:\n year = item[:4]\n month = item[5:7]\n day = item[8:10]\n\n #print \"---------------------------------------------------\"\n #print \"year : \",year\n #print \"month : \",month\n #print \"day : \",day\n #print \"---------------------------------------------------\"\n\n if not mainTimeDict.has_key(year):\n #print \" : <%s> don't exist in list 
%s,creating...\"%(year,mainTimeDict)\n mainTimeDict[year]={}\n if not mainTimeDict[year].has_key(month):\n #print \" : <%s> don't exist in list %s, creating...\"%(month,mainTimeDict[year])\n mainTimeDict[year][month]=[]\n\n if day not in mainTimeDict[year][month]:\n #print \"key : <%s> not in list %s, append!\"%(day,mainTimeDict[year][month])\n mainTimeDict[year][month].append(day)\n #not recommended for this sort. but if data mount is not huge, it is ok.\n mainTimeDict[year][month].sort()\n else :\n #print \"key : <%s> is in list %s, abort!\"%(day,mainTimeDict[year][month])\n continue\n\n #print \"mainTimeDict : \",mainTimeDict\n return mainTimeDict\n\n\n#data = dateFilter.getAllRawData()\n#timeList = dateFilter.getTimeList(data)\n\n\n\n\n\n\n","sub_path":"wx_learning/firstBlood/dateFilter.py","file_name":"dateFilter.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"19813665","text":"# Eksamen ITGK 2014 besvarelse\r\n\r\n# ***** Oppgave 1 *****\r\n'''\r\n1. d\tR\r\n2. b\tR\r\n3. b\tR\r\n4. d\tR\r\n5. a\tR\r\n6. b\tR\r\n7. c\tR\r\n8. c\tR\r\n9. c\tR\r\n10. d\tR\r\n11. b\tR\r\n12. d\tR\r\n13. a\tR\r\n14. b\tR\r\n15. a\tR\r\n16. a\tR\r\n17. a\tR\r\n18. d\tR\r\n19. c\tR\r\n20. a\tR\r\n21. b\tR\r\n22. a\tR\r\n23. a\tR\r\n24. a\tR\r\n25. a\tR\r\n\r\n25/25\r\n'''\r\n\r\n# ***** Oppgave 2 *****\r\n# a)\r\ndef inputPerson():\r\n\tnavn = input('Name: ')\r\n\tID = input('ID: ')\r\n\tvekt = int(input('Weight: '))\r\n\tsize = int(input('Size: '))\r\n\treturn [navn, ID, vekt, size]\r\n\r\n# b)\r\ndef readDbFile(filename):\r\n\tdb = []\r\n\twith open(filename, 'r') as f:\r\n\t\tfor line in f:\r\n\t\t\tline = line.split(';')\r\n\t\t\tentry = [line[0], line[1], int(line[2]), int(line[3])]\r\n\t\t\tdb.append(entry)\r\n\treturn db\r\n\r\n# c)\r\ndef printMembersList(db):\r\n\tCOL1 = 15\r\n\tCOL2 = 9\r\n\tCOL3 = 5\r\n\tCOL4 = 4\r\n\tprint('Navn'.ljust(COL1), 'ID-NR'.ljust(COL2), 'VEKT kg.'.rjust(COL3+4), 'SKJERMSTORLEIK'.rjust(COL4))\r\n\tfor line in db:\r\n\t\tprint(line[0].ljust(COL1), line[1].ljust(COL2), str(line[2]).rjust(COL3)+' kg', str(line[3]).rjust(COL4)+'kvadratfot')\r\n\r\n# d)\r\ndef addPerson(filename):\r\n\tperson = inputPerson()\r\n\tdb = readDbFile(filename)\r\n\tdb.append(person)\r\n\twith open(filename, 'a') as f:\r\n\t\tf.write(person[0]+';'+person[1]+';'+person[2]+';'+person[3]+'\\n')\r\n\treturn db\r\n\r\n# e)\r\ndef feet2seconds(feet):\r\n\tif feet<=3000:\r\n\t\treturn 0\r\n\telif feet <= 4000:\r\n\t\treturn (feet-3000)/100\r\n\telse:\r\n\t\treturn 10 + (feet-4000)/200\r\n\r\n# 22/25\r\n\r\n# ***** Oppgave 3 *****\r\n# a)\r\ndef weatherStats(weatherData):\r\n\tmax_entry = (-float('inf'), None)\r\n\tmin_entry = (float('inf'), None)\r\n\train = 0\r\n\tfor i in range(len(weatherData)):\r\n\t\tif weatherData[i][0]>max_entry[0]:\r\n\t\t\tmax_entry = (weatherData[i][0], i+1)\r\n\t\tif weatherData[i][1]= avg:\r\n\t\t\tcoldest = (avg, i+1)\r\n\treturn coldest[1]\r\n\r\n# c)\r\ndef addNewDay(extraData, weatherData): \r\n\textraData.split(', ')\r\n\t#finner høyeste temp\r\n\tmaxTemp = int(extraData[0].strip('max='))\r\n\tminTemp = int(extraData[1].strip('min='))\r\n\train\t= int(extraData[2].strip('mm'))\r\n\tweatherData.append([maxTemp, minTemp, rain])\r\n\treturn weatherData\r\n\r\n# 25/30\r\n\r\n# ****** Oppgave 4 ******\r\n'''\r\na) \t14\r\n\tFunksjonen ganger sammen tall på motsatt side av listen og summerer alle produktene. 
\r\nb) \t[1,0,1,0]\r\n\t[0,1,0,1]\r\n\t[1,0,1,0]\r\n\t[0,1,0,1]\r\n\tLager et sjakkrutemønster med 0 og 1 som er W*W stort\r\nc) 'ROSENBORG'\r\n\tFunksjonen setter sammen hver tredje indeks (fra 0-te) til én sammenhengende streng. \r\nd)\tFeil 1 skjer i linje 14 hvor det burde stå; return parentheses_list==[]\r\n\tFeil 2 skjer fordi koden ikke sjekker rekkefølge på paranteser, kun forekomster. For\r\n\teksempel etter en ( må det komme en ny åpende parantes eller en ), slik at parantesen lukkes\r\n\tfør noen nye åpnes. Hvis koden oppdager en lukkende parantes må den sjekke at den svarer til \r\n\tforrige åpnende parantes. Feilen kan rettes i linje 10 ved å heller skrive\r\n\t\tif char != parentheses_list[~0]: \r\n\tog i linje 13 hvor den må fjerne siste element i listen\r\n\t\tparentheses_list.pop()\r\n\r\n18/20\r\n'''","sub_path":"tdt4110/Eksamenstrening/høst2014.py","file_name":"høst2014.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"290624942","text":"import codecs\nimport requests\nimport time\nimport sys\n\nfrom bs4 import BeautifulSoup\nfrom django.core.management import BaseCommand\nfrom mastermind.models import Player\nfrom mastermind.models import Bms\nfrom mastermind.models import Score\n\nclass Command(BaseCommand):\n help = 'BMSリストのスコアデータを取得、データベース登録'\n\n def handle(self, *args, **options):\n #bms_list取得\n bms_list = []\n b = Bms.objects.all()\n for bms in b:\n bms_list.append([bms.bms_id, bms.players])\n\n #player_id_list取得\n player_id_list = []\n p = Player.objects.all()\n for player in p:\n player_id_list.append(player.player_id)\n\n #スコアデータの取得\n for bms_data in bms_list:\n bms_id = int(bms_data[0])\n players = int(bms_data[1])\n\n pages = -(-players // 100)\n for i in range(1, pages + 1):\n target_url = f'http://www.dream-pro.info/~lavalse/LR2IR/search.cgi?mode=ranking&page={i}&bmsid={bms_id}'\n score_list = self.scrape(target_url, player_id_list)\n self.update_database(bms_id, score_list)\n\n #正常終了\n print('ok')\n sys.exit(0)\n\n @staticmethod\n def scrape(url: str, player_id_list: list) -> list:\n \"\"\"\n player_id_listに載っている人のスコアデータを取得\n :param url:\n :param player_id_list:\n :return: score list\n \"\"\"\n time.sleep(1)\n try:\n resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n except:\n resp.raise_for_status()\n sys.exit(1)\n\n content_type_encoding = resp.encoding if resp.encoding != 'ISO-8859-1' else None\n soup = BeautifulSoup(resp.content, 'html.parser', from_encoding=content_type_encoding)\n table = soup.find_all('table')[3]\n rows = table.find_all('tr')\n\n #指定プレイヤーIDのスコアデータ抽出\n score_data = []\n for row in rows:\n id_cell = row.find(['a'])\n if id_cell:\n player_id = id_cell.get('href').replace('search.cgi?mode=mypage&playerid=', '')\n if player_id not in player_id_list:\n continue\n else:\n score_data.append(player_id)\n for cell in row.find_all(['td', 'th'], attrs={'class': ''}):\n score_data.append(cell.get_text())\n\n #抽出したスコアデータを整形\n del score_data[0:17]\n score_list = [score_data[i:i+18] for i in range(0, len(score_data), 18)]\n\n return score_list\n\n @staticmethod\n def update_database(bms_id: str, score_list: list):\n \"\"\"\n database 登録\n :param bms_id:\n :param score_list:\n \"\"\"\n for score in score_list:\n tmp = score[6].split('/')\n Score.objects.filter(bms_id=bms_id, player_id=score[0]).update_or_create(defaults={\n 'player_id':score[0],'bms_id':bms_id, 'player_name':score[2], 'clear_type':score[4],\n 'score_rank':score[5], 
'score_rate':score[6], 'combo':score[7], 'bp':score[8],\n 'pg':score[9], 'gr':score[10], 'gd':score[11], 'bd':score[12], 'pr':score[13],\n 'score':int(tmp[0])\n })\n","sub_path":"mastermind/management/commands/lr2ir.py","file_name":"lr2ir.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"620738275","text":"__author__ = 'yossiadi'\n\nimport unittest\nimport numpy as np\nfrom lib import utility as utils\n\n\nclass TestUtils(unittest.TestCase):\n\n def test_concat_train(self):\n frame_concat = 4\n x = np.random.rand(10, 100, 39)\n y = np.zeros([10, 100])\n x_n, y_n = utils.concatenate_x_frames(x, y, frame_concat)\n\n # check lengths\n self.assertEqual(len(x), len(x_n))\n self.assertEqual(len(y), len(y_n))\n for i in range(len(x)):\n self.assertEqual(len(x[i]), len(x_n[i]) + 2*frame_concat)\n for i in range(len(y)):\n self.assertEqual(len(y[i]), len(y_n[i]) + 2*frame_concat)\n\n # check values\n for i in range(len(x)):\n for j in range(len(x[i]) - frame_concat*2):\n for k in range(len(x_n[i][j])):\n l = k % 39\n t = k / 39\n self.assertEqual(x[i][j+t][l], x_n[i][j][k])\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/TestUtils.py","file_name":"TestUtils.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"80056884","text":"from allianceauth.services.modules.discord.models import DiscordUser\nfrom discord.ext import commands\nimport functools\nimport os\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n# i dont want to do this, but the below object get wont work without it, investigate.\nos.environ[\"DJANGO_ALLOW_ASYNC_UNSAFE\"] = \"true\"\n\n\ndef sender_has_perm(perm):\n def predicate(ctx):\n id = ctx.message.author.id\n try:\n has_perm = DiscordUser.objects.get(uid=id).user.has_perm(perm)\n if has_perm:\n return True\n else:\n raise commands.MissingPermissions([\"auth_roles\"])\n except Exception as e:\n logger.error(e)\n raise commands.MissingPermissions([\"not_linked\"])\n return commands.check(predicate)\n","sub_path":"aadiscordbot/cogs/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"642082922","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nfrom tower import Tower\n\nclass Player:\n\n def __init__(self):\n self.pos_x = 0\n self.pos_y = 0\n self.budget = 10\n\n self.tower = -1\n self.board = -1\n\n self.sidebar = -1\n\n self.game_is_running = True\n self.waves = 0\n\n def move(self, dx, dy):\n if self.board.inside(self.pos_x + dx, self.pos_y + dy) == False:\n return False\n if self.board[self.pos_x + dx][self.pos_y + dy].Type != \"player\":\n return False\n \n self.range(False)\n\n old_square = self.board[self.pos_x][self.pos_y]\n if old_square.Occupant == self.tower:\n old_square.Occupant = -1\n old_square.update(1, 1)\n\n self.pos_x += dx\n self.pos_y += dy\n\n new_square = self.board[self.pos_x][self.pos_y]\n if new_square.Occupant == -1:\n new_square.Occupant = self.tower\n new_square.update(1, 1)\n\n self.range(True)\n\n return True\n\n def range(self, state):\n if self.tower != -1:\n for ii in range(-self.tower.rang, self.tower.rang+1):\n for jj in range(-self.tower.rang, self.tower.rang+1):\n if abs(ii) + abs(jj) <= self.tower.rang:\n if self.board.inside(self.pos_x + ii, 
self.pos_y + jj):\n square = self.board[self.pos_x + ii][self.pos_y + jj]\n square.range = state\n square.update(1, 1)\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"74128749","text":"# Hezekiah's Code Jam Solution\n# Usage: python3 codejam.py < file.in > file.out\n\nimport math\n\ndef more(line):\n\treturn 0\n\ndef palindromes(minimum, maximum):\n\tif minimum <= 1:\n\t\tif minimum == 0:\n\t\t\tyield 0\n\t\tyield 1\n\t\tminimum = 2\n\tfor length in range(math.ceil(math.log10(minimum)), math.ceil(math.log10(maximum)) + 1):\n\t\thalf = length // 2\n\t\tif half == 0:\n\t\t\tfor palindrome in range(2, 10):\n\t\t\t\tif palindrome >= minimum and palindrome <= maximum:\n\t\t\t\t\tyield palindrome\n\t\telse:\n\t\t\tfor number in range(10 ** (half - 1), 10 ** half):\n\t\t\t\tstring = str(number)\n\t\t\t\tif half * 2 == length:\n\t\t\t\t\tpalindrome = int(string + string[::-1])\n\t\t\t\t\tif palindrome >= minimum and palindrome <= maximum:\n\t\t\t\t\t\tyield palindrome\n\t\t\t\telse:\n\t\t\t\t\tfor digit in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n\t\t\t\t\t\tpalindrome = int(string + digit + string[::-1])\n\t\t\t\t\t\tif palindrome >= minimum and palindrome <= maximum:\n\t\t\t\t\t\t\tyield palindrome\n\ndef solve(lines):\n\tab = lines[0].split()\n\tcount = 0\n\tfor palindrome in palindromes(math.ceil(math.sqrt(int(ab[0]))), math.floor(math.sqrt(int(ab[1])))):\n\t\tstring = str(palindrome * palindrome)\n\t\tif string == string[::-1]:\n\t\t\tcount += 1\n\treturn str(count)\n\ndef main():\n\tcases = int(input())\n\tfor case in range(1, cases + 1):\n\t\tlines = [input()]\n\t\tfor line in range(0, more(lines[0])):\n\t\t\tlines.append(input())\n\t\tprint(\"Case #\" + str(case) + \": \" + solve(lines))\n\nmain()\n","sub_path":"solutions_2463486_0/Python/ulrikdem/codejam.py","file_name":"codejam.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"552040414","text":"import argparse\nimport json\nimport os\nimport sys\nimport unicodedata\nfrom pathlib import Path\n\nfrom nltk.tokenize import word_tokenize, sent_tokenize\n\nVOID, TITLE, ABSTRACT = 0, 1, 2\n\n\ndef is_article_beginning(line):\n return line.startswith(\" p2_num_wins:\n print(f'P1 wins with {p1_num_wins}')\nelse:\n print(f'P2 wins with {p2_num_wins}')","sub_path":"py3bc/10_loops_01.py","file_name":"10_loops_01.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"356873981","text":"import xija\nimport os\nfrom astropy.units import Quantity\nfrom astropy.io import ascii\nfrom acispy.dataset import Dataset\nfrom acispy.plots import DatePlot\nimport numpy as np\nfrom Chandra.Time import secs2date, DateTime, date2secs\nfrom acispy.states import States\nfrom acispy.model import Model\nfrom acispy.msids import MSIDs\nfrom acispy.time_series import EmptyTimeSeries\nfrom acispy.utils import mylog, \\\n get_time, ensure_list, plotdate2cxctime\nimport Ska.Numpy\nimport Ska.engarchive.fetch_sci as fetch\nfrom chandra_models import get_xija_model_file\nimport matplotlib.pyplot as plt\nfrom kadi import events\nimport importlib\nfrom matplotlib import font_manager\n\nshort_name = {\"1deamzt\": \"dea\",\n \"1dpamzt\": \"dpa\",\n \"1pdeaat\": \"psmc\",\n \"fptemp_11\": \"acisfp\",\n 
\"tmp_fep1_mong\": \"fep1_mong\",\n \"tmp_fep1_actel\": \"fep1_actel\",\n \"tmp_fep1_fb\": \"fep1_fb\",\n \"tmp_bep_pcb\": \"bep_pcb\"}\n\nshort_name_rev = {v: k for k, v in short_name.items()}\n\nfull_name = {\"1deamzt\": \"DEA\",\n \"1dpamzt\": \"DPA\",\n \"1pdeaat\": \"PSMC\",\n \"fptemp_11\": \"Focal Plane\",\n \"tmp_fep1_mong\": \"FEP1 Mongoose\",\n \"tmp_fep1_actel\": \"FEP1 Actel\",\n \"tmp_fep1_fb\": \"FEP1 FB\",\n \"tmp_bep_pcb\": \"BEP PCB\"}\n\nlimits = {'1deamzt': 36.5,\n '1dpamzt': 37.5,\n '1pdeaat': 52.5,\n 'tmp_fep1_mong': 47.0,\n 'tmp_fep1_actel': 46.0,\n 'tmp_bep_pcb': 43.0,\n 'tmp_fep1_fb': 41.0,\n 'fptemp_11': {\"ACIS-I\": -112.0, \"ACIS-S\": -111.0}}\n\nlow_limits = {\n 'tmp_fep1_mong': 2.0,\n 'tmp_fep1_actel': 2.0,\n 'tmp_fep1_fb': 2.0,\n 'tmp_bep_pcb': 4.5\n}\n\nmargins = {'1deamzt': 2.0,\n '1dpamzt': 2.0,\n '1pdeaat': 4.5,\n 'tmp_fep1_mong': 2.0,\n 'tmp_fep1_actel': 2.0,\n 'tmp_fep1_fb': 2.0,\n 'tmp_bep_pcb': 2.0}\n\nmodel_classes = {\n \"dpa\": \"DPACheck\",\n \"dea\": \"DEACheck\",\n \"psmc\": \"PSMCCheck\",\n \"acisfp\": \"ACISFPCheck\",\n \"fep1_mong\": \"FEP1MongCheck\",\n \"fep1_actel\": \"FEP1ActelCheck\",\n \"bep_pcb\": \"BEPPCBCheck\"\n}\n\n\ndef find_json(name, model_spec):\n if model_spec is None:\n name = short_name[name]\n model_spec = get_xija_model_file(name)\n elif not os.path.exists(model_spec):\n raise IOError(\"The JSON file %s does not exist!\" % model_spec)\n return model_spec\n\n\nclass ModelDataset(Dataset):\n def __init__(self, msids, states, model):\n super(ModelDataset, self).__init__(msids, states, model)\n\n def write_model(self, filename, overwrite=False):\n \"\"\"\n Write the model data vs. time to an ASCII text file.\n\n Parameters\n ----------\n filename : string\n The filename to write the data to.\n overwrite : boolean, optional\n If True, an existing file with the same name will be overwritten.\n \"\"\"\n if os.path.exists(filename) and not overwrite:\n raise IOError(\"File %s already exists, but overwrite=False!\" % filename)\n names = []\n arrays = []\n for i, msid in enumerate(self.model.keys()):\n if i == 0:\n times = self.times(\"model\", msid).value\n dates = self.dates(\"model\", msid)\n names += ['time', 'date']\n arrays += [times, dates]\n names.append(msid)\n arrays.append(self[\"model\", msid].value)\n temp_array = np.rec.fromarrays(arrays, names=names)\n fmt = {(name, '%.2f') for name in names if name != \"date\"}\n out = open(filename, 'w')\n Ska.Numpy.pprint(temp_array, fmt, out)\n out.close()\n\n def write_model_and_data(self, filename, overwrite=False, \n mask_radzones=False, mask_fmt1=False,\n mask_badtimes=True, tstart=None,\n tstop=None):\n \"\"\"\n Write the model, telemetry, and states data vs. time to\n an ASCII text file. 
The state data is interpolated to the\n times of the model so that everything is at a common set\n of times.\n\n Parameters\n ----------\n filename : string\n The filename to write the data to.\n overwrite : boolean, optional\n If True, an existing file with the same name will be overwritten.\n \"\"\"\n states_to_map = [\"vid_board\", \"pitch\", \"clocking\", \"simpos\",\n \"ccd_count\", \"fep_count\", \"off_nom_roll\"]\n out = []\n for i, msid in enumerate(self.model.keys()):\n if i == 0:\n if self.states._is_empty:\n out += [(\"model\", state) for state in states_to_map]\n else:\n for state in states_to_map:\n self.map_state_to_msid(state, msid)\n out.append((\"msids\", state))\n out.append((\"model\", msid))\n if (\"msids\", msid) in self.field_list:\n self.add_diff_data_model_field(msid)\n out += [(\"msids\", msid), (\"model\", \"diff_%s\" % msid)]\n msid = list(self.model.keys())[0]\n telem = self[\"msids\", msid]\n mask = np.ones_like(telem.value, dtype='bool')\n if tstart is not None:\n tstart = DateTime(tstart).secs\n mask[telem.times.value < tstart] = False\n if tstop is not None:\n tstop = DateTime(tstop).secs\n mask[telem.times.value > tstop] = False\n if mask_radzones:\n rad_zones = events.rad_zones.filter(start=telem.dates[0],\n stop=telem.dates[-1])\n for rz in rad_zones:\n idxs = np.logical_and(telem.times.value >= rz.tstart,\n telem.times.value <= rz.tstop)\n mask[idxs] = False\n if mask_fmt1:\n which = self[\"msids\", \"ccsdstmf\"] == \"FMT1\"\n mask[which] = False\n self.write_msids(filename, out, overwrite=overwrite, mask=mask)\n\n def _get_msids(self, model, comps, tl_file):\n comps = [comp.lower() for comp in comps]\n times = model[comps[0]].times.value\n tstart = secs2date(times[0] - 700.0)\n tstop = secs2date(times[-1] + 700.0)\n if tl_file is not None:\n msids = MSIDs.from_tracelog(tl_file, tbegin=tstart, tend=tstop)\n else:\n if \"earth_solid_angle\" in comps:\n comps.remove(\"earth_solid_angle\")\n comps.append(\"ccsdstmf\")\n msids = MSIDs.from_database(comps, tstart, tstop=tstop, filter_bad=True,\n interpolate='nearest', interpolate_times=times)\n if msids[comps[0]].times.size != times.size:\n raise RuntimeError(\"Lengths of time arrays for model data and MSIDs \"\n \"do not match. You probably ran a model past the \"\n \"end date in the engineering archive!\")\n return msids\n\n def make_dashboard_plots(self, msid, tstart=None, tstop=None, yplotlimits=None,\n errorplotlimits=None, fig=None, figfile=None,\n bad_times=None, mask_radzones=False, plot_limits=True, \n mask_fmt1=False):\n \"\"\"\n Make dashboard plots for the particular thermal model.\n\n Parameters\n ----------\n msid : string\n The MSID name to plot in the dashboard. \n tstart : string, optional\n The start time of the data for the dashboard plot. If not specified,\n the beginning of the thermal model run is used.\n tstop : string, optional\n The stop time of the data for the dashboard plot. If not specified,\n the end of the thermal model run is used.\n yplotlimits : two-element array_like, optional\n The (min, max) bounds on the temperature to use for the\n temperature vs. time plot. Default: Determine the min/max\n bounds from the telemetry and model prediction and\n decrease/increase by degrees to determine the plot limits.\n errorplotlimits : two-element array_like, optional\n The (min, max) error bounds to use for the error plot.\n Default: [-15, 15]\n fig : :class:`~matplotlib.figure.Figure`, optional\n A Figure instance to plot in. 
Default: None, one will be\n created if not provided.\n figfile : string, optional\n The file to write the dashboard plot to. One will be created\n if not provided.\n bad_times : list of tuples, optional\n Provide a set of times to exclude from the creation of the\n dashboard plot.\n mask_radzones : boolean, optional\n If True, mask out radzone periods for dashboard plots of the\n focal plane model. Default: False\n plot_limits : boolean, optional\n If True, plot the yellow caution and planning limits on the\n dashboard plots. Default: True\n \"\"\"\n from xijafit import dashboard as dash\n if fig is None:\n fig = plt.figure(figsize=(20,10))\n if (\"msids\", msid) not in self.field_list:\n raise RuntimeError(\"You must include the real data if you want to make a \"\n \"dashboard plot! Set get_msids=True when creating the\"\n \"thermal model!\")\n telem = self[\"msids\", msid]\n pred = self[\"model\", msid]\n mask = np.logical_and(telem.mask, pred.mask)\n if tstart is not None:\n tstart = DateTime(tstart).secs\n mask[telem.times.value < tstart] = False\n if tstop is not None:\n tstop = DateTime(tstop).secs\n mask[telem.times.value > tstop] = False\n if bad_times is not None:\n for (left, right) in bad_times:\n idxs = np.logical_and(telem.times.value >= date2secs(left),\n telem.times.value <= date2secs(right))\n mask[idxs] = False\n if msid == \"fptemp_11\" and mask_radzones:\n rad_zones = events.rad_zones.filter(start=telem.dates[0],\n stop=telem.dates[-1])\n for rz in rad_zones:\n idxs = np.logical_and(telem.times.value >= rz.tstart,\n telem.times.value <= rz.tstop)\n mask[idxs] = False\n if mask_fmt1:\n which = self[\"msids\", \"ccsdstmf\"] == \"FMT1\"\n mask[which] = False\n times = telem.times.value[mask]\n if yplotlimits is None:\n ymin = min(telem.value[mask].min(), pred.value[mask].min())-2\n ymax = min(telem.value[mask].max(), pred.value[mask].max())+2\n yplotlimits = [ymin, ymax]\n if errorplotlimits is None:\n errorplotlimits = [-5, 5]\n mylimits = {\"units\": \"C\"}\n if plot_limits:\n if msid == \"fptemp_11\":\n mylimits[\"acisi_limit\"] = -112.0\n mylimits[\"aciss_limit\"] = -111.0\n mylimits[\"fp_sens_limit\"] = -118.7\n else:\n mylimits[\"caution_high\"] = limits[msid]+margins[msid]\n mylimits[\"planning_limit\"] = limits[msid]\n dash.dashboard(pred.value[mask], telem.value[mask], times, mylimits,\n msid=msid, modelname=full_name.get(msid, msid),\n errorplotlimits=errorplotlimits, yplotlimits=yplotlimits,\n fig=fig, savefig=False)\n if figfile is not None:\n fig.savefig(figfile)\n return fig\n\n\nclass ThermalModelFromRun(ModelDataset):\n \"\"\"\n Fetch multiple temperature models and their associated commanded states\n from ASCII table files generated by xija or model check tools. If MSID\n data will be added, it will be interpolated to the times of the model\n data.\n\n Parameters\n ----------\n loc : string or list of strings\n Path to the directory where the model and state data are stored.\n get_msids : boolean, optional\n Whether or not to load the MSIDs corresponding to the\n temperature models for the same time period from the\n engineering archive. Default: False.\n tl_file : string\n Path to the location of the tracelog file to get the MSID data from.\n Default: None, which means the engineering archive will be queried\n if get_msids=True.\n Examples\n --------\n >>> from acispy import ThermalModelFromRun\n >>> ds = ThermalModelFromRun(\"/data/acis/LoadReviews/2019/MAY2019/ofls/out_dpa\",\n ... 
get_msids=True)\n \"\"\"\n def __init__(self, loc, get_msids=False, tl_file=None):\n temp_file = os.path.join(loc, \"temperatures.dat\")\n state_file = os.path.join(loc, \"states.dat\")\n esa_file = os.path.join(loc, \"earth_solid_angle.dat\")\n if not os.path.exists(state_file):\n state_file = None\n if not os.path.exists(esa_file):\n esa_file = None\n model = Model.from_load_file(temp_file, esa_file=esa_file)\n comps = list(model.keys())\n if state_file is not None:\n states = States.from_load_file(state_file)\n else:\n states = EmptyTimeSeries()\n if get_msids:\n msids = self._get_msids(model, comps, tl_file)\n else:\n msids = EmptyTimeSeries()\n super(ThermalModelFromRun, self).__init__(msids, states, model)\n\n\nclass ThermalModelFromLoad(ModelDataset):\n \"\"\"\n Fetch a temperature model and its associated commanded states\n from a load review. Optionally get MSIDs for the same time period.\n If MSID data will be added, it will be interpolated to the times\n of the model data.\n\n Parameters\n ----------\n load : string\n The load review to get the model from, i.e. \"JAN2516A\".\n comps : list of strings, optional\n List of temperature components to get from the load models. If\n not specified all four components will be loaded.\n get_msids : boolean, optional\n Whether or not to load the MSIDs corresponding to the\n temperature models for the same time period from the\n engineering archive. Default: False.\n states_comp : string, optional\n The thermal model page to use to get the states. \"DEA\", \"DPA\",\n \"PSMC\", or \"FP\". Default: \"DPA\"\n\n Examples\n --------\n >>> from acispy import ThermalModelFromLoad\n >>> comps = [\"1deamzt\", \"1pdeaat\", \"fptemp_11\"]\n >>> ds = ThermalModelFromLoad(\"APR0416C\", comps, get_msids=True)\n \"\"\"\n def __init__(self, load, comps=None, get_msids=False,\n tl_file=None, states_comp=\"DPA\"):\n if comps is None:\n comps = [\"1deamzt\", \"1dpamzt\", \"1pdeaat\", \"fptemp_11\",\n \"tmp_fep1_mong\", \"tmp_fep1_actel\", \"tmp_bep_pcb\"]\n comps = ensure_list(comps)\n model = Model.from_load_page(load, comps)\n states = States.from_load_page(load, comp=states_comp)\n if get_msids:\n msids = self._get_msids(model, comps, tl_file)\n else:\n msids = EmptyTimeSeries()\n super(ThermalModelFromLoad, self).__init__(msids, states, model)\n\n\nclass ThermalModelRunner(ModelDataset):\n \"\"\"\n Class for running Xija thermal models.\n\n Parameters\n ----------\n name : string\n The name of the model to simulate. Can be \"dea\", \"dpa\", \"psmc\", or \"fep1_mong\".\n tstart : string\n The start time in YYYY:DOY:HH:MM:SS format.\n tstop : string\n The stop time in YYYY:DOY:HH:MM:SS format.\n states : dict, optional\n A dictionary of modeled commanded states required for the model. The\n states can either be a constant value or NumPy arrays. If not supplied,\n the thermal model will be run with states from the commanded states\n database.\n T_init : float, optional\n The initial temperature for the thermal model run. If None,\n an initial temperature will be determined from telemetry.\n Default: None\n dt : float, optional\n The timestep to use for this run. Default is 328 seconds or is provided\n by the model specification file.\n model_spec : string, optional\n Path to the model spec JSON file for the model. Default: None, the \n standard model path will be used.\n mask_bad_times : boolean, optional\n If set, bad times from the data are included in the array masks\n and plots. Default: False\n server : string \n DBI server or HDF5 file. 
Only used if the commanded states database\n is used. Default: None\n\n Examples\n --------\n >>> states = {\"ccd_count\": np.array([5,6,1]),\n ... \"pitch\": np.array([150.0]*3),\n ... \"fep_count\": np.array([5,6,1]),\n ... \"clocking\": np.array([1]*3),\n ... \"vid_board\": np.array([1]*3),\n ... \"off_nom_roll\": np.array([0.0]*3),\n ... \"simpos\": np.array([-99616.0]*3)}\n >>> dpa_model = ThermalModelRunner(\"dpa\", \"2015:002:00:00:00\",\n ... \"2016:005:00:00:00\", states=states,\n ... T_init=10.1)\n \"\"\"\n def __init__(self, name, tstart, tstop, states=None, T_init=None,\n get_msids=True, dt=328.0, model_spec=None,\n mask_bad_times=False, ephem_file=None, evolve_method=None,\n rk4=None, tl_file=None, no_eclipse=False, compute_model=None):\n\n self.name = name.lower()\n self.sname = short_name[name]\n if self.sname in short_name_rev:\n self.model_check = importlib.import_module(f\"{self.sname}_check\")\n else:\n self.model_check = None\n\n self.model_spec = find_json(name, model_spec)\n\n self.ephem_file = ephem_file\n \n tstart = get_time(tstart)\n tstop = get_time(tstop)\n\n tstart_secs = DateTime(tstart).secs\n\n self.no_earth_heat = getattr(self, \"no_earth_heat\", False)\n\n if states is not None:\n if isinstance(states, States):\n states_obj = states\n states = states.as_array()\n else:\n if \"tstart\" not in states:\n states[\"tstart\"] = DateTime(states[\"datestart\"]).secs\n if \"tstop\" not in states:\n states[\"tstop\"] = DateTime(states[\"datestop\"]).secs\n num_states = states[\"tstart\"].size\n if \"letg\" not in states:\n states[\"letg\"] = np.array([\"RETR\"]*num_states)\n if \"hetg\" not in states:\n states[\"hetg\"] = np.array([\"RETR\"]*num_states)\n states_obj = States(states)\n else:\n states_obj = EmptyTimeSeries()\n\n if T_init is None:\n T_init = fetch.MSID(self.name, tstart_secs-700., tstart_secs+700.).vals.mean()\n\n if compute_model is not None:\n self.xija_model = compute_model(self.name, tstart, tstop, states,\n dt, T_init, model_spec, evolve_method, rk4)\n elif self.name in short_name and states is not None:\n self.xija_model = self._compute_acis_model(self.name, tstart, tstop,\n states, dt, T_init, rk4=rk4,\n no_eclipse=no_eclipse,\n evolve_method=evolve_method)\n else:\n self.xija_model = self._compute_model(name, tstart, tstop, dt, T_init,\n evolve_method=evolve_method, \n rk4=rk4)\n\n self.bad_times = getattr(self.xija_model, \"bad_times\", None)\n self.bad_times_indices = getattr(self.xija_model, \"bad_times_indices\", None)\n\n if isinstance(states, dict):\n states.pop(\"dh_heater\", None)\n\n components = [self.name]\n if 'dpa_power' in self.xija_model.comp:\n components.append('dpa_power')\n if 'earthheat__fptemp' in self.xija_model.comp:\n components.append('earthheat__fptemp')\n if states is None:\n components += [\"pitch\", \"roll\", \"fep_count\", \"vid_board\", \"clocking\",\n \"ccd_count\", \"sim_z\"]\n masks = {}\n if mask_bad_times and self.bad_times is not None:\n masks[self.name] = np.ones(self.xija_model.times.shape, dtype='bool')\n for (left, right) in self.bad_times_indices:\n masks[self.name][left:right] = False\n\n model_obj = Model.from_xija(self.xija_model, components, masks=masks)\n\n if get_msids:\n msids_obj = self._get_msids(model_obj, [self.name], tl_file)\n else:\n msids_obj = EmptyTimeSeries()\n super(ThermalModelRunner, self).__init__(msids_obj, states_obj, model_obj)\n\n def _get_ephemeris(self, tstart, tstop, times):\n msids = ['orbitephem0_{}'.format(axis) for axis in \"xyz\"]\n msids += 
['solarephem0_{}'.format(axis) for axis in \"xyz\"]\n ephem = {}\n if self.ephem_file is None:\n e = fetch.MSIDset(msids, tstart - 2000.0, tstop + 2000.0)\n for msid in msids:\n ephem[msid] = Ska.Numpy.interpolate(e[msid].vals, e[msid].times,\n times)\n else:\n e = ascii.read(self.ephem_file)\n msids = ['orbitephem0_{}'.format(axis) for axis in \"xyz\"]\n idxs = np.logical_and(e[\"times\"] >= tstart - 2000.0,\n e[\"times\"] <= tstop + 2000.0)\n for msid in msids:\n ephem[msid] = Ska.Numpy.interpolate(e[msid][idxs],\n e[\"times\"][idxs], times)\n return ephem\n\n def _compute_model(self, name, tstart, tstop, dt, T_init,\n evolve_method=None, rk4=None):\n if name == \"fptemp_11\":\n name = \"fptemp\"\n model = xija.XijaModel(name, start=tstart, stop=tstop, dt=dt,\n model_spec=self.model_spec,\n evolve_method=evolve_method, rk4=rk4)\n model.comp[name].set_data(T_init)\n for t in [\"dea0\", \"dpa0\"]:\n if t in model.comp:\n model.comp[t].set_data(T_init)\n model.make()\n model.calc()\n return model\n\n def _compute_acis_model(self, name, tstart, tstop, states, dt, T_init,\n no_eclipse=False, evolve_method=None, rk4=None):\n import re\n from acis_thermal_check import calc_pitch_roll\n pattern = re.compile(\"q[1-4]\")\n check_obj = getattr(self.model_check, model_classes[self.sname])()\n if name == \"fptemp_11\":\n name = \"fptemp\"\n model = xija.XijaModel(name, start=tstart, stop=tstop, dt=dt, \n model_spec=self.model_spec, rk4=rk4,\n evolve_method=evolve_method)\n ephem = self._get_ephemeris(model.tstart, model.tstop, model.times)\n if states is None:\n state_times = model.times\n state_names = [\"ccd_count\", \"fep_count\", \"vid_board\", \n \"clocking\", \"pitch\", \"roll\"]\n if 'aoattqt1' in model.comp:\n state_names += [\"q1\", \"q2\", \"q3\", \"q4\"]\n states = {}\n for n in state_names:\n nstate = n\n ncomp = n\n if pattern.match(n):\n ncomp = f'aoattqt{n[-1]}'\n elif name == \"roll\":\n nstate = \"off_nom_roll\"\n states[nstate] = np.array(model.comp[ncomp].dvals)\n else:\n if isinstance(states, np.ndarray):\n state_names = states.dtype.names\n else:\n state_names = list(states.keys())\n state_times = np.array([states[\"tstart\"], states[\"tstop\"]])\n model.comp['sim_z'].set_data(np.array(states['simpos']), state_times)\n if 'pitch' in state_names:\n model.comp['pitch'].set_data(np.array(states['pitch']), state_times)\n else:\n pitch, roll = calc_pitch_roll(model.times, ephem, states)\n model.comp['pitch'].set_data(pitch, model.times)\n model.comp['roll'].set_data(roll, model.times)\n for st in ('ccd_count', 'fep_count', 'vid_board', 'clocking'):\n model.comp[st].set_data(np.array(states[st]), state_times)\n if 'dh_heater' in model.comp:\n dhh = states[\"dh_heater\"] if \"dh_heater\" in state_names else 0\n model.comp['dh_heater'].set_data(dhh, state_times)\n if \"off_nom_roll\" in state_names:\n roll = np.array(states[\"off_nom_roll\"])\n model.comp[\"roll\"].set_data(roll, state_times)\n if 'dpa_power' in model.comp:\n # This is just a hack, we're not\n # really setting the power to zero.\n model.comp['dpa_power'].set_data(0.0)\n model.comp[name].set_data(T_init)\n if no_eclipse:\n model.comp[\"eclipse\"].set_data(False)\n check_obj._calc_model_supp(model, state_times, states, ephem, None)\n if self.name == \"fptemp_11\" and self.no_earth_heat:\n model.comp[\"earthheat__fptemp\"].k = 0.0\n model.make()\n model.calc()\n return model\n\n @classmethod\n def from_states_file(cls, name, states_file, T_init,\n dt=328.0, model_spec=None, mask_bad_times=False, \n ephem_file=None, 
get_msids=True, no_eclipse=False):\n \"\"\"\n Run a xija thermal model using a states.dat file. \n\n Parameters\n ----------\n name : string\n The name of the model to simulate. Can be \"dea\", \"dpa\", \"psmc\", or \"fep1mong\".\n states_file : string\n A file containing commanded states, in the same format as \"states.dat\" which is\n outputted by ACIS thermal model runs for loads.\n T_init : float\n The starting temperature for the model in degrees C.\n model_spec : string, optional\n Path to the model spec JSON file for the model. Default: None, the\n standard model path will be used.\n mask_bad_times : boolean, optional\n If set, bad times from the data are included in the array masks\n and plots. Default: False\n \"\"\"\n states = States.from_load_file(states_file)\n tstart = get_time(states['tstart'].value[0])\n tstop = get_time(states['tstop'].value[-1])\n return cls(name, tstart, tstop, states=states, T_init=T_init,\n dt=dt, model_spec=model_spec, mask_bad_times=mask_bad_times,\n ephem_file=ephem_file, get_msids=get_msids, no_eclipse=no_eclipse)\n\n @classmethod\n def from_database(cls, name, tstart, tstop, T_init, server=None, get_msids=True,\n dt=328.0, model_spec=None, mask_bad_times=False,\n ephem_file=None, no_eclipse=False, compute_model=None):\n states = States.from_database(tstart, tstop, server=server)\n return cls(name, tstart, tstop, states=states, T_init=T_init, dt=dt,\n model_spec=model_spec, mask_bad_times=mask_bad_times,\n ephem_file=ephem_file, get_msids=get_msids, \n no_eclipse=no_eclipse, compute_model=compute_model)\n\n @classmethod\n def from_commands(cls, name, tstart, tstop, cmds, T_init, get_msids=True,\n dt=328.0, model_spec=None, mask_bad_times=False, \n ephem_file=None, no_eclipse=False, compute_model=None):\n tstart = get_time(tstart)\n tstop = get_time(tstop)\n states = States.from_commands(tstart, tstop, cmds)\n return cls(name, tstart, tstop, states=states, T_init=T_init, dt=dt,\n model_spec=model_spec, mask_bad_times=mask_bad_times,\n ephem_file=ephem_file, get_msids=get_msids, no_eclipse=no_eclipse,\n compute_model=compute_model)\n\n @classmethod\n def from_kadi(cls, name, tstart, tstop, T_init, get_msids=True, dt=328.0,\n model_spec=None, mask_bad_times=False, ephem_file=None,\n no_eclipse=False, compute_model=None):\n tstart = get_time(tstart)\n tstop = get_time(tstop)\n states = States.from_kadi_states(tstart, tstop)\n return cls(name, tstart, tstop, states=states, T_init=T_init, dt=dt,\n model_spec=model_spec, mask_bad_times=mask_bad_times,\n ephem_file=ephem_file, get_msids=get_msids, no_eclipse=no_eclipse,\n compute_model=compute_model)\n\n @classmethod\n def from_backstop(cls, name, backstop_file, T_init, model_spec=None, dt=328.0,\n mask_bad_times=False, ephem_file=None, get_msids=True,\n no_eclipse=False, compute_model=None):\n import parse_cm\n bs_cmds = parse_cm.read_backstop_as_list(backstop_file)\n tstart = bs_cmds[0]['time']\n tstop = bs_cmds[-1]['time']\n return cls.from_commands(name, tstart, tstop, bs_cmds, T_init, dt=dt,\n model_spec=model_spec, get_msids=get_msids,\n mask_bad_times=mask_bad_times, compute_model=compute_model,\n ephem_file=ephem_file, no_eclipse=no_eclipse)\n\n def make_solarheat_plot(self, node, figfile=None, fig=None):\n \"\"\"\n Make a plot which shows the solar heat value vs. pitch.\n\n Parameters\n ----------\n node : string\n The xija node which has the solar heating applied to it\n in the model. 
Can be an real node on the spacecraft like\n 1DEAMZT or a pseudo-node like \"dpa0\" in the 1DPAMZT model.\n figfile : string, optional\n The file to write the solar heating plot to. One will be created\n if not provided.\n fig : :class:`~matplotlib.figure.Figure`, optional\n A Figure instance to plot in. Default: None, one will be\n created if not provided.\n \"\"\"\n if fig is None:\n fig, ax = plt.subplots(figsize=(15, 10))\n else:\n ax = fig.add_subplot(111)\n try:\n comp = self.xija_model.comp[\"solarheat__%s\" % node]\n except KeyError:\n raise KeyError(\"%s does not have a SolarHeat component!\" % node)\n comp.plot_solar_heat__pitch(fig, ax)\n if figfile is not None:\n fig.savefig(figfile)\n return fig\n\n def make_power_plot(self, figfile=None, fig=None, use_ccd_count=False):\n \"\"\"\n Make a plot which shows the ACIS state power coefficients, vs. either\n FEP or CCD count.\n\n Parameters\n ----------\n figfile : string, optional\n The file to write the power coefficient plot to. One will be created\n if not provided.\n fig : :class:`~matplotlib.figure.Figure`, optional\n A Figure instance to plot in. Default: None, one will be\n created if not provided.\n use_ccd_count : boolean, optional\n If True, plot the CCD count on the x-axis. Primarily useful for the\n 1DEAMZT model. Default: False\n \"\"\"\n plt.rc(\"font\", size=18)\n plt.rc(\"axes\", linewidth=2)\n if fig is None:\n fig, ax = plt.subplots(figsize=(10, 10))\n else:\n ax = fig.add_subplot(111)\n xm = self.xija_model\n dtype = [('x', 'int'), ('y', 'float'), ('name', '>> dea_run = SimulateSingleObs(\"1deamzt\", \"2016:201:05:12:03\", 24, 14.0,\n ... 150., ccd_count=5, off_nom_roll=-6.0,\n ... dh_heater=1)\n \"\"\"\n def __init__(self, name, tstart, hours, T_init, pitch, ccd_count,\n vehicle_load=None, simpos=-99616.0, off_nom_roll=0.0, \n dh_heater=0, fep_count=None, clocking=1, q=None, instrument=None,\n model_spec=None, no_limit=False, no_earth_heat=False):\n if name in short_name_rev:\n name = short_name_rev[name]\n if name == \"fptemp_11\" and instrument is None:\n raise RuntimeError(\"Must specify either 'ACIS-I' or 'ACIS-S' in \"\n \"'instrument' if you want to test a focal plane \" \n \"temperature prediction!\")\n if fep_count is None:\n fep_count = ccd_count\n if q is None and name == \"fptemp_11\":\n raise RuntimeError(\"Please supply an attitude quaternion for the focal plane model!\")\n self.vehicle_load = vehicle_load\n self.no_limit = no_limit\n tstart = get_time(tstart)\n datestart = tstart\n tstart = DateTime(tstart).secs\n tstop = tstart+hours*3600.0+10012.0\n datestop = secs2date(tstop)\n tend = tstop+0.5*(tstop-tstart)\n dateend = secs2date(tend)\n self.datestart = datestart\n self.datestop = datestop\n self.hours = hours\n self.tstart = Quantity(tstart, \"s\")\n self.tstop = Quantity(tstop, \"s\")\n self.dateend = dateend\n self.T_init = Quantity(T_init, \"deg_C\")\n self.instrument = instrument\n self.no_earth_heat = no_earth_heat\n if vehicle_load is None:\n states = {\"ccd_count\": np.array([ccd_count], dtype='int'),\n \"fep_count\": np.array([fep_count], dtype='int'),\n \"clocking\": np.array([clocking], dtype='int'),\n 'vid_board': np.array([ccd_count > 0], dtype='int'),\n \"pitch\": np.array([pitch]),\n \"simpos\": np.array([simpos]),\n \"datestart\": np.array([self.datestart]),\n \"datestop\": np.array([self.dateend]),\n \"tstart\": np.array([self.tstart.value]),\n \"tstop\": np.array([tend]),\n \"hetg\": np.array([\"RETR\"]),\n \"letg\": np.array([\"RETR\"]),\n \"off_nom_roll\": 
np.array([off_nom_roll]),\n \"dh_heater\": np.array([dh_heater], dtype='int')}\n # For the focal plane model we need a quaternion.\n if name == \"fptemp_11\":\n for i in range(4):\n states[\"q%d\" % (i+1)] = np.array([q[i]])\n else:\n mylog.info(\"Modeling a %d-chip observation concurrent with \" % ccd_count +\n \"the %s vehicle loads.\" % vehicle_load)\n states = dict((k, state.value) for (k, state) in\n States.from_load_page(vehicle_load).table.items())\n ecs_run_idxs = states[\"tstart\"] < tstop\n states[\"ccd_count\"][ecs_run_idxs] = ccd_count\n states[\"fep_count\"][ecs_run_idxs] = fep_count\n states[\"clocking\"][ecs_run_idxs] = clocking\n states[\"vid_board\"][ecs_run_idxs] = ccd_count > 0\n super(SimulateSingleObs, self).__init__(name, datestart, dateend, states,\n T_init, model_spec=model_spec,\n get_msids=False, no_eclipse=True)\n\n mylog.info(\"Run Parameters\")\n mylog.info(\"--------------\")\n mylog.info(\"Start Datestring: %s\" % datestart)\n mylog.info(\"Length of ECS run in hours: %s\" % hours)\n mylog.info(\"Stop Datestring: %s\" % datestop)\n mylog.info(\"Initial Temperature: %g degrees C\" % T_init)\n mylog.info(\"CCD Count: %d\" % ccd_count)\n mylog.info(\"FEP Count: %d\" % fep_count)\n if vehicle_load is None:\n disp_pitch = pitch\n disp_roll = off_nom_roll\n else:\n pitches = states[\"pitch\"][ecs_run_idxs]\n rolls = states[\"off_nom_roll\"][ecs_run_idxs]\n disp_pitch = \"Min: %g, Max: %g\" % (pitches.min(), pitches.max())\n disp_roll = \"Min: %g, Max: %g\" % (rolls.min(), rolls.max())\n mylog.info(\"Pitch: %s\" % disp_pitch)\n mylog.info(\"SIM Position: %g\" % simpos)\n mylog.info(\"Off-nominal Roll: %s\" % disp_roll)\n mylog.info(\"Detector Housing Heater: %s\" % {0: \"OFF\", 1: \"ON\"}[dh_heater])\n\n mylog.info(\"Model Result\")\n mylog.info(\"------------\")\n\n if self.name == \"fptemp_11\":\n limit = limits[self.name][instrument]\n margin = 0.0\n else:\n limit = limits[self.name]\n margin = margins[self.name]\n if self.name in low_limits:\n self.low_limit = Quantity(low_limits[self.name], \"deg_C\")\n else:\n self.low_limit = None\n self.limit = Quantity(limit, \"deg_C\")\n self.margin = Quantity(margin, 'deg_C')\n self.limit_time = None\n self.limit_date = None\n self.duration = None\n self.violate = False\n if self.no_limit:\n return\n viols = self.mvals.value > self.limit.value\n if np.any(viols):\n idx = np.where(viols)[0][0]\n self.limit_time = self.times('model', self.name)[idx]\n self.limit_date = secs2date(self.limit_time)\n self.duration = Quantity((self.limit_time.value-tstart)*0.001, \"ks\")\n msg = \"The limit of %g degrees C will be reached at %s, \" % (self.limit.value, self.limit_date)\n msg += \"after %g ksec.\" % self.duration.value\n mylog.info(msg)\n if self.limit_time < self.tstop:\n self.violate = True\n viol_time = \"before\"\n else:\n self.violate = False\n viol_time = \"after\"\n mylog.info(\"The limit is reached %s the end of the observation.\" % viol_time)\n else:\n mylog.info(\"The limit of %g degrees C is never reached.\" % self.limit.value)\n\n if self.violate:\n mylog.warning(\"This observation is NOT safe from a thermal perspective.\")\n else:\n mylog.info(\"This observation is safe from a thermal perspective.\")\n\n def plot_model(self, no_annotations=False, plot=None, fontsize=18,\n **kwargs):\n \"\"\"\n Plot the simulated model run.\n\n Parameters\n ----------\n no_annotations : boolean, optional\n If True, don't put lines or text on the plot. Shouldn't be\n used if you're actually trying to determine if a ECS run is\n safe. 
Default: False\n \"\"\"\n if self.vehicle_load is None:\n field2 = None\n else:\n field2 = \"pitch\"\n viol_text = \"NOT SAFE\" if self.violate else \"SAFE\"\n dp = DatePlot(self, [(\"model\", self.name)], field2=field2, plot=plot,\n fontsize=fontsize, **kwargs)\n if not self.no_limit:\n if self.name == \"fptemp_11\":\n color = {\"ACIS-S\": \"blue\", \"ACIS-I\": \"purple\"}[self.instrument]\n dp.add_hline(self.limit.value, ls='--', lw=2, color=color)\n else:\n dp.add_hline(self.limit.value, ls='-', lw=2, color='g')\n dp.add_hline(self.limit.value+self.margin.value, ls='-', lw=2, color='gold')\n if self.low_limit is not None:\n dp.add_hline(self.low_limit.value, ls='-', lw=2, color='g')\n dp.add_hline(self.low_limit.value - self.margin.value, ls='-', lw=2, color='gold')\n if not no_annotations:\n if not self.no_limit:\n dp.add_text(find_text_time(self.datestop, hours=4.0), self.T_init.value + 2.0,\n viol_text, fontsize=22, color='black')\n dp.add_vline(self.datestart, ls='--', lw=2, color='b')\n dp.add_text(find_text_time(self.datestart), self.limit.value - 2.0,\n \"START\", color='blue', rotation=\"vertical\")\n dp.add_vline(self.datestop, ls='--', lw=2, color='b')\n dp.add_text(find_text_time(self.datestop), self.limit.value - 12.0,\n \"END\", color='blue', rotation=\"vertical\")\n if self.limit_date is not None:\n dp.add_vline(self.limit_date, ls='--', lw=2, color='r')\n dp.add_text(find_text_time(self.limit_date), self.limit.value-2.0,\n \"VIOLATION\", color='red', rotation=\"vertical\")\n dp.set_xlim(find_text_time(self.datestart, hours=-1.0), self.dateend)\n if self.low_limit is not None:\n ymin = self.low_limit.value-self.margin.value\n else:\n ymin = self.T_init.value\n ymin = min(ymin, self.mvals.value.min())-2.0\n ymax = max(self.limit.value+self.margin.value, self.mvals.value.max())+3.0\n self._time_ticks(dp, ymax, fontsize)\n dp.set_ylim(ymin, ymax)\n return dp\n\n def _time_ticks(self, dp, ymax, fontsize):\n from matplotlib.ticker import AutoMinorLocator\n axt = dp.ax.twiny()\n mtimes = self.xija_model.times\n xmin, xmax = (plotdate2cxctime(dp.ax.get_xlim())-mtimes[0])*1.0e-3\n axt.plot((mtimes-mtimes[0])*1.0e-3, \n ymax*np.ones_like(mtimes))\n axt.set_xlim(xmin, xmax)\n axt.xaxis.set_minor_locator(AutoMinorLocator(5))\n axt.set_xlabel(\"Time (ks)\", fontdict={\"size\": fontsize})\n fontProperties = font_manager.FontProperties(size=fontsize)\n for label in axt.get_xticklabels():\n label.set_fontproperties(fontProperties)\n for label in axt.get_yticklabels():\n label.set_fontproperties(fontProperties)\n\n def get_temp_at_time(self, t):\n \"\"\"\n Get the model temperature at a time *t* seconds\n past the beginning of the ECS run.\n \"\"\"\n t += self.tstart.value\n return Quantity(np.interp(t, self['model', self.name].times.value,\n self['model', self.name].value), \"deg_C\")\n\n @property\n def mvals(self):\n return self['model', self.name]\n\n def write_msids(self, filename, fields, mask_field=None, overwrite=False):\n raise NotImplementedError\n\n def write_states(self, states_file, overwrite=False):\n raise NotImplementedError\n\n def write_model(self, filename, overwrite=False):\n raise NotImplementedError\n\n def make_dashboard_plots(self, yplotlimits=None, errorplotlimits=None, fig=None):\n raise NotImplementedError\n\n def write_model_and_data(self, filename, overwrite=False):\n raise NotImplementedError\n\n\nclass SimulateECSRun(SimulateSingleObs):\n \"\"\"\n Class for simulating thermal models for ECS measurements.\n 
\"\"\"\n","sub_path":"acispy/thermal_models.py","file_name":"thermal_models.py","file_ext":"py","file_size_in_byte":46741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"607918778","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nfrom PyQt4.QtGui import QApplication, QWidget, QMainWindow\r\nfrom pyqt4.mainwindow import *\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\r\n\r\nimport user\r\n\r\n#Title of the windows\r\ntitle = \"Energy schedule balancing with RTC communication\"\r\nylabel = \"Power\"\r\nxlabel = \"Hours\"\r\n\r\n\r\n#init graph variables\r\nschedule = []\r\nfor i in range(0,24):\r\n schedule.append(0)\r\n\r\nclass Win(QMainWindow):\r\n def __init__(self, parent=None):\r\n QMainWindow.__init__(self,parent)\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n\r\n self.tar = 0\r\n self.eps = 0\r\n\r\n #options\r\n self.setWindowTitle(title)\r\n\r\n #signals\r\n self.ui.pushButton.clicked.connect(self.send)\r\n for i in range(0,24):\r\n s = eval(\"self.ui.s\"+str(i))\r\n s.setRange(-10000,10000)\r\n s.setSingleStep(100)\r\n s.valueChanged.connect(self.update)\r\n self.ui.starget.valueChanged.connect(self.target)\r\n self.ui.sepsilon.valueChanged.connect(self.epsilon)\r\n class Fig():\r\n def __init__(self, color):\r\n self.figure = plt.figure()\r\n self.canvas = FigureCanvas(self.figure)\r\n self.color = color\r\n self.local = Fig('green')\r\n self.ajusted = Fig('blue')\r\n self.ui.graphs.addWidget(self.local.canvas)\r\n self.ui.graphs.addWidget(self.ajusted.canvas)\r\n\r\n # methods\r\n def send(self):\r\n data = user.send(schedule, self.tar, self.eps)\r\n self.plot(self.ajusted, list(data.values()))\r\n #self.plot(self.ajusted, schedule)\r\n def update(self, value):\r\n i = int(self.sender().objectName()[1:])\r\n schedule[i] = value\r\n self.plot(self.local, schedule)\r\n #self.plot(self.local, schedule)\r\n def epsilon(self, value):\r\n self.eps = value\r\n self.plot(self.local, schedule)\r\n def target(self, value):\r\n self.tar = value\r\n self.plot(self.local, schedule)\r\n #def log(self, text): self.ui.textBrowser.insertPlainText(text)\r\n def plot(self, graph, data):\r\n ax = graph.figure.add_subplot(111)\r\n ax.hold(False)\r\n ax.set_xlim(0,23)\r\n h = range(0,24)\r\n ftarget = (self.tar,)*24\r\n fepsup = (self.tar*(1+self.eps),)*24\r\n fepsdown = (self.tar*(1-self.eps),)*24\r\n ax.plot(h, data, 'g', h, ftarget, 'k', h, fepsup, 'k--', h, fepsdown, 'k--')\r\n graph.canvas.draw()\r\n\r\n#main of the application\r\nif __name__ == '__main__':\r\n\r\n app = QApplication(sys.argv)\r\n win = Win()\r\n win.show()\r\n\r\n #GUI main loop\r\n sys.exit(app.exec_())\r\n\r\n#def log(text): win.log(text)\r\n\r\n","sub_path":"rtc/python/gui/user/bak/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"541877608","text":"import h5py\nimport numpy as np\nimport os\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom functools import partial\nimport tensorflow as tf\nfrom glob import glob\n\nfrom Segmentation.utils.augmentation import crop_randomly_image_pair_2d, adjust_contrast_randomly_image_pair_2d\nfrom Segmentation.utils.augmentation import adjust_brightness_randomly_image_pair_2d\nfrom Segmentation.utils.augmentation import apply_centre_crop_3d, apply_valid_random_crop_3d\nfrom Segmentation.utils.augmentation 
import apply_random_brightness_3d, apply_random_contrast_3d, apply_random_gamma_3d\nfrom Segmentation.utils.augmentation import apply_flip_3d, apply_rotate_3d, normalise\n\ndef get_multiclass(label):\n\n # label shape\n # (batch_size, height, width, channels)\n\n batch_size = label.shape[0]\n height = label.shape[1]\n width = label.shape[2]\n channels = label.shape[3]\n\n background = np.zeros((batch_size, height, width, 1))\n label_sum = np.sum(label, axis=3)\n background[label_sum == 0] = 1\n\n label = np.concatenate((label, background), axis=3)\n\n return label\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float /p double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef create_OAI_dataset(data_folder, tfrecord_directory, get_train=True, use_2d=True, crop_size=None):\n\n if not os.path.exists(tfrecord_directory):\n os.mkdir(tfrecord_directory)\n\n train_val = 'train' if get_train else 'valid'\n files = glob(os.path.join(data_folder, f'*.im'))\n\n for idx, f in enumerate(files):\n f_name = f.split(\"/\")[-1]\n f_name = f_name.split(\".\")[0]\n\n fname_img = f'{f_name}.im'\n fname_seg = f'{f_name}.seg'\n\n img_filepath = os.path.join(data_folder, fname_img)\n seg_filepath = os.path.join(data_folder, fname_seg)\n\n assert os.path.exists(seg_filepath), f\"Seg file does not exist: {seg_filepath}\"\n\n with h5py.File(img_filepath, 'r') as hf:\n img = np.array(hf['data'])\n with h5py.File(seg_filepath, 'r') as hf:\n seg = np.array(hf['data'])\n\n if crop_size is not None:\n\n img_mid = (int(img.shape[0] / 2), int(img.shape[1] / 2))\n seg_mid = (int(seg.shape[0] / 2), int(seg.shape[1] / 2))\n\n assert img_mid == seg_mid, \"We expect the mid shapes to be the same size\"\n\n seg_total = np.sum(seg)\n\n img = img[img_mid[0] - crop_size:img_mid[0] + crop_size,\n img_mid[1] - crop_size:img_mid[1] + crop_size, :]\n seg = seg[seg_mid[0] - crop_size:seg_mid[0] + crop_size,\n seg_mid[1] - crop_size:seg_mid[1] + crop_size, :, :]\n\n # assert np.sum(seg) == seg_total, \"We are losing information in the initial cropping.\"\n assert img.shape == (crop_size * 2, crop_size * 2, 160)\n assert seg.shape == (crop_size * 2, crop_size * 2, 160, 6)\n\n img = np.rollaxis(img, 2, 0)\n seg = np.rollaxis(seg, 2, 0)\n seg_temp = np.zeros((*seg.shape[0:3], 1), dtype=np.int8)\n\n assert seg.shape[0:3] == seg_temp.shape[0:3]\n\n seg_sum = np.sum(seg, axis=-1)\n seg_temp[seg_sum == 0] = 1\n seg = np.concatenate([seg_temp, seg], axis=-1) # adds additional channel for no class\n img = np.expand_dims(img, axis=-1)\n assert img.shape[-1] == 1\n assert seg.shape[-1] == 7\n\n shard_dir = f'{idx:03d}-of-{len(files) - 1:03d}.tfrecords'\n tfrecord_filename = os.path.join(tfrecord_directory, shard_dir)\n\n target_shape, label_shape = None, None\n with tf.io.TFRecordWriter(tfrecord_filename) as writer:\n if use_2d:\n for k in range(len(img)):\n img_slice = img[k, :, :, :]\n seg_slice = seg[k, :, :, :]\n\n img_raw = img_slice.tostring()\n seg_raw = seg_slice.tostring()\n\n height = img_slice.shape[0]\n width = img_slice.shape[1]\n 
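The _bytes_feature/_int64_feature helpers above and the parse_fn_2d parser further down form a write/read pair around tf.train.Example. A minimal, self-contained round-trip sketch of that pattern, using toy shapes and values that are purely illustrative (not taken from the OAI data), could look like this:

```python
import numpy as np
import tensorflow as tf

# Toy image/label pair standing in for one slice; shapes are illustrative only.
img = np.random.rand(4, 4, 1).astype(np.float32)
seg = np.zeros((4, 4, 7), dtype=np.int16)

feature = {
    "height": tf.train.Feature(int64_list=tf.train.Int64List(value=[img.shape[0]])),
    "width": tf.train.Feature(int64_list=tf.train.Int64List(value=[img.shape[1]])),
    "image_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
    "label_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[seg.tobytes()])),
}
serialized = tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()

# Parsing mirrors parse_fn_2d: fixed-length features, then decode_raw + reshape.
parsed = tf.io.parse_single_example(serialized, {
    "height": tf.io.FixedLenFeature([], tf.int64),
    "width": tf.io.FixedLenFeature([], tf.int64),
    "image_raw": tf.io.FixedLenFeature([], tf.string),
    "label_raw": tf.io.FixedLenFeature([], tf.string),
})
img_back = tf.reshape(tf.io.decode_raw(parsed["image_raw"], tf.float32), (4, 4, 1))
seg_back = tf.reshape(tf.io.decode_raw(parsed["label_raw"], tf.int16), (4, 4, 7))
```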
num_channels = seg_slice.shape[-1]\n\n target_shape = img_slice.shape\n label_shape = seg.shape\n\n feature = {\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'num_channels': _int64_feature(num_channels),\n 'image_raw': _bytes_feature(img_raw),\n 'label_raw': _bytes_feature(seg_raw)\n }\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString())\n else:\n height = img.shape[0]\n width = img.shape[1]\n depth = img.shape[2]\n num_channels = seg.shape[-1]\n\n target_shape = img.shape\n label_shape = seg.shape\n\n img_raw = img.tostring()\n seg_raw = seg.tostring()\n\n feature = {\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'depth': _int64_feature(depth),\n 'num_channels': _int64_feature(num_channels),\n 'image_raw': _bytes_feature(img_raw),\n 'label_raw': _bytes_feature(seg_raw)\n }\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString())\n print(f'{idx} out of {len(files) - 1} datasets have been processed. Target: {target_shape}, Label: {label_shape}')\n\ndef parse_fn_2d(example_proto, training, augmentation, multi_class=True, use_bfloat16=False, use_RGB=False):\n\n if use_bfloat16:\n dtype = tf.bfloat16\n else:\n dtype = tf.float32\n\n features = {\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'num_channels': tf.io.FixedLenFeature([], tf.int64),\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n 'label_raw': tf.io.FixedLenFeature([], tf.string)\n }\n\n # Parse the input tf.Example proto using the dictionary above.\n image_features = tf.io.parse_single_example(example_proto, features)\n image_raw = tf.io.decode_raw(image_features['image_raw'], tf.float32)\n image = tf.cast(tf.reshape(image_raw, [384, 384, 1]), dtype)\n\n if use_RGB:\n image = tf.image.grayscale_to_rgb(image)\n\n seg_raw = tf.io.decode_raw(image_features['label_raw'], tf.int16)\n seg = tf.reshape(seg_raw, [384, 384, 7])\n seg = tf.cast(seg, dtype)\n\n if training:\n if augmentation == 'random_crop':\n image, seg = crop_randomly_image_pair_2d(image, seg)\n elif augmentation == 'noise':\n image, seg = adjust_brightness_randomly_image_pair_2d(image, seg)\n image, seg = adjust_contrast_randomly_image_pair_2d(image, seg)\n elif augmentation == 'crop_and_noise':\n image, seg = crop_randomly_image_pair_2d(image, seg)\n image, seg = adjust_brightness_randomly_image_pair_2d(image, seg)\n image, seg = adjust_contrast_randomly_image_pair_2d(image, seg)\n elif augmentation is None:\n image = tf.image.resize_with_crop_or_pad(image, 288, 288)\n seg = tf.image.resize_with_crop_or_pad(seg, 288, 288)\n else:\n \"Augmentation strategy {} does not exist or is not supported!\".format(augmentation)\n\n else:\n image = tf.image.resize_with_crop_or_pad(image, 288, 288)\n seg = tf.image.resize_with_crop_or_pad(seg, 288, 288)\n\n if not multi_class:\n seg = tf.slice(seg, [0, 0, 1], [-1, -1, 6])\n seg = tf.math.reduce_sum(seg, axis=-1)\n seg = tf.expand_dims(seg, axis=-1)\n seg = tf.clip_by_value(seg, 0, 1)\n\n return (image, seg)\n\ndef parse_fn_3d(example_proto, training, multi_class=True, use_bfloat16=False, use_RGB=False):\n\n if use_bfloat16:\n dtype = tf.bfloat16\n else:\n dtype = tf.float32\n\n features = {\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'depth': tf.io.FixedLenFeature([], tf.int64),\n 'num_channels': tf.io.FixedLenFeature([], tf.int64),\n 'image_raw': 
tf.io.FixedLenFeature([], tf.string),\n 'label_raw': tf.io.FixedLenFeature([], tf.string)\n }\n\n # Parse the input tf.Example proto using the dictionary above.\n image_features = tf.io.parse_single_example(example_proto, features)\n image_raw = tf.io.decode_raw(image_features['image_raw'], tf.float32)\n image = tf.reshape(image_raw, [image_features['height'], image_features['width'], image_features['depth'], 1])\n\n seg_raw = tf.io.decode_raw(image_features['label_raw'], tf.int16)\n seg = tf.reshape(seg_raw, [image_features['height'], image_features['width'],\n image_features['depth'], image_features['num_channels']])\n seg = tf.cast(seg, tf.float32)\n\n if not multi_class:\n seg_cartilage = tf.slice(seg, [0, 0, 0, 1], [-1, -1, -1, 6])\n seg_cartilage = tf.math.reduce_sum(seg_cartilage, axis=-1)\n seg_cartilage = tf.expand_dims(seg_cartilage, axis=-1)\n seg = tf.clip_by_value(seg_cartilage, 0, 1)\n return (image, seg)\n\ndef read_tfrecord_2d(tfrecords_dir, batch_size, buffer_size, augmentation,\n parse_fn=parse_fn_2d, multi_class=True,\n is_training=False, use_bfloat16=False,\n use_RGB=False):\n\n file_list = tf.io.matching_files(os.path.join(tfrecords_dir, '*-*'))\n shards = tf.data.Dataset.from_tensor_slices(file_list)\n cycle_l = 1\n if is_training:\n shards = shards.shuffle(tf.cast(tf.shape(file_list)[0], tf.int64))\n cycle_l = 8\n if parse_fn is parse_fn_2d:\n shards = shards.repeat()\n dataset = shards.interleave(tf.data.TFRecordDataset,\n cycle_length=cycle_l,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if is_training:\n dataset = dataset.shuffle(buffer_size=buffer_size)\n\n parser = partial(parse_fn,\n training=is_training,\n augmentation=augmentation,\n multi_class=multi_class,\n use_bfloat16=use_bfloat16,\n use_RGB=use_RGB)\n dataset = dataset.map(map_func=parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE)\n\n # optimise dataset performance\n options = tf.data.Options()\n options.experimental_optimization.parallel_batch = True\n options.experimental_optimization.map_fusion = True\n options.experimental_optimization.map_vectorization.enabled = True\n options.experimental_optimization.map_parallelization = True\n dataset = dataset.with_options(options)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n# def read_tfrecord_3d(tfrecords_dir, batch_size, buffer_size, is_training, crop_size=None, depth_crop_size=80, aug=[], predict_slice=False, **kwargs):\n# dataset = read_tfrecord(tfrecords_dir, batch_size, buffer_size, parse_fn_3d, is_training=is_training, crop_size=crop_size, **kwargs)\n# if \"resize\" in aug:\n# assert \"shift\" in aug, \"Need to use shift if using resize\"\n# if is_training:\n# if crop_size is not None:\n# if (crop_size > 172) or (depth_crop_size > 70):\n# assert not (\"shift\" in aug), \"Can't apply shift augmentation with crop_size > 172 or depth_crop_size > 70.\"\n# resize = \"resize\" in aug\n# random_shift = \"shift\" in aug\n# parse_crop = partial(apply_valid_random_crop_3d, crop_size=crop_size, depth_crop_size=depth_crop_size, resize=resize, random_shift=random_shift, output_slice=predict_slice)\n# dataset = dataset.map(map_func=parse_crop, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n# if \"bright\" in aug:\n# dataset = dataset.map(apply_random_brightness_3d)\n# if \"contrast\" in aug:\n# dataset = dataset.map(apply_random_contrast_3d)\n# if \"gamma\" in aug:\n# dataset = 
dataset.map(apply_random_gamma_3d)\n# if \"flip\" in aug:\n# dataset = dataset.map(apply_flip_3d)\n# if \"rotate\" in aug:\n# dataset = dataset.map(apply_rotate_3d)\n# else:\n# if crop_size is not None:\n# parse_crop = partial(apply_centre_crop_3d, crop_size=crop_size, depth_crop_size=depth_crop_size, output_slice=predict_slice)\n# dataset = dataset.map(map_func=parse_crop, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n# if \"normalise\" in aug:\n# dataset = dataset.map(normalise)\n# return dataset\n\ndef read_tfrecord_3d(tfrecords_dir, batch_size, buffer_size, is_training,\n crop_size=None, depth_crop_size=80, aug=[],\n predict_slice=False, use_keras_fit=False, multi_class=False):\n file_list = tf.io.matching_files(os.path.join(tfrecords_dir, '*-*'))\n shards = tf.data.Dataset.from_tensor_slices(file_list)\n if is_training:\n shards = shards.shuffle(tf.cast(tf.shape(file_list)[0], tf.int64))\n if use_keras_fit:\n shards = shards.repeat()\n dataset = shards.interleave(tf.data.TFRecordDataset, cycle_length=4, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n if is_training:\n dataset = dataset.shuffle(buffer_size=buffer_size)\n\n parser = partial(parse_fn_3d, training=is_training, multi_class=multi_class)\n\n dataset = dataset.map(map_func=parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE)\n\n # optimise dataset performance\n options = tf.data.Options()\n options.experimental_optimization.parallel_batch = True\n options.experimental_optimization.map_fusion = True\n options.experimental_optimization.map_vectorization.enabled = True\n options.experimental_optimization.map_parallelization = True\n dataset = dataset.with_options(options)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n if crop_size is not None:\n if is_training:\n resize = \"resize\" in aug\n random_shift = \"shift\" in aug\n parse_crop = partial(apply_valid_random_crop_3d, crop_size=crop_size, depth_crop_size=depth_crop_size, resize=resize, random_shift=random_shift, output_slice=predict_slice)\n dataset = dataset.map(map_func=parse_crop, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if \"bright\" in aug:\n dataset = dataset.map(apply_random_brightness_3d)\n if \"contrast\" in aug:\n dataset = dataset.map(apply_random_contrast_3d)\n if \"gamma\" in aug:\n dataset = dataset.map(apply_random_gamma_3d)\n if \"flip\" in aug:\n dataset = dataset.map(apply_flip_3d)\n if \"rotate\" in aug:\n dataset = dataset.map(apply_rotate_3d)\n else:\n parse_crop = partial(apply_centre_crop_3d, crop_size=crop_size, depth_crop_size=depth_crop_size, output_slice=predict_slice)\n dataset = dataset.map(map_func=parse_crop, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.map(normalise)\n return dataset\n","sub_path":"Segmentation/utils/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":15995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"99801329","text":"# coding: utf-8\n\nfrom datetime import datetime, timedelta\nfrom flask import Flask\nfrom flask import session, request\nfrom flask import render_template, redirect, jsonify\nfrom flask_ldapconn import LDAPConn\nfrom flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.security import gen_salt\nfrom flask_oauthlib.provider import OAuth2Provider\nfrom pprint import pprint\nimport json\nimport ssl\nimport os\n\n# basic configuration\nconfig = {\n 'name': 
'cteward-auth',\n 'debug': False,\n 'appconfig': {\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:///db.sqlite',\n },\n 'app': {\n 'host': '0.0.0.0'\n }\n}\n\nif 'CTEWARD_AUTH_CONFIG' in os.environ:\n configfile = os.environ['CTEWARD_AUTH_CONFIG']\nelse:\n configfile = '/etc/cteward/auth.json'\n\nwith open(configfile) as json_file:\n config.update(json.load(json_file))\n\n# additional configuration\nif 'ssl' in config:\n if not 'PREFERRED_URL_SCHEME' in config['appconfig']:\n config['appconfig']['PREFERRED_URL_SCHEME'] = 'https'\n if not 'ciphers' in config['ssl']:\n config['ssl']['ciphers'] = 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'\n if not 'dh_params' in config['ssl']:\n config['ssl']['dh_params'] = '/etc/cteward/dhparams.pem'\n\napp = Flask(config['name'], template_folder='templates')\napp.debug = config['debug']\napp.secret_key = config['secret']\napp.config.update(config['appconfig'])\n\nif 'ldapconfig' in config:\n app.config.update(config['ldapconfig'])\n if not 'LDAP_VERIFY_SSL' in config['ldapconfig'] or config['ldapconfig']['LDAP_VERIFY_SSL']:\n app.config.update({'LDAP_REQUIRE_CERT': ssl.CERT_REQUIRED})\n ldap = LDAPConn(app)\n\ndb = SQLAlchemy(app)\noauth = OAuth2Provider(app)\n\nif 'ssl' in config:\n ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n ssl_context.load_cert_chain(config['ssl']['certfile'],config['ssl']['keyfile'])\n ssl_context.set_ciphers(config['ssl']['ciphers'])\n ssl_context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n ssl_context.load_dh_params(config['ssl']['dh_params'])\n # FIXME: missing OCSP stapling\n # FIXME: disable SSL session tickets\nelse:\n ssl_context = None\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(40), unique=True)\n\n\nclass Client(db.Model):\n client_id = db.Column(db.String(40), primary_key=True)\n client_secret = db.Column(db.String(55), nullable=False)\n\n user_id = db.Column(db.ForeignKey('user.id'))\n user = db.relationship('User')\n\n _redirect_uris = db.Column(db.Text)\n _default_scopes = db.Column(db.Text)\n\n @property\n def client_type(self):\n return 'public'\n\n @property\n def redirect_uris(self):\n if self._redirect_uris:\n return self._redirect_uris.split()\n return []\n\n @property\n def default_redirect_uri(self):\n return self.redirect_uris[0]\n\n @property\n def default_scopes(self):\n if self._default_scopes:\n return self._default_scopes.split()\n return []\n\n\nclass Grant(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n\n user_id = db.Column(\n db.Integer, db.ForeignKey('user.id', ondelete='CASCADE')\n )\n user = db.relationship('User')\n\n client_id = db.Column(\n db.String(40), db.ForeignKey('client.client_id'),\n nullable=False,\n )\n client = db.relationship('Client')\n\n code = db.Column(db.String(255), index=True, nullable=False)\n\n redirect_uri = db.Column(db.String(255))\n expires = db.Column(db.DateTime)\n\n _scopes = db.Column(db.Text)\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n return self\n\n @property\n def scopes(self):\n if self._scopes:\n return self._scopes.split()\n return []\n\n\nclass Token(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n client_id = db.Column(\n db.String(40), db.ForeignKey('client.client_id'),\n nullable=False,\n )\n client = db.relationship('Client')\n\n user_id = db.Column(\n db.Integer, db.ForeignKey('user.id')\n )\n user = db.relationship('User')\n\n # currently only bearer is supported\n token_type = db.Column(db.String(40))\n\n access_token = 
db.Column(db.String(255), unique=True)\n refresh_token = db.Column(db.String(255), unique=True)\n expires = db.Column(db.DateTime)\n _scopes = db.Column(db.Text)\n\n @property\n def scopes(self):\n if self._scopes:\n return self._scopes.split()\n return []\n\n\ndef current_user():\n if 'id' in session:\n uid = session['id']\n return User.query.get(uid)\n return None\n\n\n#### TODO: integrated this properly\nusername = 'username'\npassword = 'password'\nattribute = 'uid'\nbasedn = 'ou=crew,dc=c-base,dc=org'\nsearch_filter = ('(memberOf=cn=crew,ou=groups,dc=c-base,dc=org)')\n\nwith app.app_context():\n retval = ldap.authenticate(username, password, attribute, basedn, search_filter)\n if retval:\n print('Welcome %s.' % username)\n else:\n print('Auth failed.')\n####\n\n@app.route('/', methods=('GET', 'POST'))\ndef home():\n if request.method == 'POST':\n username = request.form.get('username')\n user = User.query.filter_by(username=username).first()\n if not user:\n user = User(username=username)\n db.session.add(user)\n db.session.commit()\n session['id'] = user.id\n return redirect('/')\n user = current_user()\n return render_template('home.html', user=user)\n\n\n@app.route('/client')\ndef client():\n user = current_user()\n if not user:\n return redirect('/')\n item = Client(\n client_id=gen_salt(40),\n client_secret=gen_salt(50),\n _redirect_uris=' '.join([\n 'http://localhost:8000/authorized',\n 'http://127.0.0.1:8000/authorized',\n 'http://127.0.1:8000/authorized',\n 'http://127.1:8000/authorized',\n ]),\n _default_scopes='email',\n user_id=user.id,\n )\n db.session.add(item)\n db.session.commit()\n return jsonify(\n client_id=item.client_id,\n client_secret=item.client_secret,\n )\n\n\n@oauth.clientgetter\ndef load_client(client_id):\n return Client.query.filter_by(client_id=client_id).first()\n\n\n@oauth.grantgetter\ndef load_grant(client_id, code):\n return Grant.query.filter_by(client_id=client_id, code=code).first()\n\n\n@oauth.grantsetter\ndef save_grant(client_id, code, request, *args, **kwargs):\n # decide the expires time yourself\n expires = datetime.utcnow() + timedelta(seconds=100)\n grant = Grant(\n client_id=client_id,\n code=code['code'],\n redirect_uri=request.redirect_uri,\n _scopes=' '.join(request.scopes),\n user=current_user(),\n expires=expires\n )\n db.session.add(grant)\n db.session.commit()\n return grant\n\n\n@oauth.tokengetter\ndef load_token(access_token=None, refresh_token=None):\n if access_token:\n return Token.query.filter_by(access_token=access_token).first()\n elif refresh_token:\n return Token.query.filter_by(refresh_token=refresh_token).first()\n\n\n@oauth.tokensetter\ndef save_token(token, request, *args, **kwargs):\n toks = Token.query.filter_by(\n client_id=request.client.client_id,\n user_id=request.user.id\n )\n # make sure that every client has only one token connected to a user\n for t in toks:\n db.session.delete(t)\n\n expires_in = token.pop('expires_in')\n expires = datetime.utcnow() + timedelta(seconds=expires_in)\n\n tok = Token(\n access_token=token['access_token'],\n refresh_token=token['refresh_token'],\n token_type=token['token_type'],\n _scopes=token['scope'],\n expires=expires,\n client_id=request.client.client_id,\n user_id=request.user.id,\n )\n db.session.add(tok)\n db.session.commit()\n return tok\n\n\n@app.route('/oauth/token', methods=['GET', 'POST'])\n@oauth.token_handler\ndef access_token():\n return None\n\n\n@app.route('/oauth/authorize', methods=['GET', 'POST'])\n@oauth.authorize_handler\ndef authorize(*args, **kwargs):\n 
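The /oauth/authorize and /oauth/token handlers in this record implement a standard authorization-code flow, so any OAuth2 client library can drive them. A hypothetical client-side sketch using requests-oauthlib follows; the host URL, credentials, and callback are placeholders and are not part of this application:

```python
from requests_oauthlib import OAuth2Session

client_id = "..."       # issued by the /client endpoint above
client_secret = "..."
redirect_uri = "http://localhost:8000/authorized"

oauth = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=["email"])
auth_url, state = oauth.authorization_url("https://auth.example.org/oauth/authorize")
# The user visits auth_url, confirms, and is redirected back with ?code=...&state=...
token = oauth.fetch_token(
    "https://auth.example.org/oauth/token",
    client_secret=client_secret,
    authorization_response="http://localhost:8000/authorized?code=...&state=" + state,
)
# The bearer token then unlocks the protected endpoint defined below.
profile = oauth.get("https://auth.example.org/api/me").json()
```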
user = current_user()\n if not user:\n return redirect('/')\n if request.method == 'GET':\n client_id = kwargs.get('client_id')\n client = Client.query.filter_by(client_id=client_id).first()\n kwargs['client'] = client\n kwargs['user'] = user\n return render_template('authorize.html', **kwargs)\n\n confirm = request.form.get('confirm', 'no')\n return confirm == 'yes'\n\n\n@app.route('/api/me')\n@oauth.require_oauth()\ndef me():\n user = request.oauth.user\n return jsonify(username=user.username)\n\ndef add_common_response_headers(response):\n # security headers\n response.headers.add('Strict-Transport-Security','max-age=63072000; includeSubdomains; preload')\n response.headers.add('X-Frame-Options','DENY')\n response.headers.add('X-Content-Type-Options','nosniff')\n return response\n\nif __name__ == '__main__':\n db.create_all()\n app.after_request(add_common_response_headers)\n app.run(ssl_context=ssl_context, **config['app'])\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"192287005","text":"import requests\nimport json\nimport urllib3\nimport re\nfrom collections import Counter\nfrom settings import APP_STATIC\nimport os\nfrom myModule.tagStudy import model\nfrom myModule import counselingdao\n\nwith open(os.path.join(APP_STATIC, 'mobum_tagging_data.json')) as c:\n mobum_tagging_data = json.load(c)\nwith open(os.path.join(APP_STATIC, 'imp_data.json')) as c:\n situList = json.load(c)\n\ndef getMorph(text, type):\n openApiURL = \"http://aiopen.etri.re.kr:8000/WiseNLU\"\n accessKey = \"bf0500d9-cbdf-4a99-b84f-1020c9f829cd\"\n analysisCode = \"morp\"\n text = text\n\n requestJson = {\n \"access_key\": accessKey,\n \"argument\": {\n \"text\": text,\n \"analysis_code\": analysisCode\n }\n }\n\n http = urllib3.PoolManager()\n response = http.request(\n \"POST\",\n openApiURL,\n headers={\"Content-Type\": \"application/json; charset=UTF-8\"},\n body=json.dumps(requestJson)\n )\n dictData = eval(response.data)\n\n sen_dict = dict()\n sentence_noun_list = list()\n sentences = dictData['return_object']['sentence']\n\n morList = []\n for s in sentences:\n morp = s['morp']\n for _ in morp:\n if _['type'][0:3] == 'NNG' or _['type'][0:3] == 'NNP':\n if _['weight'] > 0.05:\n morList.append(_['lemma'])\n if type == 1:\n if _['type'] == 'SL' or _['type'] == 'SN':\n morList.append(_['lemma'])\n\n return morList\n\n# 제목+내용을 가지고 형태소 데이터 만드는 함수\ndef makeMorphList(text):\n morph_list = []\n text_morph = getMorph(text, 1) # 형태소 리스트\n morph_list.append(text_morph) # 형태소 리스트 추가\n\n count = Counter(text_morph)\n keywords = []\n for w, c in count.most_common(15):\n if len(w) > 1:\n temp = {'tag': w, 'count': c}\n keywords.append(temp['tag'])\n morph_list.append(keywords) # 키워드 형태소 리스트 추가\n\n return morph_list\n\ndef spellchecker(text):\n url = \"https://m.search.naver.com/p/csearch/ocontent/util/SpellerProxy\"\n params = {\n '_callback': 'mycallback',\n 'q': text,\n 'where': 'nexearch',\n 'color_blindness': '0'\n }\n\n if len(text) > 500:\n return\n\n response = requests.get(url, params=params).text\n response = response.replace(params['_callback'] + '(', '').replace(');', '')\n response_dict = json.loads(response)\n result = response_dict['message']['result']['notag_html']\n result = re.sub(r'<\\/?.*?>', '', result)\n\n return result\n\n# 태그 생성 함수\ndef getTags(morph_list, mobums, mobumLength):\n mobumDict = dict()\n text_tag = []\n # 해당 중분류의 모범상담이 1개 이상이면\n if mobumLength 
!= 0:\n for mobumIdx, mobum in enumerate(mobums):\n cnt = 0\n # 제목/내용 부분\n for morph in morph_list[0]:\n if morph in mobum:\n cnt += 1\n else:\n continue\n # 키워드 부분\n for morph in morph_list[1]:\n if morph in mobum:\n cnt += 2\n else:\n continue\n mobumDict[mobumIdx] = cnt\n sortedItem = sorted(mobumDict.items(), key=lambda k: k[1], reverse=True)\n tagNum = sortedItem[0][0] # 가장 많은 빈도수를 가진 모범사례 추출\n\n for morph in morph_list[0]:\n if morph in mobums[tagNum] and morph not in text_tag:\n text_tag.append(morph)\n\n if morph in situList and morph not in text_tag:\n text_tag.append(morph)\n # 모범상담이 존재하지 않으면\n else:\n for morph in morph_list[0]:\n if morph in situList and morph not in text_tag:\n text_tag.append(morph)\n\n return text_tag\n\n\n# 태그와 유사한 단어 묶음의 태그 생성\ndef getSimilarTags(text_tag, type):\n similar_tag = []\n exist = []\n for tag in text_tag:\n similar_tag.append(tag)\n exist.append(tag)\n\n try:\n if type == 1:\n tmp = model.wv.most_similar(tag)[0:3]\n else:\n tmp = model.wv.most_similar(tag)[0:2]\n\n for _ in tmp:\n if _[0] not in exist:\n similar_tag.append(_[0])\n\n except KeyError:\n continue\n\n return similar_tag\n\n\n# 해당 해결기준과 각각의 빈도수 추출\n# item = 소분류, itemCode = 중분류\ndef cmpGijun(similar_tag, item, itemcode):\n chk = counselingdao.countGijunBySmall(item)\n\n if chk==():\n chk2 = counselingdao.countGijunByMiddle(itemcode)\n if chk2 == ():\n print('해당 해결기준이 존재하지 않습니다.')\n\n typedict = dict()\n\n # 결과값이 있을때\n if chk != () or chk2 != ():\n if chk == ():\n res = chk2\n else:\n res = chk\n cid = res[0]['category_id']\n cname = res[0]['category_name']\n\n for type in res:\n cnt = 0\n type_tag = getMorph(spellchecker(type['type_1']), 2)\n similar_type_tag = getSimilarTags(type_tag, 2)\n\n for _ in similar_tag:\n if _ in similar_type_tag:\n cnt += 1\n typedict[type['type_1']] = cnt\n\n return typedict, cid, cname\n else:\n return typedict, -1, None\n\n\n# 선택된 해결기준 상세내용 불러오기\ndef getGijun(sortdict, id):\n max = -1\n max2 = 0\n maxlist = []\n gijunlist = []\n print(sortdict, id)\n for _ in sortdict:\n if max < _[1]:\n max = _[1]\n\n for idx,_ in enumerate(sortdict):\n if _[1] == max:\n maxlist.append(_[0])\n elif _[1] != max and _[1] != 0 and idx == 1:\n max2 = _[1]\n maxlist.append(_[0])\n elif _[1] == max2 and idx == 2:\n del(maxlist[-1])\n\n # 빈도수 1순위가 3개 이내일 때만 출력\n if len(maxlist) < 4:\n for _ in maxlist:\n gijunlist.append(counselingdao.getGijunList(_, id))\n else:\n print('해당 해결기준이 존재하지 않습니다.')\n\n print('final 기준리스트 ', gijunlist)\n return gijunlist\n\n\n# 해결기준 뿌려주기\ndef showGijun(gijunlist):\n type1, type2, type3, bigo = [], [], [], []\n ans = ''\n for i, gijun in enumerate(gijunlist):\n for i2, _ in enumerate(gijun):\n # type2가 없으면 type1, std 출력\n if _['type_2'] == '':\n ans += _['type_1'] + ' → ' + _['standard'] + '|'\n type1.append(_['type_1'])\n\n # type2가 있으면\n else:\n if _['type_1'] not in type1:\n type1.append(_['type_1'])\n ans += _['type_1'] + '|'\n\n # type3이 없으면 type2, std 출력\n if _['type_3'] == '':\n ans += '@- ' + _['type_2'] + ' → ' + _['standard'] + '|'\n type2.append(_['type_2'])\n\n # type3가 있으면\n else:\n if _['type_2'] not in type2:\n type2.append(_['type_2'])\n ans += '@- ' + _['type_2'] + '|'\n\n # type4가 없으면 type3, std 출력\n if _['type_4'] == '':\n ans += '#- ' + _['type_3'] + ' → ' + _['standard'] + '|'\n type3.append(_['type_3'])\n\n # type4가 있으면\n else:\n if _['type_3'] not in type3:\n type3.append(_['type_3'])\n ans += '#- ' + _['type_3'] + '|'\n\n ans += '$- ' + _['type_4'] + ' → ' + _['standard'] + '|'\n\n # 비고 출력\n if _['bigo'] != '':\n if 
_['bigo'] not in bigo:\n bigo.append(_['bigo'])\n\n if len(bigo) >= 2:\n ans += '|' + '비고 : ' + bigo[-2] + '|'\n\n if i == len(gijunlist) - 1 and i2 == len(gijun) - 1:\n ans += '|' + '비고 : ' + bigo[-1]\n\n if i2 == len(gijun) - 1:\n ans += '|'\n\n return ans\n\n\n# 실행함수\ndef getSolution(ques):\n itemcode = ques['mid_cate']\n item = ques['small_cate']\n q = ques['question']\n # question 부분 앞의 양식 제거하기 위함\n if q[0:8] == '문의 내용은 [':\n newQuestion = q.replace(q[:217], '')\n else:\n newQuestion = q\n text = spellchecker(ques['title']) + ' ' + newQuestion\n\n # 태깅을 위한 해당 중분류 모범상담 태그 데이터 불러오기\n mid_id = counselingdao.getMobum(itemcode)\n mobums = mobum_tagging_data[mid_id[0]['id'] - 1]\n mobumLength = len(mobums)\n\n # 형태소 생성\n morph_list = makeMorphList(text)\n\n # 태깅 시작\n text_tag = getTags(morph_list, mobums, mobumLength)\n\n # 해결기준과의 매칭을 위한 태깅 데이터의 유사단어 갖고오기\n similar_tag = getSimilarTags(text_tag, 1)\n\n # 관련 해결기준 불러오기\n typedict, cid, cname = cmpGijun(similar_tag, item, itemcode)\n if cid != -1:\n sortdict = sorted(typedict.items(), key=lambda k: k[1], reverse=True)\n\n # 선택된 해결기준 내용 불러오기\n gijunlist = getGijun(sortdict, cid)\n print('getSolution gijunList', gijunlist)\n if gijunlist != []:\n # 내용 뿌려주기\n solution = showGijun(gijunlist)\n else:\n return text_tag, -1, None\n else:\n return text_tag, -1, None\n\n return text_tag, solution, cname","sub_path":"homepage/myModule/getSolutionController.py","file_name":"getSolutionController.py","file_ext":"py","file_size_in_byte":9737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"293169532","text":"\"\"\"\nHackerRank Question Solution\n\nQuestion Link: https://www.hackerrank.com/challenges/flatland-space-stations/problem\nCategory: Practice/Algorithms/Implementation\nDifficulty: Easy\n\nDate Created: 03.26.2019\nLast Edit: 03.26.2019\n\"\"\"\n\n# !/bin/python3\n\nimport os\n\n\ndef flatland_space_stations(n, c):\n c.sort()\n max_distance = 0\n\n # edge cases:\n max_distance = max(max_distance, c[0] - 0)\n max_distance = max(max_distance, n - 1 - c[-1])\n\n for i in range(1, len(c)):\n max_distance = max(max_distance, (c[i] - c[i - 1]) // 2)\n\n return max_distance\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n c = list(map(int, input().rstrip().split()))\n\n result = flatland_space_stations(n, c)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"hackerrank/Practice/Problem Solving/Algorithms/Implementation/Easy/flatland_space_stations.py","file_name":"flatland_space_stations.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"462080781","text":"from zope.interface import implementer\nfrom pyramid.security import authenticated_userid\nfrom pyramid.threadlocal import get_current_request\n\nimport ptah\nfrom ptah import config\nfrom ptah.uri import resolve, resolver\nfrom ptah.util import tldata\nfrom ptah.interfaces import IAuthInfo, IAuthentication\n\n\nclass _Superuser(object):\n \"\"\" Default ptah superuser. 
check_permission always pass with superuser \"\"\"\n\n def __init__(self):\n self.__uri__ = 'ptah-auth:superuser'\n self.login = ''\n self.name = 'Manager'\n\n def __repr__(self):\n return ''\n\n\nSUPERUSER = _Superuser()\nSUPERUSER_URI = 'ptah-auth:superuser'\n\n\n@resolver('ptah-auth')\ndef superuser_resolver(uri):\n \"\"\"System super user\"\"\"\n if uri == SUPERUSER_URI:\n return SUPERUSER\n\n\nAUTH_CHECKER_ID = 'ptah:authchecker'\nAUTH_PROVIDER_ID = 'ptah:authprovider'\nAUTH_SEARCHER_ID = 'ptah:authsearcher'\n\n\ndef auth_checker(checker):\n \"\"\" register authentication checker::\n\n @ptah.auth_checker\n def my_checker(info):\n ...\n\n \"\"\"\n info = config.DirectiveInfo()\n discr = (AUTH_CHECKER_ID, hash(checker))\n intr = config.Introspectable(\n AUTH_CHECKER_ID, discr, checker.__name__, AUTH_CHECKER_ID)\n intr['name'] = '{0}.{1}'.format(info.codeinfo.module, checker.__name__)\n intr['callable'] = checker\n intr['codeinfo'] = info.codeinfo\n\n info = config.DirectiveInfo()\n info.attach(\n config.Action(\n lambda config, checker: config.get_cfg_storage(AUTH_CHECKER_ID)\\\n .update({id(checker): checker}),\n (checker,), discriminator=discr, introspectables=(intr,))\n )\n return checker\n\n\ndef pyramid_auth_checker(config, checker):\n \"\"\" pyramid configurator directive for authentication checker registration::\n\n config = Configurator()\n config.include('ptah')\n\n def my_checker(info):\n ...\n\n config.ptah_auth_checker(my_checker)\n \"\"\"\n discr = (AUTH_CHECKER_ID, hash(checker))\n intr = ptah.config.Introspectable(AUTH_CHECKER_ID, discr, '', AUTH_CHECKER_ID)\n intr['callable'] = checker\n\n config.action(\n discr,\n lambda config, checker: config.get_cfg_storage(AUTH_CHECKER_ID)\\\n .update({id(checker): checker}),\n (config, checker), introspectables=(intr,))\n\n\ndef auth_provider(name):\n \"\"\" decorator for authentication provider registration::\n\n @ptah.auth_provider('my-provider')\n class AuthProvider(object):\n ...\n \"\"\"\n info = config.DirectiveInfo()\n\n def wrapper(cls):\n discr = (AUTH_PROVIDER_ID, name)\n intr = config.Introspectable(\n AUTH_PROVIDER_ID, discr, name, AUTH_PROVIDER_ID)\n intr['id'] = name\n intr['name'] = '{0}.{1}'.format(info.codeinfo.module, cls.__name__)\n intr['provider'] = cls\n intr['codeinfo'] = info.codeinfo\n\n info.attach(\n config.Action(\n lambda config, n, p: config.get_cfg_storage(AUTH_PROVIDER_ID)\\\n .update({n: cls()}),\n (name, cls), discriminator=discr, introspectables=(intr,))\n )\n return cls\n\n return wrapper\n\n\ndef register_auth_provider(name, provider):\n \"\"\" authentication provider registration::\n\n class AuthProvider(object):\n ...\n\n ptah.register_auth_provider('my-provider', AuthProvider())\n \"\"\"\n info = config.DirectiveInfo()\n discr = (AUTH_PROVIDER_ID, name)\n intr = config.Introspectable(\n AUTH_PROVIDER_ID, discr, name, AUTH_PROVIDER_ID)\n intr['id'] = name\n intr['name'] = '{0}.{1}'.format(\n info.codeinfo.module, provider.__class__.__name__)\n intr['provider'] = provider\n intr['codeinfo'] = info.codeinfo\n\n info.attach(\n config.Action(\n lambda config, n, p: config.get_cfg_storage(AUTH_PROVIDER_ID)\\\n .update({n: p}),\n (name, provider), discriminator=discr, introspectables=(intr,))\n )\n\n\ndef pyramid_auth_provider(config, name, provider):\n \"\"\" pyramid configurator directive for\n authentication provider registration::\n\n class AuthProvider(object):\n ...\n\n config = Configurator()\n config.include('ptah')\n config.ptah_auth_provider('my-provider', AuthProvider())\n \"\"\"\n info = 
ptah.config.DirectiveInfo()\n discr = (AUTH_PROVIDER_ID, name)\n intr = ptah.config.Introspectable(\n AUTH_PROVIDER_ID, discr, name, AUTH_PROVIDER_ID)\n intr['id'] = name\n intr['name'] = '{0}.{1}'.format(\n info.codeinfo.module, provider.__class__.__name__)\n intr['provider'] = provider\n intr['codeinfo'] = info.codeinfo\n\n config.action(\n discr,\n lambda config, n, p: \\\n config.get_cfg_storage(AUTH_PROVIDER_ID).update({n: p}),\n (config, name, provider), introspectables=(intr,))\n\n\n@implementer(IAuthInfo)\nclass AuthInfo(object):\n \"\"\" Authentication information \"\"\"\n\n def __init__(self, principal, status=False, message=''):\n self.__uri__ = getattr(principal, '__uri__', None)\n self.principal = principal\n self.status = status\n self.message = message\n self.arguments = {}\n\n\n_not_set = object()\n\nUSER_KEY = '__ptah_userid__'\nEFFECTIVE_USER_KEY = '__ptah_effective__userid__'\n\n\n@implementer(IAuthentication)\nclass Authentication(object):\n \"\"\" Ptah authentication utility \"\"\"\n\n def authenticate(self, credentials):\n providers = config.get_cfg_storage(AUTH_PROVIDER_ID)\n for pname, provider in providers.items():\n principal = provider.authenticate(credentials)\n if principal is not None:\n info = AuthInfo(principal)\n\n for checker in \\\n config.get_cfg_storage(AUTH_CHECKER_ID).values():\n if not checker(info):\n return info\n\n info.status = True\n return info\n\n return AuthInfo(None)\n\n def authenticate_principal(self, principal):\n info = AuthInfo(principal)\n\n for checker in \\\n config.get_cfg_storage(AUTH_CHECKER_ID).values():\n if not checker(info):\n return info\n\n info.status = True\n return info\n\n def set_userid(self, uri):\n tldata.set(USER_KEY, uri)\n\n def get_userid(self):\n uri = tldata.get(USER_KEY, _not_set)\n if uri is _not_set:\n self.set_userid(authenticated_userid(get_current_request()))\n return tldata.get(USER_KEY)\n return uri\n\n def set_effective_userid(self, uri):\n tldata.set(EFFECTIVE_USER_KEY, uri)\n\n def get_effective_userid(self):\n uri = tldata.get(EFFECTIVE_USER_KEY, _not_set)\n if uri is _not_set:\n return self.get_userid()\n return uri\n\n def get_current_principal(self):\n return resolve(self.get_userid())\n\n def get_principal_bylogin(self, login):\n providers = config.get_cfg_storage(AUTH_PROVIDER_ID)\n\n for pname, provider in providers.items():\n principal = provider.get_principal_bylogin(login)\n if principal is not None:\n return principal\n\nauth_service = Authentication()\n\n\ndef search_principals(term):\n \"\"\" Search principals by term, it uses principal_searcher functions \"\"\"\n searchers = config.get_cfg_storage(AUTH_SEARCHER_ID)\n for name, searcher in searchers.items():\n for principal in searcher(term):\n yield principal\n\n\ndef register_principal_searcher(name, searcher):\n \"\"\" register principal searcher \"\"\"\n discr = (AUTH_SEARCHER_ID, name)\n intr = config.Introspectable(AUTH_SEARCHER_ID, discr, name, AUTH_SEARCHER_ID)\n intr['name'] = name\n intr['callable'] = searcher\n\n info = config.DirectiveInfo()\n info.attach(\n config.Action(\n lambda config, name, searcher:\n config.get_cfg_storage(AUTH_SEARCHER_ID).update({name:searcher}),\n (name, searcher), discriminator=discr, introspectables=(intr,))\n )\n\n\ndef pyramid_principal_searcher(config, name, searcher):\n \"\"\" pyramid configurator directive for principal searcher registration \"\"\"\n discr = (AUTH_SEARCHER_ID, name)\n intr = ptah.config.Introspectable(\n AUTH_SEARCHER_ID, discr, name, AUTH_SEARCHER_ID)\n intr['name'] = name\n 
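Putting the registration hooks in this module together, a minimal provider/searcher pair would look roughly like the sketch below. The decorator names, the authenticate()/get_principal_bylogin() hooks, and the __uri__ attribute come from this file; the Principal class, the in-memory user store, and the dict-shaped credentials are assumptions made only for illustration:

```python
import ptah

class Principal(object):
    # Illustrative principal object; only __uri__ is required by AuthInfo above.
    def __init__(self, uri, login, name):
        self.__uri__ = uri
        self.login = login
        self.name = name

USERS = {"admin": Principal("myapp-user:admin", "admin", "Administrator")}

@ptah.auth_provider("my-provider")
class MyAuthProvider(object):
    def authenticate(self, credentials):
        # The credentials layout (login/password keys) is an assumption.
        user = USERS.get(credentials.get("login"))
        if user is not None and credentials.get("password") == "secret":
            return user

    def get_principal_bylogin(self, login):
        return USERS.get(login)

@ptah.principal_searcher("my-provider")
def search_users(term):
    for user in USERS.values():
        if term in user.login:
            yield user
```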
intr['callable'] = searcher\n\n config.action(\n (AUTH_SEARCHER_ID, name),\n lambda config, name, searcher:\n config.get_cfg_storage(AUTH_SEARCHER_ID).update({name:searcher}),\n (config, name, searcher), introspectables=(intr,))\n\n\ndef principal_searcher(name):\n \"\"\" decorator for principal searcher registration::\n\n @ptah.principal_searcher('test')\n def searcher(term):\n ...\n\n searcher function recives text as term variable, and\n should return iterator to principal objects.\n \"\"\"\n info = config.DirectiveInfo()\n\n def wrapper(searcher):\n discr = (AUTH_SEARCHER_ID, name)\n intr = config.Introspectable(\n AUTH_SEARCHER_ID, discr, name, AUTH_SEARCHER_ID)\n intr['name'] = name\n intr['callable'] = searcher\n\n info.attach(\n config.Action(\n lambda config, name, searcher:\n config.get_cfg_storage(AUTH_SEARCHER_ID)\\\n .update({name: searcher}),\n (name, searcher), discriminator=discr, introspectables=(intr,))\n )\n\n return searcher\n\n return wrapper\n","sub_path":"ptah/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":9536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"601172141","text":"\"\"\"\nCode to calculate the ODR fit of the data\n\"\"\"\nimport numpy as np\nnmax=20\nnmax_one=400\nnmax_two=2000\ndim_zero=3\ndim_one=4\ndim=20\ndim_two=21\ndim_three=420\nscale=22.0\nx = np.ndarray(shape=(dim_one, nmax_one), dtype=float, order='F')\nmat = np.ndarray(shape=(dim, dim_two), dtype=float, order='F') # Values of dim by dim_two\nvec = np.ndarray(shape=dim, dtype=float, order='F') # Size of dim\n# coef(DIM1,DIM1)\ncoef = np.ndarray(shape=(dim_one, dim_one), dtype=float, order='F')\n# covmat(DIM0,DIM,DIM2), scoef(DIM1,DIM1)\ncovmat = np.ndarray(shape=(dim_zero, dim, dim_two), dtype=float, order='F')\nscoef = np.ndarray(shape=(dim_one, dim_one), dtype=float, order='F')\n# var(DIM1)\nvar = np.ndarray(shape=dim_one, dtype=float, order='F')\n# work(DIM3), res(NMAX1)\nwork = np.ndarray(shape=(dim_three), dtype=float, order='F')\nres = np.ndarray(shape=(nmax_one), dtype=float, order='F')\n# real cond\ncond = 0.0\n\n# integer i, errcode, norder, info\ni = 0\nerrcode = 0\nnorder = 0\ninfo = 0\n# integer nclus, nlda, ntotal, nfit\nnclus = 0\nnlda = 0\nntotal = 0\nnfit = 0\n# logical vflag\nvlfag = False\n\n\n########################### fitplan.f converted code #########################\n\"\"\"c Get the dimension and flag for output. Read data from STDIN\n read(*,*) norder, vflag\n if(norder .eq. 2 .or. norder .eq. 3) then\n do 30 i=1,NMAX1,1\n read(*,*,err=940,end=40) (x(j,i), j=1,norder)\nc Find the column with expected mu in it and transform\n if(norder .eq. 3 .and. i .eq. 1) then\n if( x(1,i) .gt. x(2,i) .and. x(1,i) .gt. x(3,i) ) then\n ncol=1\n else if( x(2,i) .gt. x(1,i) .and. x(2,i) .gt. x(3,i) ) then\n ncol=2\n else if( x(3,i) .gt. x(1,i) .and. x(3,i) .gt. x(2,i) ) then\n ncol=3\n endif\n endif\n if(norder .eq. 
3) then\n x(ncol,i) = x(ncol,i)-SCALE\n endif\n 30 continue\n else\n write(*,*) \"Wrong dimension used, must be 2 or 3\"\n stop\n endif\n\n 40 continue\n\n if(vflag) then\n write(*,*) \"==> End of reading <==\"\n endif\n write(*,*) \"ncol=\",ncol\n\n ntotal = i-1\n\"\"\"\n# Get dimensions and flags for output\n\n\n# Initialize x(norder+1,j) = 1\nfor j in range(1, ntotal):\n x[norder+1, j] = 1.0\n\n# initialize coefficients\nfor i in range(1, norder):\n for j in range(1, norder):\n coef[i,j] = 0.0\n coef[i, norder+1] = -1.0\n","sub_path":"fitplan.py","file_name":"fitplan.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"239173804","text":"from scipy.io import wavfile\r\nimport winsound\r\n\r\ndef freq(file, start_time, end_time):\r\n sample_rate, data = wavfile.read(file)\r\n start_point = int(sample_rate * start_time / 1000)\r\n end_point = int(sample_rate * end_time / 1000)\r\n length = (end_time - start_time) / 1000\r\n counter = 0\r\n for i in range(start_point, end_point):\r\n if data[i] < 0 and data[i+1] > 0:\r\n counter += 1\r\n return [counter/length, counter, length] \r\n# def clip(n, min_, max_):\r\n# return max(min_, min(n, max_))\r\n# def freq(f, tempo):\r\n# sr, data = wavfile.read(f)\r\n# dt = []\r\n# for i in range(len(data)):\r\n# dt.append(clip(data[i], -1000, 1000))\r\n# tempo = int((60 / tempo) * sr)\r\n# for i in range(int(len(data) / tempo)):\r\n# print(i)\r\n# count = 0\r\n# dmod = dt[i:i+tempo]\r\n# for d in dmod:\r\n# h = -10000\r\n# l = 10000\r\n# if d > h:\r\n# h = d\r\n# if d < l:\r\n# l = d\r\n\r\n# print(h, l, d)\r\n# for d in dmod:\r\n# if d == h:\r\n# count += 1\r\n# print(count)\r\n\r\n\r\n \r\n \r\n# freq('Python/scale.wav', 120)\r\nprint(freq('scale.wav', 0, 499), freq('scale.wav', 501, 999))","sub_path":"Python/audioAnalyzer.py","file_name":"audioAnalyzer.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"232989963","text":"# This script makes some plots \nfrom scipy.interpolate import griddata\nimport scipy.interpolate\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nnew_GM4_gaspos = np.loadtxt('GM4_GM1matchedgas_posxyz.txt')\nnew_GM4_gastemp = np.loadtxt('GM4_GM1matchedgas_temp.txt')\nnew_GM4_gasmetal = np.loadtxt('GM4_GM1matchedgas_metal.txt')\n\nnew_GM1_gaspos = np.loadtxt('GM1matchedgas_posxyz.txt')\nnew_GM1_gastemp = np.loadtxt('GM1matchedgas_temp.txt')\nnew_GM1_gasmetal = np.loadtxt('GM1matchedgas_metal.txt')\n\n\nfig = plt.figure(figsize=(15, 5))\nax1 = fig.add_subplot(121) \nax2 = fig.add_subplot(122) \nax1.hist(new_GM4_gasmetal,bins=50,range=(-0.1,0.1))\nax2.hist(new_GM1_gasmetal,bins=50,range=(-0.1,0.1))\nplt.show()\n\nprint('Min gas metallicity',min(new_GM4_gasmetal),'Max gas metallicity',max(new_GM4_gasmetal))\nprint('# of Gas particles w/ metalfrac < -0.1',len(new_GM4_gasmetal[new_GM4_gasmetal < -0.025]))\nprint('# of Gas particles w/ metalfrac > 0.1',len(new_GM4_gasmetal[new_GM4_gasmetal > 0.025]))\nprint('Mean Z',np.mean(new_GM4_gasmetal))\n\nplt.scatter(new_GM4_gaspos[:,0],new_GM4_gaspos[:,1],c=new_GM4_gasmetal,s=10,cmap=plt.cm.get_cmap('jet'),alpha=0.5,vmin=-0.025,vmax=0.025)\nplt.title('GM4 Gas Particles (Matched with GM1 Gas)')\nplt.xlabel('X [kpc]')\nplt.ylabel('Y [kpc]')\nplt.ylim(-2000,1200)\nplt.xlim(-1900,1800)\ncbar = plt.colorbar()\ncbar.ax.set_ylabel('Metal Mass Fraction')\nplt.savefig('GM4_xy_Z.pdf')\nplt.show()\n\nx = 
new_GM4_gaspos[:,0]\ny = new_GM4_gaspos[:,1]\nz = np.log10(new_GM4_gasmetal)\n\nxi, yi = np.linspace(x.min(), x.max(), 100), np.linspace(y.min(), y.max(), 100)\nzi = griddata((x, y), z, (xi[None,:], yi[:,None]), method='cubic')\n\nplt.contourf(xi,yi,zi,15,cmap=plt.cm.jet,alpha=0.5,vmin=4,vmax=6)\nplt.xlabel('X [kpc]')\nplt.ylabel('Y [kpc]')\nplt.ylim(-2000,1200)\nplt.xlim(-1900,1800)\ncbar = plt.colorbar()\ncbar.ax.set_ylabel('Metal Mass Fration')\nplt.title('GM4 Gas Particles (Matched with GM1 Gas)')\nplt.savefig('GM4_xy_Zcon.pdf')\nplt.show()\n\n#################\n##### GM 1 ######\n#################\n\nprint('Min gas metallicity',min(new_GM1_gasmetal),'Max gas metallicity',max(new_GM1_gasmetal))\nprint('# of Gas particles w/ Z < -0.25',len(new_GM1_gasmetal[new_GM1_gasmetal < -0.025]))\nprint('# of Gas particles w/ Z > 0.25',len(new_GM1_gasmetal[new_GM1_gasmetal > 0.025]))\nprint('Mean Z',np.mean(new_GM1_gasmetal))\n\nplt.scatter(new_GM1_gaspos[:,0],new_GM1_gaspos[:,1],c=new_GM1_gasmetal,s=10,cmap=plt.cm.get_cmap('jet'),alpha=0.5,vmin=-0.025,vmax=0.025)\nplt.title('GM1 Gas Particles (Matched with GM4 Gas)')\nplt.xlabel('X [kpc]')\nplt.ylabel('Y [kpc]')\nplt.ylim(-2000,1200)\nplt.xlim(-1900,1800)\ncbar = plt.colorbar()\ncbar.ax.set_ylabel('Metal Mass Fraction')\nplt.savefig('GM1_xy_Z.pdf')\nplt.show()\n\nx = new_GM1_gaspos[:,0]\ny = new_GM1_gaspos[:,1]\nz = np.log10(new_GM1_gasmetal)\n\nxi, yi = np.linspace(x.min(), x.max(), 100), np.linspace(y.min(), y.max(), 100)\nzi = griddata((x, y), z, (xi[None,:], yi[:,None]), method='cubic')\n\nplt.contourf(xi,yi,zi,15,cmap=plt.cm.jet,alpha=0.5,vmin=4,vmax=6)\nplt.xlabel('X [kpc]')\nplt.ylabel('Y [kpc]')\nplt.ylim(-2000,1200)\nplt.xlim(-1900,1800)\ncbar = plt.colorbar()\ncbar.ax.set_ylabel('Metal Mass Fraction')\nplt.title('GM1 Gas Particles (Matched with GM4 Gas)')\nplt.savefig('GM1_xy_Zcon.pdf')\nplt.show()\n","sub_path":"compareP0GM1GM4/compareGM1GM4/makeGM1h1gasinGM4plot_metals.py","file_name":"makeGM1h1gasinGM4plot_metals.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"378526197","text":"import sys\r\nimport os\r\nimport sqlite3\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n\r\n \r\nclass show:\r\n def __init__(self,root):\r\n self.f=Frame(root,height=600, width=830)\r\n self.f.propagate(0)\r\n self.f.pack()\r\n\r\n self.l=Label(text=\"ALL QUERIES\",font='Helvetica 18 bold')\r\n self.l.place(x=340, y=10)\r\n \r\n #Going back button\r\n self.btn = Button(root, text = 'Back', bd = '5', command = self.returntobvp) \r\n self.btn.pack(side = 'top')\r\n \r\n conn = sqlite3 . 
connect ( 'database.db' )\r\n cursor = conn.cursor ()\r\n\r\n \r\n cursor=conn.execute(\"SELECT q_id, query, Answer from Query\")\r\n \r\n self.tree = ttk.Treeview(self.f, columns = (1,2,3), height = 20, selectmode=\"extended\", show = \"headings\")\r\n self.tree.place(x=30,y=80)\r\n\r\n self.tree.heading(1, text=\"Query ID\")\r\n self.tree.heading(2, text=\"Query\")\r\n self.tree.heading(3, text=\"Answer\")\r\n\r\n self.tree.column(1, width = 55)\r\n self.tree.column(2, width = 350)\r\n self.tree.column(3, width = 350)\r\n\r\n scroll = ttk.Scrollbar(self.f, orient=\"vertical\", command=self.tree.yview)\r\n scroll.pack(side = 'right', fill = 'y')\r\n\r\n self.tree.configure(yscrollcommand=scroll.set)\r\n\r\n for val in cursor:\r\n self.tree.insert('', 'end', values = (val[0],val[1], val[2]) )\r\n def returntobvp(self):\r\n root.destroy()\r\n os.system('python button_vala_page.py')\r\n\r\nroot=Tk()\r\nroot.title(\"VIEW/BROWSE\")\r\n\r\nbt=show(root)\r\n\r\nroot.mainloop()\r\n","sub_path":"Display_Page.py","file_name":"Display_Page.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"156742224","text":"class Stats:\n def __init__(self, player, strength=1, dexterity=1, vitality=1, hearing=0, observation=0, damage=1, armor=0):\n self.myCharacter = player\n self.str = strength\n self.dex = dexterity\n self.vit = vitality\n self.hearing = hearing\n self.observation = observation\n self.damage = damage\n self.armor = armor\n\n\nclass Attack:\n def __init__(self, attack):\n self.attack = attack\n\n\nclass Defence:\n def __init__(self, defence):\n self.defence = defence\n","sub_path":"MUD/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"335081046","text":"from files.config import * \r\nfrom files.superuser import *\r\nuser=superusers()\r\nclass student:\r\n def createstudent(self):\r\n username=input(\"enter the name :\")\r\n studentdetails.append(username)\r\n def checkstudent(self,name):\r\n if name in studentdetails:\r\n return True\r\n else:\r\n return False\r\n def startexam(self):\r\n c=1\r\n for i,j in questions.items():\r\n print(\"question \",c,end=\":\")\r\n print(i)\r\n for k in range(0,4):\r\n print(\"option \",k+1,\":\",j[k])\r\n ans=int(input(\"enter the answer : \"))\r\n answers.append(ans) \r\n def result(self):\r\n result=user.evaluate()\r\n print(\"******* results ******* \")\r\n print(\"total marks : \",result,\"/\",len(questions))\r\n print(\"percentage :\",(result/len(questions)*100.0))\r\n print(\"************************\")\r\n","sub_path":"python/quiz/files/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"653795365","text":"from sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef confusionMatrix(y_true, y_pred, classes, normalize=False, title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / 
cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n ax.set(xticks=np.arange(cm.shape[0]),\n yticks=np.arange(cm.shape[1]),\n xticklabels=classes, yticklabels=classes,\n title=title,\n xlabel='True label',\n ylabel='Predicted label')\n\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax\n","sub_path":"sentimentAnalysis/naiveBayesConfusionMatrix_Sklearn.py","file_name":"naiveBayesConfusionMatrix_Sklearn.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"526461853","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n成交量策略\nThis is a temporary script file.\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport time\nimport talib\nimport math\nimport inspect\n# MONGODB CONNECT\nfrom pymongo import MongoClient\nimport json\nclient = MongoClient('mongodb://127.0.0.1:27017')\n#client = MongoClient('mongodb://112.12.60.2:27017')\nmydb=client[\"ptest\"]\nmycollection=mydb[\"stocks_daily\"]\nimport tushare as ts\nts.set_token('003137d7baa1439f01f9d2917992de6b8511f70f84612c78574d6996')\npro = ts.pro_api()\n\ndef get__function_name():\n '''获取正在运行函数(或方法)名称'''\n return inspect.stack()[1][3]\n#获取col数据库函数 参数 col 返回df\ndef get_col_df(col):\n mycollection=mydb[col]\n rs_stockcode = mycollection.find()\n list_stockcode = list(rs_stockcode)\n #将查询结果转换为Df\n df_stockcode = pd.DataFrame(list_stockcode)\n #print (df_stockcode)\n return df_stockcode\n#获取stockcode条件集合函数 参数 stockcode 返回df\ndef get_df_stockcode(col):\n mycollection=mydb[col]\n rs_stockcode = mycollection.find()\n list_stockcode = list(rs_stockcode)\n #将查询结果转换为Df\n df_stockcode = pd.DataFrame(list_stockcode)\n #print (df_stockcode)\n return df_stockcode\n#计算当前日期的前N天的时间戳\ndef get_day_time(n):\n the_date = datetime.datetime.now()\n pre_date = the_date - datetime.timedelta(days=n)\n pre_date_str = pre_date.strftime('%Y%m%d')#将日期转换为指定的显示格式\n return pre_date_str\n#TRADEDATE\ndef get_lasttradedate(n):\n df = pro.trade_cal(exchange='SSE', is_open='1',fileds='cal_date',start_date=get_day_time(0), end_date=get_day_time(n))\n lasttradeday = df['cal_date'].tail(1).iloc[0]\n return lasttradeday\n\n#TRADEDATE LIST\ndef get_lasttradedatelist(n,days):\n df = pro.trade_cal(exchange='SSE', is_open='1',fileds='cal_date',start_date=get_day_time(n+days), end_date=get_day_time(n))\n lasttradeday_list = df['cal_date'].tolist()\n return lasttradeday_list\n\n#TUSHARE GET STOCK_DAILY BY CODE\ndef get_daily_code(stockcode):\n df = pro.daily(ts_code=stockcode)\n return df\n\n#TUSHARE GET STOCK_DAILY BY tradedate\ndef get_daily_tradedate(tradedate):\n df = pro.daily(trade_date=tradedate)\n return df\n\n#计算MA函数\ndef cal_ma(df,nday):\n temp_serise = df['close'].rolling(nday).mean()\n temp_serise.dropna(inplace=True)\n ma_serise = temp_serise.reset_index(drop=True)\n return ma_serise\ndef cal_ma_ta(df,n):\n temp_serise = talib.MA(df['close'],timeperiod=n)\n temp_serise.dropna(inplace=True)\n ma_serise = temp_serise.reset_index(drop=True)\n 
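The dropna()/reset_index() pattern shared by cal_ma and cal_ma_ta deserves a caveat: re-indexing the moving average from 0 before assigning it back to the source frame moves each value onto an earlier row than the bar it was computed on. A small, self-contained pandas illustration with toy prices (a rolling mean standing in for talib.MA):

```python
import pandas as pd

df = pd.DataFrame({"close": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})

ma = df["close"].rolling(3).mean()              # NaN, NaN, 2.0, 3.0, 4.0, 5.0
shifted = ma.dropna().reset_index(drop=True)    # 2.0, 3.0, 4.0, 5.0 on index 0..3

df["ma_aligned"] = ma        # MA of rows 0-2 sits on row 2 (usual alignment)
df["ma_shifted"] = shifted   # the same value now sits on row 0; rows 4-5 become NaN
print(df)
```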
return ma_serise\n\n# VOL MA\ndef cal_ma_ta_vol(df,n):\n temp_serise = talib.MA(df['vol'],timeperiod=n)\n temp_serise.dropna(inplace=True)\n ma_serise = temp_serise.reset_index(drop=True)\n return ma_serise\n\n# param MA\ndef cal_ma_ta_param(df,param,n):\n temp_serise = talib.MA(df[param],timeperiod=n)\n temp_serise.dropna(inplace=True)\n ma_serise = temp_serise.reset_index(drop=True)\n return ma_serise\n#MA\ndef get_ma_n(stockcode):\n rs_stockcode = mycollection.find({'ts_code':stockcode})\n list_stockcode = list(rs_stockcode)\n list_stockcode.reverse()\n #将查询结果转换为Df\n df_stockcode = pd.DataFrame(list_stockcode)\n if (df_stockcode.empty):\n print (stockcode)\n else:\n df_stockcode['MA5'] = cal_ma_ta(df_stockcode,5)\n df_stockcode['MA10'] = cal_ma_ta(df_stockcode,10)\n df_stockcode['MA20'] = cal_ma_ta(df_stockcode,20)\n df_stockcode['MA30'] = cal_ma_ta(df_stockcode,30)\n return df_stockcode\n\n#GET ALL DAILY\ndef get_daily_all():\n mycollection=mydb[\"stocks_daily_qfq\"]\n rs_stockcode = mycollection.find()\n list_stockcode = list(rs_stockcode)\n list_stockcode.reverse()\n #将查询结果转换为Df\n df_stockcode = pd.DataFrame(list_stockcode)\n return df_stockcode\n\n#GET DAILYBASIC\ndef get_dailybasic():\n mycollection=mydb[\"dailybasic_last\"]\n rs_dailybasic = mycollection.find()\n list_dailybasic = list(rs_dailybasic)\n #将查询结果转换为Df\n df_dailybasic = pd.DataFrame(list_dailybasic)\n return df_dailybasic\n\n#GET ALL WEEKLY\ndef get_weekly_all():\n mycollection=mydb[\"stocks_weekly_qfq\"]\n rs_stockcode = mycollection.find()\n list_stockcode = list(rs_stockcode)\n list_stockcode.reverse()\n #将查询结果转换为Df\n df_stockcode = pd.DataFrame(list_stockcode)\n return df_stockcode\n#粘合算法,返回粘合系数列表\ndef cal_stick(df,param1,param2,n):\n stick_list = []\n i = 0\n while in_avg):\n roundlowflag = True \n return roundlowflag\n\n#圆底算法2,连续3日增加,返回True/False\ndef cal_round_low2(df,param1,n,step):\n roundlowflag = False\n #获取n日最高点\n n_max = max(df[param1][1:n])\n n_naddstep = df.at[n+step,param1]\n n_0 = df.at[0,param1]\n n_1 = df.at[1,param1]\n n_2 = df.at[2,param1]\n #判断n日未超过前期高点\n if (n_max < n_naddstep):\n #判断最近交易日高于前期平均值\n if(n_0>n_1 and n_1>n_2):\n roundlowflag = True \n return roundlowflag\n \n#交易日成交量圆弧底形态策略\ndef get_daily_vol_round_low(df,n):\n df_dailybasic_last = get_dailybasic()\n result_list = []\n df_ma = get_daily_all()\n df_ma_gb_stockcode = df_ma.groupby('ts_code')\n for name,group in df_ma_gb_stockcode:\n df_group=pd.DataFrame(group)\n df_group = df_group.sort_values(by=\"trade_date\",ascending=False)\n df_group.reset_index(drop=True, inplace=True)\n if (df_group.empty or len(df_group)max(df_group['MA30'][1:3]) and df_group['MA20'][0]>max(df_group['MA20'][1:3])):\n resultflag_vol = cal_round_low2(df_group,'vol',n,1)\n resultflag_close = cal_round_low2(df_group,'close',n,1)\n if (resultflag_vol and resultflag_close):\n print (name,df_group['trade_date'][0],df_group['close'][0],df_group['vol'][0])\n result_list.append(name)\n df_result = df_dailybasic_last[df_dailybasic_last.ts_code.isin(result_list)]\n result = pd.merge(df, df_result, how='right', on=['ts_code'])\n print (len(result_list))\n return result\n#market 主板 科创板 主板 中小板 创业板\ndef get_stockbasket(exchange,market):\n data = pro.stock_basic(exchange=exchange, list_status='L')\n data = data[~ data['name'].str.contains('ST|退')]\n if (market!=''):\n data = data[data['market']==market]\n data['ts_name'] = data['name']\n return data\n\ndef get_stockbasket_nochuang(exchange):\n data = pro.stock_basic(exchange=exchange, list_status='L')\n data = data[~ 
data['name'].str.contains('ST|退')]\n data = data[~ data['market'].str.contains('科创板|创业板')]\n data['ts_name'] = data['name']\n return data\n\n#n日成交量最低位阶段性低点策略\ndef get_stocks_daily_n_vol_low(n):\n FNAME = get__function_name()\n TIMENOW = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print ('START',FNAME,TIMENOW)\n result_df=pd.DataFrame()\n df_stocks = get_stockbasket('','')\n for stockcode in df_stocks['ts_code']:\n result_dict={}\n #获取前复权数据,默认为日期降序\n df_qfq = get_df_stockcode('daily_qfq_macd_'+stockcode)\n if (df_qfq is None or len(df_qfq)df_qfq['close'][1]>close_min_n_1):\n print (stockcode,df_qfq['trade_date'][0])\n result_dict['ts_code'] = stockcode\n result_dict['trigger_trade_date'] = df_qfq['trade_date'][0]\n result_dict['trigger_close'] = df_qfq['close'][0]\n result_dict['reason'] = FNAME+str(n)\n result_df = result_df.append(result_dict,ignore_index=True)\n TIMENOW = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print ('END',FNAME,len(result_df),TIMENOW)\n return result_df \n#get_stocks_daily_n_close_low_vol_close_up(40)\n\n#最近N交易日天量天价高换手策略\ndef get_stocks_daily_n_turnover_up_vol_close_up(n):\n FNAME = get__function_name()\n TIMENOW = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print ('START',FNAME,TIMENOW)\n result_df=pd.DataFrame() \n df_stocks = get_stockbasket('','')\n for stockcode in df_stocks['ts_code']:\n result_dict={}\n #获取前复权数据,默认为日期降序\n df_qfq = get_df_stockcode('daily_qfq_macd_'+stockcode)\n if (df_qfq is None or len(df_qfq)20 and chg_pct_n>3):\n print (stockcode,df_qfq['trade_date'][0],turnover_n,chg_pct_n)\n result_dict['ts_code'] = stockcode\n result_dict['trigger_trade_date'] = df_qfq['trade_date'][0]\n result_dict['trigger_close'] = df_qfq['close'][0]\n result_dict['reason'] = FNAME+str(n)\n result_df = result_df.append(result_dict,ignore_index=True)\n TIMENOW = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print ('END',FNAME,len(result_df),TIMENOW)\n return result_df \n#get_stocks_daily_n_turnover_up_vol_close_up(10)\n\n#n日成交量最低位阶段性低点策略\ndef get_stocks_daily_n_vol_close_low(sn,vn,cn):\n FNAME = get__function_name()\n TIMENOW = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n #print ('START',FNAME,TIMENOW)\n result_df=pd.DataFrame()\n df_stocks = get_stockbasket('','')\n for stockcode in df_stocks['ts_code']:\n result_dict={}\n #获取前复权数据,默认为日期降序\n df_qfq = get_df_stockcode('daily_qfq_macd_'+stockcode)\n if (df_qfq is None or len(df_qfq)0])/len(df),2) \n print ('涨跌统计:',close_up_ratio,' 涨跌幅统计:',round(df['close_diff_ratio'].mean(),3),round(df['close_diff_ratio'].max(),3),round(df['close_diff_ratio'].min(),3))\n i+=1\n#testdemo(30,20,80)\n#testdemo(30,30,80)\n#testdemo(30,40,80)\n#testdemo(30,50,80)\n#testdemo(30,60,80)\n#testdemo(30,70,80)\n#testdemo(30,80,80)\n\n#n周成交量最低位阶段性低点策略\ndef get_stocks_weekly_n_vol_close_low(n):\n FNAME = get__function_name()\n TIMENOW = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print ('START',FNAME,TIMENOW)\n result_df=pd.DataFrame()\n df_stocks = get_stockbasket('','')\n for stockcode in df_stocks['ts_code']:\n result_dict={}\n #获取前复权数据,默认为日期降序\n df_qfq = get_df_stockcode('weekly_qfq_macd_'+stockcode)\n if (df_qfq is None or len(df_qfq) max_val:\n max_val = new_val\n max_act = act # this is the hybrid stage of argmax\n self.vals0[each_state] = [max_val, max_act]\n delta = max(delta, abs(temp - self.vals0[each_state][0]))\n if delta < self.theta:\n break\n if debug: print(\"Value based init time\",pre_load, time.process_time()-start_time-pre_load, i)\n print(z)\n 
self.time = time.process_time() - start_time\n self.loops = i\n \n\n def play(self, state):\n # maybe if we are in the winning state, we can skip evaluating\n return self.vals0[state][1]\n \n###############################################################\n \ndef play_game_with_agent(agent, game, verbose=False):\n state = game.reset()\n \n if(verbose): print(f\"Testing agent: \\n\\t{type(agent).__name__}\")\n if(verbose): print(f\"Starting dice: \\n\\t{state}\\n\")\n start_state = state\n \n game_over = False\n actions = 0\n while not game_over:\n action = agent.play(state) # make a move based on the state\n actions += 1 # count how many moves\n \n if(verbose): print(f\"Action {actions}: \\t{action}\")\n _, state, game_over = game.roll(action)\n if(verbose and not game_over): print(f\"Dice: \\t\\t{state}\")\n\n #if(verbose): print(f\"\\nFinal dice: {state}, score: {game.score}\")\n if(verbose): print(f\"Start state: {start_state}, \\t{type(agent).__name__}, \\tFinal dice: {state}, score: {game.score}\")\n \n return game.score\n\n\ndef main():\n # random seed makes the results deterministic\n # change the number to see different results\n #  or delete the line to make it change each time it is run\n\n n = [1000]\n thetas = [0.001]\n gammas = [1]\n results = []\n penalties = [0.5,0,1]\n for cycle in n:\n for val in penalties:\n for theta in thetas:\n for gamma in gammas:\n np.random.seed(1)\n #game = DiceGame(dice=3, sides=6, bias=[0.1, 0.1, 0.1, 0.5, 0.1, 0.1], penalty = val)\n #game = DiceGame(dice=2, sides=3, values=[1, 2, 6], bias=[0.5, 0.1, 0.4], penalty=2)\n game = DiceGame()\n agent2 = MyAgent(game, theta, gamma)\n init_time = agent2.time\n init_loops = agent2.loops\n\n for i in range(cycle):\n results.append(play_game_with_agent(agent2, game, verbose=False))\n print(\"time:\", init_time, \"\\t\", \"loop:\", init_loops, \"cycle:\",cycle, \"\\t\", \"theta:\", theta, \"\\t\", \"gamma:\", gamma, \"\\t\", \"penalty:\", val, \"\\t\", \"min:\", min(results), \"\\t\", \"max:\", max(results), \"\\t\", \"average\", np.average(results))\n import matplotlib.pyplot as plt\n plt.hist(results, bins = range(-20,30))\n title = \"Value iter Game_results (2 dice)\" + str(theta) + \" - \" + str(gamma) + \" - \" + str(val)\n file = title + \".png\"\n plt.title(title)\n plt.savefig(file)\n plt.clf()\n results = []\n\nif __name__ == \"__main__\":\n main()","sub_path":"old/assignment_v0.1_purevalue_v2.py","file_name":"assignment_v0.1_purevalue_v2.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"396871268","text":"import unittest\nimport os.path\nfrom sqlalchemy import *\nimport pandas as pd\nimport pytest\n\nfrom databass import *\nfrom databass.ops import *\n\n\n\n@pytest.fixture(scope=\"class\")\ndef setup(request):\n \"\"\"\n init and attach sqlite\n \"\"\"\n db = Database.db()\n tablename = \"tdata\"\n\n sqlite = create_engine(\"sqlite://\")\n for tname in db.tablenames:\n if tname in db._df_registry:\n db._df_registry[tname].to_sql(tname, sqlite, index=False)\n\n sqlite.execute(\"DROP TABLE IF EXISTS tdata\")\n df = pd.DataFrame(np.random.randint(0, 100, size=(1000, 4)), columns=list(\"abcd\"))\n db.register_dataframe(tablename, df)\n db._df_registry[tablename].to_sql(tablename, sqlite, index=False)\n\n request.cls.db = db\n request.cls.sqlite = sqlite\n request.cls.opt = Optimizer(db)\n return sqlite\n\n\n@pytest.mark.usefixtures('setup')\nclass TestBase(unittest.TestCase):\n\n def 
run_sqlite_query(self, qstr):\n res = self.sqlite.execute(qstr)\n sqlite_rows = list()\n for row in res:\n vals = []\n for v in row:\n if isinstance(v, str):\n vals.append(v)\n else:\n vals.append(float(v))\n sqlite_rows.append(vals)\n return sqlite_rows\n\n def run_databass_query(self, qstr):\n plan = parse(qstr)\n plan = plan.to_plan()\n return self.run_plan(plan)\n\n def run_plan(self, plan):\n databass_rows = list()\n plan = self.opt(plan)\n for row in plan:\n vals = []\n for v in row:\n if isinstance(v, str):\n vals.append(v)\n else:\n vals.append(float(v))\n databass_rows.append(vals)\n return databass_rows\n\n\n def run_query(self, qstr, order_matters=False):\n sqlite_rows = self.run_sqlite_query(qstr)\n databass_rows = self.run_databass_query(qstr)\n self.compare_results(\n sqlite_rows, databass_rows, order_matters)\n\n\n def compare_results(self, rows1, rows2, order_matters):\n if order_matters:\n rows1.sort()\n rows2.sort()\n try:\n self.assertEqual(len(rows1), len(rows2))\n for r1, r2 in zip(rows1, rows2):\n self.assertEqual(list(r1), list(r2))\n except Exception as e:\n print(rows1)\n print()\n print(rows2)\n raise e\n\n\n def check_schema(self, schema1, schema2):\n self.assertEqual(len(schema1.attrs), len(schema2.attrs))\n for i, attr in enumerate(schema1.attrs):\n self.assertEqual(attr.aname, schema2.attrs[i].aname)\n self.assertEqual(attr.get_type(), schema2.attrs[i].get_type())\n\n\n","sub_path":"test/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"373212656","text":"import gzip\nimport json\nimport os\nimport re\nimport sys\nimport tarfile\nimport zipfile\n\nimport globus_sdk\nfrom globus_sdk.base import BaseClient\nfrom globus_sdk.response import GlobusHTTPResponse\nfrom tqdm import tqdm\n\nfrom six import print_\n\nAUTH_SCOPES = {\n \"transfer\": \"urn:globus:auth:scope:transfer.api.globus.org:all\",\n \"search\": \"urn:globus:auth:scope:search.api.globus.org:search\",\n \"search_ingest\": \"urn:globus:auth:scope:search.api.globus.org:all\",\n \"mdf\": \"urn:globus:auth:scope:data.materialsdatafacility.org:all\",\n # \"urn:globus:auth:scope:api.materialsdatafacility.org:all\"\n \"publish\": (\"https://auth.globus.org/scopes/\"\n \"ab24b500-37a2-4bad-ab66-d8232c18e6e5/publish_api\")\n # \"urn:globus:auth:scope:publish.api.globus.org:all\"\n}\n\n\n# *************************************************\n# * Authentication utilities\n# *************************************************\n\ndef login(credentials=None, clear_old_tokens=False, **kwargs):\n \"\"\"Login to Globus services\n\n Arguments:\n credentials (str or dict): A string filename, string JSON, or dictionary\n with credential and config information.\n By default, looks in ~/mdf/credentials/globus_login.json.\n Contains:\n app_name (str): Name of script/client. 
This will form the name of the token cache file.\n services (list of str): Services to authenticate with.\n Services are listed in AUTH_SCOPES.\n client_id (str): The ID of the client, given when registered with Globus.\n Default is the MDF Native Clients ID.\n clear_old_tokens (bool): If True, delete old token file if it exists, forcing user to re-login.\n If False, use existing token file if there is one.\n Default False.\n\n Returns:\n dict: The clients and authorizers requested, indexed by service name.\n For example, if login() is told to auth with 'search'\n then the search client will be in the 'search' field.\n \"\"\"\n NATIVE_CLIENT_ID = \"98bfc684-977f-4670-8669-71f8337688e4\"\n DEFAULT_CRED_FILENAME = \"globus_login.json\"\n DEFAULT_CRED_PATH = os.path.expanduser(\"~/mdf/credentials\")\n\n def _get_tokens(client, scopes, app_name, force_refresh=False):\n token_path = os.path.join(DEFAULT_CRED_PATH, app_name + \"_tokens.json\")\n if force_refresh:\n if os.path.exists(token_path):\n os.remove(token_path)\n if os.path.exists(token_path):\n with open(token_path, \"r\") as tf:\n try:\n tokens = json.load(tf)\n # Check that requested scopes are present\n # :all scopes should override any scopes with lesser permissions\n # Some scopes are returned in multiples and should be separated\n existing_scopes = []\n for sc in [val[\"scope\"] for val in tokens.values()]:\n if \" \" in sc:\n existing_scopes += sc.split(\" \")\n else:\n existing_scopes.append(sc)\n permissive_scopes = [scope.replace(\":all\", \"\")\n for scope in existing_scopes\n if scope.endswith(\":all\")]\n missing_scopes = [scope for scope in scopes.split(\" \")\n if scope not in existing_scopes\n and not any([scope.startswith(per_sc)\n for per_sc in permissive_scopes])\n and not scope.strip() == \"\"]\n # If some scopes are missing, regenerate tokens\n # Get tokens for existing scopes and new scopes\n if len(missing_scopes) > 0:\n scopes = \" \".join(existing_scopes + missing_scopes)\n os.remove(token_path)\n except ValueError:\n # Tokens corrupted\n os.remove(token_path)\n if not os.path.exists(token_path):\n os.makedirs(DEFAULT_CRED_PATH, exist_ok=True)\n client.oauth2_start_flow(requested_scopes=scopes, refresh_tokens=True)\n authorize_url = client.oauth2_get_authorize_url()\n\n print_(\"It looks like this is the first time you're accessing this service.\",\n \"\\nPlease log in to Globus at this link:\\n\", authorize_url)\n auth_code = input(\"Copy and paste the authorization code here: \").strip()\n\n # Handle 401s\n try:\n token_response = client.oauth2_exchange_code_for_tokens(auth_code)\n except globus_sdk.GlobusAPIError as e:\n if e.http_status == 401:\n print_(\"\\nSorry, that code isn't valid.\"\n \" You can try again, or contact support.\")\n sys.exit(1)\n else:\n raise\n tokens = token_response.by_resource_server\n\n os.umask(0o077)\n with open(token_path, \"w\") as tf:\n json.dump(tokens, tf)\n print_(\"Thanks! 
You're now logged in.\")\n\n return tokens\n\n if type(credentials) is str:\n try:\n with open(credentials) as cred_file:\n creds = json.load(cred_file)\n except IOError:\n try:\n creds = json.loads(credentials)\n except ValueError:\n raise ValueError(\"Credential string unreadable\")\n elif type(credentials) is dict:\n creds = credentials\n else:\n try:\n with open(os.path.join(os.getcwd(), DEFAULT_CRED_FILENAME)) as cred_file:\n creds = json.load(cred_file)\n except IOError:\n try:\n with open(os.path.join(DEFAULT_CRED_PATH, DEFAULT_CRED_FILENAME)) as cred_file:\n creds = json.load(cred_file)\n except IOError:\n raise ValueError(\"Credentials/configuration must be passed as a \"\n + \"filename string, JSON string, or dictionary, or provided in '\"\n + DEFAULT_CRED_FILENAME\n + \"' or '\"\n + DEFAULT_CRED_PATH\n + \"'.\")\n\n native_client = globus_sdk.NativeAppAuthClient(creds.get(\"client_id\", NATIVE_CLIENT_ID),\n app_name=creds.get(\"app_name\", \"unknown\"))\n\n servs = []\n for serv in creds.get(\"services\", []):\n serv = serv.lower().strip()\n if type(serv) is str:\n servs += serv.split(\" \")\n else:\n servs += list(serv)\n # Translate services into scopes, pass bad/unknown services\n scopes = \" \".join([AUTH_SCOPES.get(sc, \"\") for sc in servs])\n\n all_tokens = _get_tokens(native_client, scopes, creds.get(\"app_name\", \"unknown\"),\n force_refresh=clear_old_tokens)\n\n clients = {}\n if \"transfer\" in servs:\n try:\n transfer_authorizer = globus_sdk.RefreshTokenAuthorizer(\n all_tokens[\"transfer.api.globus.org\"][\"refresh_token\"],\n native_client)\n clients[\"transfer\"] = globus_sdk.TransferClient(authorizer=transfer_authorizer)\n # Token not present\n except KeyError:\n print_(\"Error: Unable to retrieve Transfer tokens.\\n\"\n \"You may need to delete your old tokens and retry.\")\n clients[\"transfer\"] = None\n # Other issue\n except globus_sdk.GlobusAPIError as e:\n print_(\"Error: Unable to create Transfer client (\" + e.message + \").\")\n clients[\"transfer\"] = None\n # Remove processed service\n servs.remove(\"transfer\")\n\n if \"search_ingest\" in servs:\n try:\n ingest_authorizer = globus_sdk.RefreshTokenAuthorizer(\n all_tokens[\"search.api.globus.org\"][\"refresh_token\"],\n native_client)\n clients[\"search_ingest\"] = globus_sdk.SearchClient(authorizer=ingest_authorizer)\n # Token not present\n except KeyError:\n print_(\"Error: Unable to retrieve Search (ingest) tokens.\\n\"\n \"You may need to delete your old tokens and retry.\")\n clients[\"search_ingest\"] = None\n # Other issue\n except globus_sdk.GlobusAPIError as e:\n print_(\"Error: Unable to create Search (ingest) client (\" + e.message + \").\")\n clients[\"search_ingest\"] = None\n # Remove processed service\n servs.remove(\"search_ingest\")\n # And redundant service\n try:\n servs.remove(\"search\")\n # No issue if it isn't there\n except Exception:\n pass\n elif \"search\" in servs:\n try:\n search_authorizer = globus_sdk.RefreshTokenAuthorizer(\n all_tokens[\"search.api.globus.org\"][\"refresh_token\"],\n native_client)\n clients[\"search\"] = globus_sdk.SearchClient(authorizer=search_authorizer)\n # Token not present\n except KeyError:\n print_(\"Error: Unable to retrieve Search tokens.\\n\"\n \"You may need to delete your old tokens and retry.\")\n clients[\"search\"] = None\n # Other issue\n except globus_sdk.GlobusAPIError as e:\n print_(\"Error: Unable to create Search client (\" + e.message + \").\")\n clients[\"search\"] = None\n # Remove processed service\n 
servs.remove(\"search\")\n\n if \"mdf\" in servs:\n try:\n mdf_authorizer = globus_sdk.RefreshTokenAuthorizer(\n all_tokens[\"data.materialsdatafacility.org\"][\"refresh_token\"],\n native_client)\n clients[\"mdf\"] = mdf_authorizer\n # Token not present\n except KeyError:\n print_(\"Error: Unable to retrieve MDF tokens.\\n\"\n \"You may need to delete your old tokens and retry.\")\n clients[\"mdf\"] = None\n # Other issue\n except globus_sdk.GlobusAPIError as e:\n print_(\"Error: Unable to create MDF Authorizer (\" + e.message + \").\")\n clients[\"mdf\"] = None\n # Remove processed service\n servs.remove(\"mdf\")\n\n if \"publish\" in servs:\n try:\n publish_authorizer = globus_sdk.RefreshTokenAuthorizer(\n all_tokens[\"publish.api.globus.org\"][\"refresh_token\"],\n native_client)\n clients[\"publish\"] = DataPublicationClient(authorizer=publish_authorizer)\n # Token not present\n except KeyError:\n print_(\"Error: Unable to retrieve Publish tokens.\\n\"\n \"You may need to delete your old tokens and retry.\")\n clients[\"publish\"] = None\n # Other issue\n except globus_sdk.GlobusAPIError as e:\n print_(\"Error: Unable to create Publish client (\" + e.message + \").\")\n clients[\"publish\"] = None\n # Remove processed service\n servs.remove(\"publish\")\n\n # Warn of invalid services\n if servs:\n print_(\"\\n\".join([\"Unknown or invalid service: '\" + sv + \"'.\" for sv in servs]))\n\n return clients\n\n\ndef confidential_login(credentials=None):\n \"\"\"Login to Globus services as a confidential client (a client with its own login information).\n\n Arguments:\n credentials (str or dict): A string filename, string JSON, or dictionary\n with credential and config information.\n By default, uses the DEFAULT_CRED_FILENAME and DEFAULT_CRED_PATH.\n Contains:\n client_id (str): The ID of the client.\n client_secret (str): The client's secret for authentication.\n services (list of str): Services to authenticate with.\n Services are listed in AUTH_SCOPES.\n\n Returns:\n dict: The clients and authorizers requested, indexed by service name.\n For example, if confidential_login() is told to auth with 'search'\n then the search client will be in the 'search' field.\n \"\"\"\n DEFAULT_CRED_FILENAME = \"confidential_globus_login.json\"\n DEFAULT_CRED_PATH = os.path.expanduser(\"~/mdf/credentials\")\n # Read credentials\n if type(credentials) is str:\n try:\n with open(credentials) as cred_file:\n creds = json.load(cred_file)\n except IOError:\n try:\n creds = json.loads(credentials)\n except ValueError:\n raise ValueError(\"Credentials unreadable or missing\")\n elif type(credentials) is dict:\n creds = credentials\n else:\n try:\n with open(os.path.join(os.getcwd(), DEFAULT_CRED_FILENAME)) as cred_file:\n creds = json.load(cred_file)\n except IOError:\n try:\n with open(os.path.join(DEFAULT_CRED_PATH, DEFAULT_CRED_FILENAME)) as cred_file:\n creds = json.load(cred_file)\n except IOError:\n raise ValueError(\"Credentials/configuration must be passed as a \"\n + \"filename string, JSON string, or dictionary, or provided in '\"\n + DEFAULT_CRED_FILENAME\n + \"' or '\"\n + DEFAULT_CRED_PATH\n + \"'.\")\n\n conf_client = globus_sdk.ConfidentialAppAuthClient(creds[\"client_id\"], creds[\"client_secret\"])\n servs = []\n for serv in creds[\"services\"]:\n serv = serv.lower().strip()\n if type(serv) is str:\n servs += serv.split(\" \")\n else:\n servs += list(serv)\n\n clients = {}\n if \"transfer\" in servs:\n clients[\"transfer\"] = globus_sdk.TransferClient(\n 
authorizer=globus_sdk.ClientCredentialsAuthorizer(\n conf_client, scopes=AUTH_SCOPES[\"transfer\"]))\n # Remove processed service\n servs.remove(\"transfer\")\n\n if \"search_ingest\" in servs:\n clients[\"search_ingest\"] = globus_sdk.SearchClient(\n authorizer=globus_sdk.ClientCredentialsAuthorizer(\n conf_client, scopes=AUTH_SCOPES[\"search_ingest\"]))\n # Remove processed service\n servs.remove(\"search_ingest\")\n # And redundant service\n try:\n servs.remove(\"search\")\n # No issue if it isn't there\n except Exception:\n pass\n elif \"search\" in servs:\n clients[\"search\"] = globus_sdk.SearchClient(\n authorizer=globus_sdk.ClientCredentialsAuthorizer(\n conf_client, scopes=AUTH_SCOPES[\"search\"]))\n # Remove processed service\n servs.remove(\"search\")\n\n if \"mdf\" in servs:\n clients[\"mdf\"] = globus_sdk.ClientCredentialsAuthorizer(\n conf_client, scopes=AUTH_SCOPES[\"mdf\"])\n # Remove processed service\n servs.remove(\"mdf\")\n\n if \"publish\" in servs:\n clients[\"publish\"] = DataPublicationClient(\n authorizer=globus_sdk.ClientCredentialsAuthorizer(\n conf_client, scopes=AUTH_SCOPES[\"publish\"]))\n # Remove processed service\n servs.remove(\"publish\")\n\n # Warn of invalid services\n if servs:\n print_(\"\\n\".join([\"Unknown or invalid service: '\" + sv + \"'.\" for sv in servs]))\n\n return clients\n\n\ndef anonymous_login(services):\n \"\"\"Initialize services without authenticating to Globus Auth.\n\n Arguments:\n services (str or list of str): The services to initialize clients for.\n Note that not all services support unauthenticated clients.\n\n Returns:\n dict: The clients requested, indexed by service name.\n For example, if anonymous_login() is told to auth with 'search'\n then the search client will be in the 'search' field.\n \"\"\"\n if isinstance(services, str):\n services = [services]\n\n clients = {}\n # Initialize valid services\n if \"transfer\" in services:\n clients[\"transfer\"] = globus_sdk.TransferClient()\n services.remove(\"transfer\")\n\n if \"search\" in services:\n clients[\"search\"] = globus_sdk.SearchClient()\n services.remove(\"search\")\n\n if \"publish\" in services:\n clients[\"publish\"] = DataPublicationClient()\n services.remove(\"publish\")\n\n # Notify user of auth-only services\n if \"search_ingest\" in services:\n print_(\"Error: Service 'search_ingest' requires authentication.\")\n services.remove(\"search_ingest\")\n\n if \"mdf\" in services:\n print_(\"Error: Service 'mdf' requires authentication.\")\n services.remove(\"mdf\")\n\n # Warn of invalid services\n if services:\n print_(\"\\n\".join([\"Unknown or invalid service: '\" + sv + \"'.\" for sv in services]))\n\n return clients\n\n\n# *************************************************\n# * File utilities\n# *************************************************\n\ndef find_files(root, file_pattern=None, verbose=False):\n \"\"\"Find files recursively in a given directory.\n\n Arguments:\n root (str): The path to the starting (root) directory.\n file_pattern (str): A regular expression to match files against, or None to match all files.\n Default None.\n verbose: If True, will print_ status messages.\n If False, will remain silent unless there is an error.\n Default False.\n\n Yields:\n dict: The matching file's path information.\n Contains:\n path (str): The path to the directory containing the file.\n no_root_path (str): The path to the directory containing the file,\n with the path to the root directory removed.\n filename (str): The name of the file.\n \"\"\"\n if not 
os.path.exists(root):\n raise ValueError(\"Path '\" + root + \"' does not exist.\")\n # Add separator to end of root if not already supplied\n root += os.sep if root[-1:] != os.sep else \"\"\n for path, dirs, files in tqdm(os.walk(root), desc=\"Finding files\", disable=(not verbose)):\n for one_file in files:\n if not file_pattern or re.search(file_pattern, one_file):\n yield {\n \"path\": path,\n \"filename\": one_file,\n \"no_root_path\": path.replace(root, \"\")\n }\n\n\ndef uncompress_tree(root, verbose=False):\n \"\"\"Uncompress all tar, zip, and gzip archives under a given directory.\n Note that this process tends to be very slow.\n\n Arguments:\n root (str): The path to the starting (root) directory.\n verbose: If True, will print_ status messages.\n If False, will remain silent unless there is an error.\n Default False.\n \"\"\"\n for file_info in tqdm(find_files(root), desc=\"Uncompressing files\", disable=(not verbose)):\n dir_path = os.path.abspath(file_info[\"path\"])\n abs_path = os.path.join(dir_path, file_info[\"filename\"])\n if tarfile.is_tarfile(abs_path):\n tar = tarfile.open(abs_path)\n tar.extractall(dir_path)\n tar.close()\n elif zipfile.is_zipfile(abs_path):\n z = zipfile.ZipFile(abs_path)\n z.extractall(dir_path)\n z.close()\n else:\n try:\n with gzip.open(abs_path) as gz:\n file_data = gz.read()\n # Opens the absolute path, including filename, for writing\n # Does not include the extension (should be .gz or similar)\n with open(abs_path.rsplit('.', 1)[0], 'w') as newfile:\n newfile.write(str(file_data))\n # An IOErrorwill occur at gz.read() if the file is not a gzip\n except IOError:\n pass\n\n\n# *************************************************\n# * GMeta formatting utilities\n# *************************************************\n\ndef format_gmeta(data):\n \"\"\"Format input into GMeta format, suitable for ingesting into Globus Search.\n Format a dictionary into a GMetaEntry.\n Format a list of GMetaEntry into a GMetaList inside a GMetaIngest.\n\n Example usage:\n glist = []\n for document in all_my_documents:\n gmeta_entry = format_gmeta(document)\n glist.append(gmeta_entry)\n ingest_ready_document = format_gmeta(glist)\n\n Arguments:\n data (dict or list): The data to be formatted.\n If data is a dict, it must contain:\n data[\"mdf\"][\"landing_page\"] (str): A URI to a web page for the entry.\n data[\"mdf\"][\"acl\"] (list of str): A list of Globus UUIDs that are allowed to view the entry.\n If data is a list, it must consist of GMetaEntry documents.\n\n Returns:\n dict (if data is dict): The data as a GMetaEntry.\n dict (if data is list): The data as a GMetaIngest.\n \"\"\"\n if type(data) is dict:\n return {\n \"@datatype\": \"GMetaEntry\",\n \"@version\": \"2016-11-09\",\n \"subject\": data[\"mdf\"][\"landing_page\"],\n \"visible_to\": data[\"mdf\"].pop(\"acl\"),\n \"content\": data\n }\n\n elif type(data) is list:\n return {\n \"@datatype\": \"GIngest\",\n \"@version\": \"2016-11-09\",\n \"ingest_type\": \"GMetaList\",\n \"ingest_data\": {\n \"@datatype\": \"GMetaList\",\n \"@version\": \"2016-11-09\",\n \"gmeta\": data\n }\n }\n\n else:\n raise TypeError(\"Cannot format '\" + str(type(data)) + \"' into GMeta.\")\n\n\ndef gmeta_pop(gmeta, info=False):\n \"\"\"Remove GMeta wrapping from a Globus Search result.\n This function can be called on the raw GlobusHTTPResponse that Search returns,\n or a string or dictionary representation of it.\n\n Arguments:\n gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap.\n info (bool): If False, 
gmeta_pop will return a list of the results and discard the metadata.\n If True, gmeta_pop will return a tuple containing the results list,\n and other information about the query.\n Default False.\n\n Returns:\n list (if info=False): The unwrapped results.\n tuple (if info=True): The unwrapped results, and a dictionary of query information.\n \"\"\"\n if type(gmeta) is GlobusHTTPResponse:\n gmeta = json.loads(gmeta.text)\n elif type(gmeta) is str:\n gmeta = json.loads(gmeta)\n elif type(gmeta) is not dict:\n raise TypeError(\"gmeta must be dict, GlobusHTTPResponse, or JSON string\")\n results = []\n for res in gmeta[\"gmeta\"]:\n for con in res[\"content\"]:\n results.append(con)\n if info:\n fyi = {\n \"total_query_matches\": gmeta[\"total\"]\n }\n return results, fyi\n else:\n return results\n\n\n# *************************************************\n# * Globus utilities\n# *************************************************\n\ndef quick_transfer(transfer_client, source_ep, dest_ep, path_list, timeout=None):\n \"\"\"Perform a Globus Transfer and monitor for success.\n\n Arguments:\n transfer_client (TransferClient): An authenticated Transfer client.\n source_ep (str): The source Globus Endpoint ID.\n dest_ep (str): The destination Globus Endpoint ID.\n path_list (list of tuple of 2 str): A list of tuples containing the paths to transfer as\n (source, destination).\n Directory paths must end in a slash, and file paths must not.\n Example: [(\"/source/files/file.dat\", \"/dest/mydocs/doc.dat\"),\n (\"/source/all_reports/\", \"/dest/reports/\")]\n timeout (int): Time, in scores of seconds, to wait for a transfer to complete before erroring.\n Default None, which will wait until a transfer succeeds or fails.\n If this argument is -1, the transfer will submit but not wait at all.\n There is then no error checking.\n\n Returns:\n str: ID of the Globus Transfer.\n \"\"\"\n INTERVAL_SEC = 10\n tdata = globus_sdk.TransferData(transfer_client, source_ep, dest_ep, verify_checksum=True)\n for item in path_list:\n # Is not directory\n if item[0][-1] != \"/\" and item[1][-1] != \"/\":\n tdata.add_item(item[0], item[1])\n # Is directory\n elif item[0][-1] == \"/\" and item[1][-1] == \"/\":\n tdata.add_item(item[0], item[1], recursive=True)\n # Malformed\n else:\n raise globus_sdk.GlobusError(\"Cannot transfer file to directory or vice-versa: \"\n + str(item))\n\n res = transfer_client.submit_transfer(tdata)\n if res[\"code\"] != \"Accepted\":\n raise globus_sdk.GlobusError(\"Failed to transfer files: Transfer \" + res[\"code\"])\n\n iterations = 0\n while timeout is not None and timeout >= 0 and not transfer_client.task_wait(\n res[\"task_id\"],\n timeout=INTERVAL_SEC,\n polling_interval=INTERVAL_SEC):\n for event in transfer_client.task_event_list(res[\"task_id\"]):\n if event[\"is_error\"]:\n transfer_client.cancel_task(res[\"task_id\"])\n raise globus_sdk.GlobusError(\"Error transferring data: \" + event[\"description\"])\n if timeout and iterations >= timeout:\n transfer_client.cancel_task(res[\"task_id\"])\n raise globus_sdk.GlobusError(\"Transfer timed out after \"\n + str(iterations * INTERVAL_SEC)\n + \" seconds.\")\n iterations += 1\n\n return res[\"task_id\"]\n\n\ndef get_local_ep(transfer_client):\n \"\"\"Discover the local Globus Connect Personal endpoint's ID, if possible.\n\n Arguments:\n transfer_client (TransferClient): An authenticated Transfer client.\n\n Returns:\n str: The local GCP EP ID if it was discovered.\n If the ID is not discovered, an exception will be raised.\n 
(globus_sdk.GlobusError unless the user cancels the search)\n \"\"\"\n pgr_res = transfer_client.endpoint_search(filter_scope=\"my-endpoints\")\n ep_candidates = pgr_res.data\n # Check number of candidates\n if len(ep_candidates) < 1:\n # Nothing found\n raise globus_sdk.GlobusError(\"Error: No local endpoints found\")\n elif len(ep_candidates) == 1:\n # Exactly one candidate\n if not ep_candidates[0][\"gcp_connected\"]:\n # Is GCP, is not on\n raise globus_sdk.GlobusError(\"Error: Globus Connect is not running\")\n else:\n # Is GCServer or GCP and connected\n return ep_candidates[0][\"id\"]\n else:\n # >1 found\n # Filter out disconnected GCP\n ep_connections = [candidate for candidate in ep_candidates\n if candidate[\"gcp_connected\"] is not False]\n # Recheck list\n if len(ep_connections) < 1:\n # Nothing found\n raise globus_sdk.GlobusError(\"Error: No local endpoints running\")\n elif len(ep_connections) == 1:\n # Exactly one candidate\n if not ep_connections[0][\"gcp_connected\"]:\n # Is GCP, is not on\n raise globus_sdk.GlobusError(\"Error: Globus Connect is not active\")\n else:\n # Is GCServer or GCP and connected\n return ep_connections[0][\"id\"]\n else:\n # Still >1 found\n # Prompt user\n print_(\"Multiple endpoints found:\")\n count = 0\n for ep in ep_connections:\n count += 1\n print_(count, \": \", ep[\"display_name\"], \"\\t\", ep[\"id\"])\n print_(\"\\nPlease choose the endpoint on this machine\")\n ep_num = 0\n while ep_num == 0:\n usr_choice = input(\"Enter the number of the correct endpoint (-1 to cancel): \")\n try:\n ep_choice = int(usr_choice)\n if ep_choice == -1:\n # User wants to quit\n ep_num = -1\n elif ep_choice in range(1, count+1):\n # Valid selection\n ep_num = ep_choice\n else:\n # Invalid number\n print_(\"Invalid selection\")\n except Exception:\n print_(\"Invalid input\")\n\n if ep_num == -1:\n print_(\"Cancelling\")\n raise SystemExit\n return ep_connections[ep_num-1][\"id\"]\n\n\n# *************************************************\n# * Misc utilities\n# *************************************************\n\ndef dict_merge(base, addition):\n \"\"\"Merge one dictionary with another, recursively.\n Fields present in addition will be added to base.\n No data in base is deleted or overwritten.\n\n Arguments:\n base (dict): The dictionary being added to.\n addition (dict): The dictionary with additional data.\n\n Returns:\n dict: The merged base.\n \"\"\"\n if not isinstance(base, dict) or not isinstance(addition, dict):\n raise TypeError(\"dict_merge only works with dicts.\")\n\n for key, value in addition.items():\n # If the value is a dict, need to merge those\n if isinstance(value, dict):\n base[key] = dict_merge(base.get(key, {}), value)\n # Otherwise, if the key is not in base, add it\n elif key not in base.keys():\n base[key] = value\n\n return base\n\n\n# *************************************************\n# * Clients\n# *************************************************\n\nclass DataPublicationClient(BaseClient):\n \"\"\"Publish data with Globus Publish.\"\"\"\n\n def __init__(self, base_url=\"https://publish.globus.org/v1/api/\", **kwargs):\n app_name = kwargs.pop('app_name', 'DataPublication Client v0.1')\n BaseClient.__init__(self, \"datapublication\", base_url=base_url,\n app_name=app_name, **kwargs)\n self._headers['Content-Type'] = 'application/json'\n\n def list_schemas(self, **params):\n return self.get('schemas', params=params)\n\n def get_schema(self, schema_id, **params):\n return self.get('schemas/{}'.format(schema_id), params=params)\n\n 
def list_collections(self, **params):\n try:\n return self.get('collections', params=params)\n except Exception as e:\n print_('FAIL: {}'.format(e))\n\n def list_datasets(self, collection_id, **params):\n return self.get('collections/{}/datasets'.format(collection_id),\n params=params)\n\n def push_metadata(self, collection, metadata, **params):\n return self.post('collections/{}'.format(collection),\n json_body=metadata, params=params)\n\n def get_dataset(self, dataset_id, **params):\n return self.get('datasets/{}'.format(dataset_id),\n params=params)\n\n def get_submission(self, submission_id, **params):\n return self.get('submissions/{}'.format(submission_id),\n params=params)\n\n def delete_submission(self, submission_id, **params):\n return self.delete('submissions/{}'.format(submission_id),\n params=params)\n\n def complete_submission(self, submission_id, **params):\n return self.post('submissions/{}/submit'.format(submission_id),\n params=params)\n\n def list_submissions(self, **params):\n return self.get('submissions', params=params)\n","sub_path":"mdf_toolbox/toolbox.py","file_name":"toolbox.py","file_ext":"py","file_size_in_byte":32346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"308645702","text":"# Raul Cerda\n# raul.cerda@csu.fullerton.edu\n# Project 3: Space Invaders\n\nimport pygame\nfrom pygame.sprite import Group\nfrom settings import Settings\nfrom game_stats import GameStats\nfrom scoreboard import Scoreboard\nfrom button import Button\nfrom ship import Ship\nimport game_functions as gf\n\n\n# Main game loop with initializations\ndef run_game():\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))\n pygame.display.set_caption(\"Space Invaders\")\n play_button = Button(screen, \"PLAY GAME\", 1)\n score_button = Button(screen, \"HIGH SCORES\", 2)\n stats = GameStats(ai_settings)\n sb = Scoreboard(ai_settings, screen, stats)\n\n ship = Ship(ai_settings, screen)\n bullets = Group()\n aliens = Group()\n ai_bullets = Group()\n bunkers = Group()\n\n gf.create_fleet(ai_settings, screen, ship, aliens)\n\n while True:\n gf.check_events(ai_settings, screen, stats, sb, play_button, score_button, ship, aliens, bullets, ai_bullets,\n bunkers)\n if stats.game_active:\n ship.update()\n gf.update_bullets(ai_settings, screen, stats, ship, sb, aliens, bullets, ai_bullets, bunkers)\n gf.update_aliens(ai_settings, stats, screen, sb, ship, aliens, bullets, ai_bullets)\n gf.update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button, score_button, ai_bullets,\n bunkers)\n\n\nrun_game()\n","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"28585138","text":"import speech_recognition as sr\nimport webbrowser as wb\nfrom googletrans import Translator\nfrom gtts import gTTS\nimport os\n\n\nr1=sr.Recognizer()\nr2=sr.Recognizer()\nr3=sr.Recognizer()\n\nwith sr.Microphone() as source:\n print('Speak now')\n audio = r3.listen(source)\nif 'hi' in r2.recognize_google(audio):\n r2=sr.Recognizer()\n url='https://www.youtube.com/results?search_query='\n #while(1):\n with sr.Microphone() as source:\n #print('search your query: ')\n audio = r2.listen(source)\n\n try:\n get = r2.recognize_google(audio)\n if 'exit' in get:\n exit(0)\n print(get)\n # wb.get().open_new(url+get)\n\n except 
sr.UnknownValueError:\n print('error')\n except sr.RequestError as e:\n print('failed'.format(e))\n\nmytext = get\n\ntranslator = Translator()\nresult = translator.translate(mytext,dest='hi')\nprint(result.text)\n\nlanguage = 'hi'\nmyobj = gTTS(text=result.text, lang=language, slow=False)\nmyobj.save(\"welcome.mp3\")\nos.system(\"welcome.mp3\")\n","sub_path":"Full.py","file_name":"Full.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"153892484","text":"import machine\nfrom ssd1306 import SSD1306_I2C\n\nsda=machine.Pin(16)\nscl=machine.Pin(17)\ni2c=machine.I2C(0,sda=sda, scl=scl, freq=400000)\noled = SSD1306_I2C(128, 64, i2c)\noled.fill(0)\noled.text(\"Hello World!\", 0, 0)\noled.show()\nprint('Done')","sub_path":"src/oled/i2c/01-hello-world.py","file_name":"01-hello-world.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"86540008","text":"import json \nimport PlayerInfo as info\nimport pandas as p\nimport requests as rq\nimport urllib.request\nimport shutil\nimport os\n#from java.util import HashMap\n\nmap = {}\npic_dir = os.environ['HOME'] + \"/NBAStats/Pics/\"\np.options.display.max_columns = 50\nclass Nba():\n def __init__(self):\n with open('players.json') as f:\n self.data = json.load(f)\n\n def get_data(self):\n return self.data\n\n def parse_data(self, data):\n for i in range(len(data)):\n first_name = data[i]['firstName']\n last_name = data[i]['lastName']\n player_id = data[i]['playerId']\n name = first_name + \" \" + last_name\n map[name] = player_id; \n \n def find_id(self, player_name):\n for key, value in map.items():\n if key == player_name: \n return value\n\n def get_player_bio(self, info, id):\n url = \"http://stats.nba.com/stats/commonplayerinfo?\"\n api_params = {'PlayerID' : id,'LeagueID' : '00'}\n x = rq.get(url, params=api_params).json()\n headers = x['resultSets'][0]['headers']\n results = x['resultSets'][0]['rowSet']\n info.set_birthday(results[0][headers.index('BIRTHDATE')])\n info.set_school(results[0][headers.index('SCHOOL')])\n info.set_height(results[0][headers.index('HEIGHT')])\n info.set_weight(results[0][headers.index('WEIGHT')])\n info.set_years_pro(results[0][headers.index('SEASON_EXP')])\n info.set_team(results[0][headers.index('TEAM_NAME')])\n \n def get_player_stats(self, info, id):\n url = \"http://stats.nba.com/stats/playercareerstats?\"\n api_params = {'PlayerID' : id,'LeagueID' : '00', 'PerMode' : 'PerGame'}\n x = rq.get(url, params=api_params).json()\n \n headers = x['resultSets'][1]['headers']\n results = x['resultSets'][1]['rowSet']\n info.set_ppg(results[0][headers.index('PTS')])\n info.set_ast(results[0][headers.index('AST')])\n info.set_reb(results[0][headers.index('REB')])\n info.set_stl(results[0][headers.index('STL')])\n \n def get_player_pic(self, id):\n url = \"http://stats.nbsa.com/media/players/230x185/\"+str(id)+\".png\"\n img = str(id) + \".png\"\n return urllib.request.urlretrieve(url, img)[0]\n\ndef check_pic(pic):\n if not os.path.exists(pic_dir + pic):\n shutil.move(pic, pic_dir)\n else:\n os.remove(pic)\n\ndef gen_html(df):\n f = open('report.html', 'w')\n\n head = \"\"\"\n Report\n \"\"\"\n\n body = \"\" + df.to_html() + \"\"\n \n message = head + body + \"\\n\"\n\n f.write(message)\n f.close()\n\nif __name__ == \"__main__\":\n nba = Nba()\n data = nba.get_data()\n nba.parse_data(data)\n\n player_name = input(\"Enter Player Name: 
\")\n info = info.PlayerInfo(player_name)\n id = nba.find_id(player_name)\n \n df = nba.get_player_bio(info, id)\n nba.get_player_stats(info, id)\n info.print_data()\n\n pic = nba.get_player_pic(id)\n check_pic(pic)\n gen_html(df)\n","sub_path":"Nba.py","file_name":"Nba.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"435525364","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Q1mi\"\n# Date: 2017/11/24\n\n\"\"\"\nPython全栈课前练习题\n\"\"\"\n\ns = \"Alex SB 哈哈\\r\\nx:1\\r\\ny:2\\r\\nz:3\\r\\n\\r\\n自行车\"\n\n\n# 问题1:如何取到[\"Alex SB 哈哈\\r\\nx:1\\r\\ny:2\\r\\nz:3\", \"自行车\"]?\nret1 = s.split('\\r\\n\\r\\n')\nprint(ret1)\n\n\n# 问题2:如何在上面结果基础上拿到[\"Alex\", \"SB\", \"哈哈\"]?\nret2 = ret1[0].split('\\r\\n')\nprint(ret2)\nret3 = ret2[0].split(' ')\nprint(ret3)\n\n# 问题3:如何在上面结果基础上拿到\"SB\"?\nret4 = ret3[1]\nprint(ret4)\n\n\n# ------------------------------------------------------------------------------------------\n\n\n# 有一个列表,他的内部是一些元祖,元祖的第一个元素是姓名,第二个元素是爱好。\n# 现在我给你一个姓名,如\"Egon\",如果有这个姓名,就打印出他的爱好,没有就打印查无此人。\n\nlist1 = [\n (\"Alex\", \"烫头\"),\n (\"Egon\", \"街舞\"),\n (\"Yuan\", \"喝茶\")\n]\n\nfor i in list1:\n if i[0] == 'Egon':\n print(\"%s's hobby is %s!\" % (i[0], i[1]))\n break\nelse:\n print('not found!')\n# ------------------------------------------------------------------------------------------\n\n# 我有一个HTML文件\"login.html\"\n\n# 问题1:我如何读取它的内容保存到变量html_s?\nf = open('login.html', mode='r', encoding='utf8')\nhtml_s = f.read()\nprint(html_s)\nf.close()\n\n\n# 问题2:我如何读取它的二进制内容保存到变量html_b?\nf = open('login.html', mode='rb')\nhtml_b = f.read()\nprint(html_b)\nf.close()\n\n\n# ------------------------------------------------------------------------------------------\n\ns2 = \"Alex 花了一百万买了辆电动车,真@@xx@@。\"\n\n# 问题1:如何把上面的s2转变成\"Alex 花了一百万买了辆电动车,真SB。\"\nret5 = s2.replace('@@xx@@', 'SB')\nprint(ret5)\n\n","sub_path":"Python全栈第14期/4-Django/day58/01_课前练习题.py","file_name":"01_课前练习题.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"408174458","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/lxsameer/src/yellowencom/dauth/backends.py\n# Compiled at: 2012-06-29 07:21:55\nimport pickle, json, hashlib\nfrom urllib import urlencode\nimport urllib2\nfrom django.conf import settings\nfrom django.contrib.auth.models import User, check_password\n\nclass DaarmaanBackend(object):\n \"\"\"\n Authenticate against the Daarmaan gauth.\n \"\"\"\n service = settings.SERVICE_NAME\n key = settings.SERVICE_KEY\n daarmaan = settings.DAARMAAN_SERVER\n\n def authenticate(self, **kwargs):\n \"\"\"\n Try to authenticate to daarmaan SSO using provided informations.\n kwargs dictionary should contains below keys:\n\n token: Actual ticket from daarmaan\n request: current request object\n hash_: the SHA1 checksum provided by daarmaan.\n \"\"\"\n token = kwargs.get('token', None)\n request = kwargs.get('request', None)\n hash_ = kwargs.get('hash_', None)\n if not token or not hash_ or not request:\n raise ValueError(\"You should provide 'request', 'token' and 'hash_' parameters\")\n if self.is_valid(token, hash_):\n data = self.validate(token)\n user, created = User.objects.get_or_create(username=data['username'])\n user.first_name = data['first_name']\n user.last_name = data['last_name']\n 
user.email = data['email']\n if created:\n user.pk = data['id']\n user.save()\n return user\n return\n return\n\n def get_user(self, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return\n\n return\n\n def is_valid(self, token, hash_):\n \"\"\"\n Check for token and hash integrity.\n \"\"\"\n key = settings.SERVICE_KEY\n m = hashlib.sha1()\n m.update(token + key)\n if hash_ == m.hexdigest():\n return True\n return False\n\n def validate(self, token):\n \"\"\"\n Try to validate the given token for a valid user.\n \"\"\"\n url = '%s/verification/' % self.daarmaan.lstrip('/')\n m = hashlib.sha1()\n m.update(token + self.key)\n hash_ = m.hexdigest()\n params = {'token': token, 'hash': hash_, \n 'service': self.service}\n url = '%s?%s' % (url, urlencode(params))\n response = urllib2.urlopen(url)\n if response.code == 200:\n json_data = json.loads(response.read())\n if json_data['hash']:\n hash_ = json_data['hash']\n data = json_data['data']\n m = hashlib.sha1()\n m.update(data['username'] + self.key)\n if m.hexdigest() == hash_:\n return data\n return False\n else:\n return False","sub_path":"pycfiles/Daarmaan-0.2.2.tar/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"628743652","text":"from typing import List\nimport collections\n\ntests = {\n 1: [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"],\n 2: [\"\"],\n 3: [\"a\"],\n 4: [\"ab\",\"cd\",\"ef\"],\n 5: [\"abc\", \"bca\", \"cba\"]\n}\n\nres = {\n # 현재 이 코드에서는 결과에 순서가 뒤섞이는 것을 실패로 확인합니다.\n # 정확한 테스트는 leetcode / GeeksForGeeks에서 진행해보는 것이 좋습니다\n 1: [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']],\n 2: [[\"\"]],\n 3: [[\"a\"]],\n 4: [['ab'], ['cd'], ['ef']],\n 5: [['abc', 'bca', 'cba']]\n}\n\ndef is_same_2d_list(alist: List, blist: List):\n a_set = set(map(tuple, alist))\n b_set = set(map(tuple, blist))\n\n return sorted(a_set) == sorted(b_set)\n\ndef check_result(index: int, output: List):\n if index > len(tests):\n raise RuntimeError(f'Failed to get {index}th case')\n\n return is_same_2d_list(output, res.get(index, []))\n\ndef groupAnagrams(strs: List[str]) -> List[List[str]]:\n hashmap = collections.defaultdict(list)\n\n for s in strs:\n count = [0] * 26\n\n for ch in s:\n count[ord(ch) - ord('a')] += 1\n hashmap[tuple(count)].append(s)\n\n return hashmap.values()\n\ndef main():\n for index, input_list in tests.items():\n output= groupAnagrams(input_list)\n\n if check_result(index, output):\n print(f'Test case {index} is correct: value {output}')\n else:\n print(f'Test case {index} is failed: value {output}')\n\nif __name__ == '__main__':\n main()","sub_path":"String/2_4_anagram/anagram_count.py","file_name":"anagram_count.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"100647308","text":"from tornado.options import define as tor_define, options as tor_options\nfrom tornado.web import Application as TorApplication\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\n\nfrom cloud_require.config import tornado_config\n\n\ndef application():\n tornado_app = TorApplication(handlers=tornado_config.routers(), **tornado_config.settings())\n return tornado_app\n\n\ndef tor_log(init_log=True):\n if init_log:\n tor_options.logging = \"WARNING\"\n tor_options.log_file_prefix = \"./log/tor_status_{0}.log\".format(tor_options.port)\n 
tor_options.log_rotate_mode = 'time'\n tor_options.log_rotate_when = 'D'\n tor_options.log_rotate_interval = 1\n\n\nif __name__ == \"__main__\":\n tor_define(\"port\", default=tornado_config.port, type=int, help=\"Run on the given port\")\n tor_log(tornado_config.open_log)\n tor_options.parse_command_line()\n http_server = HTTPServer(application(),\n max_buffer_size=tornado_config.max_buffer_size,\n max_body_size=tornado_config.max_body_size,\n xheaders=tornado_config.remote_proxy)\n http_server.bind(tor_options.port, tornado_config.address)\n http_server.start(tornado_config.process_num)\n IOLoop.current().start()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"56695590","text":"#coding: utf-8\n\nimport sqlite3\n\ndef connect():\n return sqlite3.connect(\"zhihu.db\")\n\ndef insert_table(table, args):\n conn = connect()\n cursor = conn.cursor()\n keys = args.keys()\n key_str = ','.join(['`{}`'.format(key) for key in keys])\n value_str = ','.join(['?' for key in keys])\n values = [str(e) for e in list(args.values())]\n sql_tpl = 'INSERT INTO `{}` ({}) VALUES ({})'\n sql = sql_tpl.format(table, key_str, value_str)\n # print(sql_tpl.format(table, key_str, ','.join([\"'{}'\".format(e) for e in list(args.values())])))\n cursor.execute(sql, tuple(values))\n conn.commit()\n return cursor.lastrowid\n\ndef update_table(table, args, where):\n conn = connect()\n cursor = conn.cursor()\n keys = args.keys()\n key_str = ','.join(['`{}`=?'.format(key) for key in keys])\n values = [str(e) for e in list(args.values())]\n where_str = ','.join(['`{}`=?'.format(key) for key in where.keys()])\n where_values = [str(e) for e in list(where.values())]\n values.append(*where_values)\n sql_tpl = 'UPDATE `{0}` SET {1} WHERE {2}'\n sql = sql_tpl.format(table, key_str, where_str)\n key_repr = ','.join(['`{}`=\"{}\"'.format(key, str(value)) for key, value in args.items()])\n where_repr = ','.join(['`{}`=\"{}\"'.format(key, value) for key, value in where.items()])\n # print(sql_tpl.format(table, key_repr, where_repr))\n cursor.execute(sql, tuple(values))\n return conn.commit()\n","sub_path":"fetch/python/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"641535809","text":"from authentication.serializers import UserSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nimport os\nfrom .models import File\nfrom django.contrib.auth.models import User\n\nPARENT_DIR = \"./files\"\n\ndef CurrentUser(request):\n if not request.user.is_authenticated:\n return None\n return request.user\n\n# Make a post request with 'data' containing file data and 'filename' containing filename\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef UploadFile(request):\n if request.method == 'POST':\n usr = CurrentUser(request)\n if usr is None:\n return Response({\"success\": False, \"message\": \"User not logged in.\"}, status = status.HTTP_403_FORBIDDEN)\n \n filetext = request.data.get('data', '')\n filename = request.data.get('filename', '')\n\n if filename == '':\n return Response({\"success\":False, \"message\": \"Invalid filename\"}, status=status.HTTP_400_BAD_REQUEST)\n \n filepath = PARENT_DIR + 
\"/\" + usr.username + \"/\" + filename\n\n if os.path.exists(filepath):\n return Response({\"success\":False, \"message\": \"File already exists.\"}, status=status.HTTP_400_BAD_REQUEST)\n \n f = open(filepath, \"w+\")\n f.write(filetext)\n f.close()\n File.objects.create(owner = usr, filename = filename, relative_location = '', is_file = True)\n return Response({\"success\":True, \"message\": \"Uploaded Successfully\"}, status=status.HTTP_200_OK)\n \n return Response({\"success\":False, \"message\": \"Make a POST request.\"}, status=status.HTTP_400_BAD_REQUEST)","sub_path":"api/fileHandling/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"327304191","text":"import scrapy\r\n\r\nclass TerraSpider(scrapy.Spider):\r\n\r\n name = 'Uol'\r\n start_urls = { \r\n 'https://www.uol.com.br/'\r\n }\r\n\r\n def parse(self,response):\r\n ###Valor cotação do dólar\r\n dolars = response.xpath ('//*[@id=\"HU_header\"]/div[2]/div/div[2]/div[2]/ul/li[1]/a/span[2]/text()')\r\n print(\"Cotação Dolar:{}\".format(len(dolars)))\r\n\r\n for dolar in dolars:\r\n conteudo = dolar.extract().strip()\r\n if conteudo !=\"\":\r\n yield {\r\n 'Cotação do dolar': conteudo\r\n }","sub_path":"Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"332585072","text":"import pandas as pd \nimport numpy as np\n\nfrom os import listdir\nfrom os.path import isfile, join\n\np_stock_id = '股票交易代码'\np_stock_title = '股票名称'\np_cash_divid_pt = '税前派现金额(人民币)(元)(1:X)'\np_date = '除权日'\np_cash_divid_at = '税后派现金额(人民币)(元)(1:X)'\np_stock_divid_rat = '送股比例(1:X)'\np_increa_trans_rat = '转增比例(1:X)'\np_reser_rat = '送转比例(1:X)'\np_allot_prc = '配股价格(元)'\np_allot_rat = '实际配股比例'\n\nSTOCK_LIST = ['000002', '000099', '000004', '000005', '000006', '399300'] \n\ndef read_tech(path = '/home/lcc/dataset/kline_5minute/sz/'):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n df_tech = None\n for f in files:\n date = f.split('.')[0]\n df = pd.read_csv(path+f, dtype={'stock_id': str})\n df = df[df['stock_id'].isin(STOCK_LIST)]\n df['date'] = date\n if df_tech is None:\n df_tech = df\n else:\n df_tech = pd.concat([df_tech, df])\n df_tech['date'] = pd.to_datetime(df_tech.date)\n return df_tech\n\ndef read_panel(fname='/home/lcc/dataset/stock_info'):\n df_panel = pd.read_csv(fname)\n df_panel = df_panel[(df_panel[p_stock_id].isin(STOCK_LIST))]\n df_panel[p_date] = pd.to_datetime(df_panel[p_date])\n return df_panel\n\n","sub_path":"read_stock.py","file_name":"read_stock.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"637850003","text":"\"\"\"\r\nSegment-Tree\r\n add_query(p,q,x):[p,q]にxを足す\r\n sum_query(i):[p,q]の合計\r\n\"\"\"\r\n\r\nclass SegmentTree():\r\n def __init__(self,n):\r\n self.offset=self.setOffset(n)\r\n self.segTree=[0]*(2*self.offset+1)\r\n self.lazyTree=[0]*(2*self.offset+1)\r\n\r\n def setOffset(self,n):\r\n if n==1:\r\n return 1\r\n else:\r\n return 2**(n-1).bit_length()-1\r\n\r\n def lazyIndex(self,p,q):\r\n if p==0 and q==self.offset:\r\n return\r\n L=p+self.offset\r\n R=q+self.offset\r\n l=(L-1)//2\r\n r=(R-1)//2\r\n Lflag=False\r\n Rflag=False\r\n while l0:\r\n yield l\r\n l=(l-1)//2\r\n yield 0\r\n\r\n def lazyUpdate(self,*index):\r\n for i in reversed(index):\r\n v=self.lazyTree[i]\r\n if 
v!=0:\r\n self.lazyTree[2*i+1]+=v//2\r\n self.segTree[2*i+1]+=v//2\r\n self.lazyTree[2*i+2]+=v//2\r\n self.segTree[2*i+2]+=v//2\r\n self.lazyTree[i]=0\r\n\r\n def add_query(self,p,q,x):\r\n *index,=self.lazyIndex(p,q)\r\n self.lazyUpdate(*index)\r\n p+=self.offset\r\n q+=self.offset\r\n n=1\r\n\r\n while p<=q:\r\n if not p&1:\r\n self.lazyTree[p]+=x*n\r\n self.segTree[p]+=x*n\r\n if q&1:\r\n self.lazyTree[q]+=x*n\r\n self.segTree[q]+=x*n\r\n p=p//2\r\n q=(q-2)//2\r\n n*=2\r\n for i in index:\r\n self.segTree[i]=self.segTree[2*i+1]+self.segTree[2*i+2]\r\n\r\n def sum_query(self,p,q):\r\n self.lazyUpdate(*self.lazyIndex(p,q))\r\n p+=self.offset\r\n q+=self.offset\r\n segsum=0\r\n\r\n while q>=p:\r\n if not p&1:\r\n segsum+=self.segTree[p]\r\n if q&1:\r\n segsum+=self.segTree[q]\r\n p=p//2\r\n q=(q-2)//2\r\n return segsum\r\n\r\n def show(self):\r\n print(\"####\")\r\n for a in self.segTree:\r\n if a==0:\r\n print(\" \",end=\" \")\r\n else:\r\n print(a,end=\" \")\r\n print(\"\\n####\")\r\n for a in self.lazyTree:\r\n if a==0:\r\n print(\" \",end=\" \")\r\n else:\r\n print(a,end=\" \")\r\n print(\"\\n####\")\r\n print()\r\n\r\nn,q=map(int,input().split())\r\nST=SegmentTree(n)\r\nans=[]\r\nfor i in range(q):\r\n qry=list(map(int,input().split()))\r\n if qry[0]==0:\r\n ST.add_query(qry[1]-1,qry[2]-1,qry[3])\r\n else:\r\n ans.append(ST.sum_query(qry[1]-1,qry[2]-1))\r\nprint(*ans,sep='\\n')\r\n","sub_path":"Tree/LazySegmentTree_RSQ_RAQ.py","file_name":"LazySegmentTree_RSQ_RAQ.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"233595241","text":"'''\nGiven an array of integers and a sum B, find all unique combinations in the array where the sum is equal to B. \nThe same number may be chosen from the array any number of times to make B.\n\nNote:\n 1. All numbers will be positive integers.\n 2. Elements in a combination (a1, a2, …, ak) must be in non-descending order. (ie, a1 ≤ a2 ≤ … ≤ ak).\n 3. 
The combinations themselves must be sorted in ascending order.\n\nExample 1:\nInput:\nN = 4\narr[] = {7,2,6,5}\nB = 16\nOutput:\n(2 2 2 2 2 2 2 2)\n(2 2 2 2 2 6)\n(2 2 2 5 5)\n(2 2 5 7)\n(2 2 6 6)\n(2 7 7)\n(5 5 6)\n\nExample 2:\nInput:\nN = 11\narr[] = {6,5,7,1,8,2,9,9,7,7,9}\nB = 6\nOutput: (1 1 1 1 1 1)\n(1 1 1 1 2)\n(1 1 2 2)\n(1 5)\n(2 2 2)\n(6)\n\nhttps://www.youtube.com/watch?v=irFtGMLbf-s\n\n'''\n\ndef sumUtil(a,n,s,result,temp,index):\n # if we have exceeded sum,or no more element is left\n if(index>=n or s<0):\n return\n \n # if we reached the given sum\n if(s==0): \n result.append(temp[:])\n return\n \n # append current element in our temp result\n temp.append(a[index])\n \n # recurse for lesser sum, with curr index\n sumUtil(a,n,s-temp[-1],result,temp,index)\n \n # pop last element added to temp for backtrack\n temp.pop()\n\n sumUtil(a,n,s,result,temp,index+1) # recurse for next position\n\n\n\n\ndef combinationalSum(a,s):\n '''\n :param a: given array a\n :param n: size of a\n :param s: given sum to be achieved\n :return: list containing list of numbers in ascending order, giving the combinational sum\n '''\n # put it in set\n a=set(a)\n \n # removed duplicates from array a\n a=list(a)\n \n # sort to maintain order\n a.sort() \n result = []\n \n # initial index is 0\n sumUtil(a,len(a),s,result,[],0) \n \n # return result\n return result ","sub_path":"geeksforgeeks/backtracking/4_Combination_Sum.py","file_name":"4_Combination_Sum.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"410211315","text":"import numpy as np\nfrom astropy.table import Table, unique\nfrom astropy import units as u\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport pandas as pd\nfrom schwimmbad import SerialPool, MultiPool, MPIPool\nimport h5py\nfrom plot_tools import error_ellipse\nimport time\n\nmin_columns = ['source_id', 'ra', 'dec', 'parallax', 'pmra', 'pmdec', 'parallax_error',\n 'pmra_error', 'pmdec_error', 'parallax_pmra_corr', 'parallax_pmdec_corr',\n 'pmra_pmdec_corr']\n\ndef construct_table(gaia_table_file, kepler_table_file=None, minimal=True):\n hdul = fits.open(gaia_table_file)\n gaia_src_tbl = Table(hdul[1].data)\n if minimal:\n gaia_src_tbl = gaia_src_tbl[min_columns]\n return gaia_src_tbl.to_pandas()\n elif kepler_table_file is None:\n print('must supply kepler_table_file kwarg for non-minimal return')\n gaia_src_tbl = gaia_src_tbl.to_pandas() \n hdul = fits.open(kepler_table_file)\n kepler_tbl = Table(hdul[1].data)\n gaia_kepler_matches = kepler_tbl['kepid', 'kepler_gaia_ang_dist', 'source_id', 'nconfp', 'nkoi', 'planet?']\n gaia_kepler_matches = gaia_kepler_matches.to_pandas()\n gaia_kepler_matches.sort_values(['kepid', 'kepler_gaia_ang_dist'], inplace=True)\n gaia_kepler_matches.drop_duplicates('kepid', inplace=True)\n table = gaia_src_tbl.merge(gaia_kepler_matches, on='source_id', how='left')\n return table \n \ndef read_from_fits(filename):\n hdul = fits.open(filename)\n return hdul[0].data \n \ndef save_as_fits(filename, data):\n print(\"saving as {0}...\".format(filename))\n hdu = fits.PrimaryHDU(data)\n hdul = fits.HDUList([hdu])\n hdul.writeto(filename, overwrite=True)\n hdul.close()\n \ndef make_x(star):\n \"\"\"\n returns a vector of x = [parallax, pmra, pmdec]\n \"\"\"\n names = ['parallax', 'pmra', 'pmdec']\n return star.loc[names].values.astype('f')\n\ndef make_xerr(star):\n \"\"\"\n returns a vector of xerr = [parallax_error, 
pmra_error, pmdec_error]\n \"\"\"\n err_names = ['parallax_error', 'pmra_error', 'pmdec_error']\n return star.loc[err_names].values.astype('f') \n \ndef ppm_check(star1, star2, sigma=5.):\n \"\"\"\n Returns True if the differences between parallax, pmra, and pmdec are all below \n the sigma threshold.\n \"\"\"\n x1 = make_x(star1)\n x2 = make_x(star2)\n if np.any(np.isnan([x1,x2])):\n return False\n xerr1 = make_xerr(star1)\n xerr2 = make_xerr(star2)\n if np.any(np.isnan([xerr1, xerr2])):\n return False\n if np.any(np.abs(x1 - x2)/np.sqrt(xerr1**2 + xerr2**2) >= sigma):\n return False\n return True\n \ndef make_cov(star):\n \"\"\"\n returns covariance matrix C corresponding to x\n \"\"\"\n names = ['parallax', 'pmra', 'pmdec']\n C = np.diag(make_xerr(star)**2)\n for i, name1 in enumerate(names):\n for j, name2 in enumerate(names):\n if j >= i:\n continue\n corr = star.loc[\"{0}_{1}_corr\".format(name2, name1)]\n C[i, j] = corr * np.sqrt(C[i, i] * C[j, j])\n C[j, i] = C[i, j]\n return C\n\ndef chisq(star1, star2):\n \"\"\"\n calculates chisquared for two stars based on their parallax and 2D proper motions\n \"\"\"\n deltax = make_x(star1) - make_x(star2)\n cplusc = make_cov(star1) + make_cov(star2)\n return np.dot(deltax, np.linalg.solve(cplusc, deltax))\n \ndef calc_chisq_for_pair(m, primary):\n if ppm_check(primary, m):\n return chisq(primary, m)\n else:\n return -1\n \ndef calc_chisqs_for_row(i,row):\n row_of_chisqs = np.zeros_like(row) - 1\n primary = table.iloc[i]\n row_mask = (row > -1) & (row > i) # indices in row for matches to compute\n matches = table.iloc[row[row_mask]] # ignore non-matches and duplicates\n if np.sum(row_mask) > 0:\n row_of_chisqs[row_mask] = matches.apply(calc_chisq_for_pair, args=(primary,), axis=1)\n return row_of_chisqs\n \ndef calc_chisqs_for_table(table, pairs, save=False, save_every=1e6, save_name='chisqs.fits'):\n chisqs = np.zeros_like(pairs) - 1.\n for i,row in tqdm(enumerate(pairs)):\n chisqs[i] = calc_chisqs_for_row(i,row)\n if save and (i % save_every == 1):\n save_as_fits(save_name, chisqs) \n if save:\n save_as_fits(save_name, chisqs) \n return chisqs\n \ndef worker(data):\n \"\"\"\n Wrapper function for parallelization\n \"\"\"\n i, row = data\n row_of_chisqs = calc_chisqs_for_row(i, row)\n return row_of_chisqs\n \ndef callback(row_of_chisqs, data):\n \"\"\"\n Save chisquared row\n \"\"\"\n i = data[0]\n print(\"callback: row {0}\".format(i))\n with h5py.File('chisqs.hdf5', 'r+') as f:\n f['chisqs'][i] = row_of_chisqs\n \ndef read_match_attr(table, ind1, ind2, attr):\n return table.iloc[ind1][attr], table.iloc[ind2][attr]\n \ndef plot_xs(table, i, sigma=1):\n fs = 12\n star1 = table.iloc[pairs_ind1s[i]]\n star2 = table.iloc[pairs_ind2s[i]]\n x1 = make_x(star1)\n cov1 = make_cov(star1)\n x2 = make_x(star2)\n cov2 = make_cov(star2)\n fig = plt.figure(figsize=(12,4))\n ax1 = fig.add_subplot(131)\n error_ellipse(ax1, x1[0], x1[1], cov1[:2,:2], ec='red', sigma=sigma)\n error_ellipse(ax1, x2[0], x2[1], cov2[:2,:2], ec='blue', sigma=sigma)\n ax1.set_xlim([min([x1[0], x2[0]]) - 5., max([x1[0], x2[0]]) + 5.])\n ax1.set_ylim([min([x1[1], x2[1]]) - 5., max([x1[1], x2[1]]) + 5.])\n ax1.set_xlabel('Parallax (mas)', fontsize=fs)\n ax1.set_ylabel('PM RA (mas yr$^{-1}$)', fontsize=fs)\n\n ax2 = fig.add_subplot(133)\n error_ellipse(ax2, x1[1], x1[2], cov1[1:,1:], ec='red', sigma=sigma)\n error_ellipse(ax2, x2[1], x2[2], cov2[1:,1:], ec='blue', sigma=sigma)\n ax2.set_xlim([min([x1[1], x2[1]]) - 5., max([x1[1], x2[1]]) + 5.])\n ax2.set_ylim([min([x1[2], x2[2]]) - 
5., max([x1[2], x2[2]]) + 5.])\n ax2.set_xlabel('PM RA (mas yr$^{-1}$)', fontsize=fs)\n ax2.set_ylabel('PM Dec (mas yr$^{-1}$)', fontsize=fs)\n \n ax3 = fig.add_subplot(132)\n c1 = np.delete(np.delete(cov1, 1, axis=0), 1, axis=1)\n c2 = np.delete(np.delete(cov2, 1, axis=0), 1, axis=1)\n error_ellipse(ax3, x1[0], x1[2], c1, ec='red', sigma=sigma)\n error_ellipse(ax3, x2[0], x2[2], c2, ec='blue', sigma=sigma)\n ax3.set_xlim([min([x1[0], x2[0]]) - 5., max([x1[0], x2[0]]) + 5.])\n ax3.set_ylim([min([x1[2], x2[2]]) - 5., max([x1[2], x2[2]]) + 5.])\n ax3.set_xlabel('Parallax (mas)', fontsize=fs)\n ax3.set_ylabel('PM Dec (mas yr$^{-1}$)', fontsize=fs)\n \n fig.subplots_adjust(wspace = 0.5)\n fig.text(0.5, 0.95, 'match #{0}'.format(i), horizontalalignment='center', \n transform=ax3.transAxes, fontsize=fs+2)\n\nif __name__ == '__main__':\n print(\"loading data...\")\n start = time.time()\n gaia_table_file = '../data/gaia-kepler-dustin.fits'\n kepler_table_file = '../data/kepler_dr2_1arcsec.fits'\n table = construct_table(gaia_table_file, minimal=True) # table is a global variable\n print(\"loading data table took {0} s\".format(time.time() - start))\n \n print(\"loading pair indices...\")\n pairs_start = time.time()\n pairs_file = '../data/matched-pairs-dustin.fits'\n pairs = read_from_fits(pairs_file) # pairs is a global variable\n print(\"loading pairs array took {0} s\".format(time.time() - pairs_start))\n \n print(\"calculating chisquared...\") \n with h5py.File('chisqs.hdf5', 'w') as f:\n dset = f.create_dataset('chisqs', data=np.zeros_like(pairs) - 1)\n \n #tasks = list(zip(range(len(table)), table.iterrows()))\n tasks = list(zip(range(10000), pairs[:10000,:]))\n \n pool = MultiPool()\n map_start = time.time()\n results = pool.map(worker, tasks, callback=callback)\n map_end = time.time()\n print(\"mapping took {0} s\".format(map_end - map_start))\n pool.close()\n \n with h5py.File('chisqs.hdf5', 'r+') as f:\n chisqs = np.copy(f['chisqs'])\n \n chisqs2 = calc_chisqs_for_table(table, pairs[:10000,:])\n \n\n \n \n if False: # basic diagnostics\n print(\"chisqareds calculated, checking on matches...\") \n plt.hist(chisqs[(chisqs > 0.) & (chisqs < 50.)], bins=500)\n plt.xlabel('$\\chi^2$', fontsize=16)\n plt.ylabel('# Pairs', fontsize=16)\n plt.yscale('log')\n plt.savefig('chisq_keplerpairs.png')\n \n matches_mask = (chisqs > 0) & (chisqs < 5)\n print(\"{0} matches found with chisq < 5\".format(np.sum(matches_mask)))\n len_inds, len_matches = np.shape(pairs)\n pairs_inds = np.array([np.arange(len_inds),]*len_matches).transpose()\n pairs_ind1s = pairs_inds[matches_mask]\n pairs_ind2s = pairs[matches_mask]\n \n i = np.random.randint(0, len(pairs_ind1s))\n print(\"match {0}: source_ids {1}\".format(i, \n read_match_attr(table, pairs_ind1s[i], pairs_ind2s[i], 'source_id')))\n print(\"saved chisquared = {0:.5f}\".format(chisqs[pairs_ind1s[i]][np.where(pairs[pairs_ind1s[i]] \n == pairs_ind2s[i])[0][0]]))\n plot_xs(i, sigma=3)\n \n if False: # check for Kepler pairs\n table = construct_table(gaia_table_file, kepler_table_file=kepler_table_file, minimal=False)\n ind1_is_kic = np.isfinite(table.iloc[pairs_ind1s]['kepid'])\n ind2_is_kic = np.isfinite(table.iloc[pairs_ind2s]['kepid'])\n one_is_kic = np.any(np.vstack([ind1_is_kic, ind2_is_kic]), axis=0)\n both_are_kic = np.all(np.vstack([ind1_is_kic, ind2_is_kic]), axis=0)\n \n print(\"{0} pairs have one KIC member. 
{1} pairs have both members as KIC targets.\".format(one_is_kic, \n both_are_kic))\n \n\n print(\"total execution took {0} s\".format(time.time() - start))","sub_path":"code/find_pairs.py","file_name":"find_pairs.py","file_ext":"py","file_size_in_byte":9911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"268078867","text":"from django.shortcuts import render, redirect\n\nfrom .models import Keyword, Result\nfrom .forms import CrawlForm\nfrom .util import get_page, save_results\n\n\ndef index(request):\n return render(request, 'keywords/index.html')\n\ndef keyword_detail(request, keyword_id):\n keyword = Keyword.objects.filter(pk=keyword_id).first()\n other_keywords = Keyword.objects.exclude(pk=keyword_id)\n context = {'keyword': keyword, 'other_keywords': other_keywords, }\n return render(request, 'keywords/keyword-detail.html', context)\n\ndef crawl(request):\n if request.method == 'POST':\n form = CrawlForm(request.POST)\n \n if form.is_valid():\n cd = form.cleaned_data\n \n page, error = get_page(cd[\"url\"])\n if error:\n return render(request, 'keywords/crawl.html', dict(\n form=form, error=error\n ))\n else:\n result = save_results(request, cd, page)\n \n return redirect('results')\n else:\n form = CrawlForm()\n return render(request, 'keywords/crawl.html', {'form': form})\n\ndef results(request):\n result_id = request.session.get(\"result_id\")\n \n try:\n result = Result.objects.get(pk=result_id)\n except:\n context=dict(error=\"No result found.\")\n return render(request, 'keywords/results.html', context)\n \n keyword = result.keyword\n allresults = Result.objects.filter(\n keyword=keyword,\n word_count__gt=0\n )\n context = {\"result\": result, \"keyword\": keyword, \"allresults\": allresults }\n return render(request, 'keywords/results.html', context)\n","sub_path":"keywords/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"131300782","text":"import unittest\nimport os\nfrom unittest.mock import MagicMock\nfrom mascotas import sqlite, Mascota, mostrarMascota\n\nclass TestMascotas(unittest.TestCase):\n #crear la base de datos y darle valores\n def setUp(self):\n self.db = sqlite(\"TestMascotas.db\")\n cur = self.db.conn.cursor()\n cur.execute(''' CREATE TABLE IF NOT EXISTS mascotas\n (nombre Text,\n especie Text,\n raza Text,\n edad Integer,\n genero Text)\n ''')\n\n self.db.guardar(Mascota(\"Scooby\", \"Perro\", \"Pastor Aleman\", 6, \"Macho\"))\n self.db.guardar(Mascota(\"Zeuz\", \"Perro\", \"Chuihuahua\", 2, \"Macho\"))\n self.db.guardar(Mascota(\"Hachi\", \"Perro\", \"Kunhao\", 3, \"Hembra\"))\n\n #borrar la base de datos\n def tearDown(self):\n self.db.conn.close()\n os.remove(\"TestMascotas.db\")\n\n #pruebas unitarias con mock\n def testShowAll(self):\n entrada = [Mascota(\"Scooby\", \"Perro\", \"Pastor Aleman\", 6, \"Macho\"),\n Mascota(\"Zeuz\", \"Perro\", \"Chuihuahua\", 2, \"Macho\"),\n Mascota(\"Hachi\", \"Perro\", \"Kunhao\", 3, \"Hembra\")]\n\n salida_esperada = [Mascota(\"Scooby\", \"Perro\", \"Pastor Aleman\", 6, \"Macho\"),\n Mascota(\"Zeuz\", \"Perro\", \"Chuihuahua\", 2, \"Macho\"),\n Mascota(\"Hachi\", \"Perro\", \"Kunhao\", 3, \"Hembra\")]\n\n dbMock = MagicMock()\n dbMock.mostrar.return_value = entrada\n\n real = mostrarMascota(dbMock)\n self.assertEqual(salida_esperada, real)\n\n #test integracion\n def test_integration(self):\n salida_esperada = [Mascota(\"Scooby\", 
\"Perro\", \"Pastor Aleman\", 6, \"Macho\"),\n Mascota(\"Zeuz\", \"Perro\", \"Chuihuahua\", 2, \"Macho\"),\n Mascota(\"Hachi\", \"Perro\", \"Kunhao\", 3, \"Hembra\")]\n\n real = mostrarMascota(self.db)\n self.assertEqual(salida_esperada, real)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"ene-jun-2019/Luis Ornelas/Practica 7/mascotas_test.py","file_name":"mascotas_test.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"302640352","text":"# -*- coding: utf-8 -*-\n\n# project: fshell\n# author: s0nnet\n# time: 2017-05-14\n# desc: web bean层_webshell库\n\n\nif __name__ == \"__main__\":\n import sys\n import os\n\n sys.path.append(\"../base\")\n sys.path.append(\"..\")\n sys.path.append(\"../dao\")\n\nfrom fsm_cfg import *\nfrom fs_util import *\n\n\n# from fsm_result_fileatt_dao import *\n\n\nclass FsmDetWebshell:\n\n @staticmethod\n def det_webshell_list(userId, page, count):\n\n webshellList = [\n {\"webshell_id\": 102, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f1\", \"webshell_type\":u\"PHP小马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-17\"},\n {\"webshell_id\": 103, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f2\", \"webshell_type\":u\"PHP大马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-17\"},\n {\"webshell_id\": 104, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f3\", \"webshell_type\":u\"PHP小马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-17\"},\n {\"webshell_id\": 105, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f4\", \"webshell_type\":u\"PHP大马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-16\"},\n {\"webshell_id\": 106, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f5\", \"webshell_type\":u\"PHP大马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-14\"},\n {\"webshell_id\": 107, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f6\", \"webshell_type\":u\"PHP小马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-15\"},\n {\"webshell_id\": 108, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f7\", \"webshell_type\":u\"PHP小马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-16\"},\n {\"webshell_id\": 109, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f8\", \"webshell_type\":u\"PHP小马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-13\"},\n {\"webshell_id\": 112, \"webshell_name\":\"db-dump.php\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e2f9\", \"webshell_type\":u\"PHP小马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-12\"},\n {\"webshell_id\": 113, \"webshell_name\":\"cmd-recv.jsp\", \"webshell_md5\":\"3bc43416a6d3a20214a7d6428bb0e223\", \"webshell_type\":u\"JSP一句话马\", \"webshell_risk\":5, \"det_tm\":\"2017-05-14\"}\n ]\n\n return True, webshellList\n\n","sub_path":"fs_manager/src/bean/fsm_det_webshell.py","file_name":"fsm_det_webshell.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"199598697","text":"from amadeus import Client, ResponseError\nimport json\nfrom flask import Flask, render_template, request \nimport iso8601\nimport twilio.rest\n\napp = Flask(__name__)\n\ndef getDuration(t1, t2):\n t1 = iso8601.parse_date(t1)\n t2 = iso8601.parse_date(t2)\n diff = t2-t1\n days, seconds = diff.days, 
diff.seconds\n hours = days * 24 + seconds // 3600\n minutes = (seconds % 3600) // 60\n return [str(hours)+\"H \"+str(minutes)+\"M\"]\n\ndef dateWords(d):\n return str(iso8601.parse_date(d).strftime('%A %d %B %Y')).title()\n\namadeus = Client(\n client_id='',\n client_secret='AMADEUS API KEY SECRET HERE<>'\n)\n\niata_codes = {\n 'LAS': 'Las Vegas',\n 'ORD': 'Chicago',\n 'JFK': 'New York',\n 'SFO': 'San Francisco',\n 'DEN': 'Denver',\n 'SLC': 'Salt Lake City',\n 'LAX': 'Los Angles'\n}\n@app.route('/search', methods=['GET', 'POST'])\ndef flight_search():\n fromad = request.args.get(\"from\")\n to = request.args.get(\"to\")\n onDate = request.args.get(\"date\")\n adults = request.args.get(\"adults\")\n\n response = amadeus.shopping.flight_offers_search.get(originLocationCode=fromad, destinationLocationCode=to, departureDate=onDate, adults=adults)\n return render_template(\"index.html\", len = len(response.data), flights = response.data, codes=iata_codes, getDate = iso8601.parse_date, getDuration=getDuration, dateWords=dateWords, leng=len)\n\n@app.route('/flights')\ndef landing():\n return render_template(\"land.html\")\n\n@app.route('/map')\ndef maps():\n return render_template(\"map.html\")\n\n@app.route('/')\ndef index():\n return render_template(\"main.html\")\n@app.route('/tours1')\ndef tours():\n return render_template(\"tours.html\")\n@app.route('/tours')\ndef tour():\n return render_template(\"tours1.html\")\n\n@app.route('/food')\ndef food():\n return render_template(\"food.html\")\n\n@app.route('/sms', methods=['GET'])\ndef sms():\n typef = request.args.get(\"type\")\n namef = request.args.get(\"name\")\n addf = request.args.get(\"address\")\n cno = request.args.get(\"cno\")\n # client credentials are read from TWILIO_ACCOUNT_SID and AUTH_TOKEN\n account_sid = '' # Found on Twilio Console Dashboard\n auth_token = ''\n clnt = twilio.rest.Client(account_sid, auth_token)\n\n# this is the Twilio sandbox testing number\n from_whatsapp_number='whatsapp:+14155238886'\n# replace this number with your own WhatsApp Messaging number\n to_whatsapp_number='whatsapp:+15102169073'\n\n clnt.messages.create(body='Hey I have some '+namef+' ('+typef+') '+'available for free at '+addf+' You can call us at '+cno,\n from_=from_whatsapp_number,\n to=to_whatsapp_number)\n return ''''''\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, threaded=True, debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"243362763","text":"n = int(input())\ndata = []\nfor i in range(n):\n x, l = map(int, input().split())\n data.append([x, l])\ndata = sorted(data, key = lambda x: x[0] + x[1])\nans = 0\npre = -(10 ** 10)\nfor i in range(n):\n if data[i][0] - data[i][1] >= pre:\n ans += 1\n pre = data[i][0] + data[i][1]\n\nprint(ans)","sub_path":"boot/hard/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"428161601","text":"import logging\nfrom unittest.mock import Mock\n\nimport pytest\nfrom ophyd.sim import make_fake_device\n\nfrom pcdsdevices.lens import XFLS, LensStack, SimLensStack\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture(scope='function')\ndef fake_xfls():\n FakeXFLS = make_fake_device(XFLS)\n xfls = FakeXFLS('TST:XFLS', name='lens')\n xfls.state.sim_put(4)\n xfls.state.sim_set_enum_strs(('Unknown', 'LENS1', 
'LENS2', 'LENS3', 'OUT'))\n return xfls\n\n\ndef test_xfls_states(fake_xfls):\n logger.debug('test_xfls_states')\n xfls = fake_xfls\n # Remove\n xfls.state.put(4)\n assert xfls.removed\n assert not xfls.inserted\n # Insert\n xfls.state.put(3)\n assert not xfls.removed\n assert xfls.inserted\n # Unknown\n xfls.state.put(0)\n assert not xfls.removed\n assert not xfls.inserted\n\n\ndef test_xfls_motion(fake_xfls):\n logger.debug('test_xfls_motion')\n xfls = fake_xfls\n xfls.remove()\n assert xfls.state.get() == 4\n\n\ndef test_xfls_subscriptions(fake_xfls):\n xfls = fake_xfls\n # Subscribe a pseudo callback\n cb = Mock()\n xfls.subscribe(cb, event_type=xfls.SUB_STATE, run=False)\n # Change readback state\n xfls.state.put(4)\n assert cb.called\n\n\ndef test_LensStack_align(presets, monkeypatch):\n logger.debug('test_LensStack_align')\n\n def mocktweak(self):\n lens.x.move(lens.x.position+1)\n lens.y.move(lens.y.position+1)\n lens = SimLensStack(name='test',\n x_prefix='x_motor',\n y_prefix='y_motor',\n z_prefix='z_motor')\n monkeypatch.setattr(LensStack, 'tweak', mocktweak)\n lens.align(0)\n assert lens.z.position == 0\n assert lens.x.position == 1.5\n assert lens.y.position == 1.5\n","sub_path":"tests/test_lens.py","file_name":"test_lens.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"320056035","text":"import os\nfrom core.handler.base import ApiHandler, route, noblock, success\nfrom core.lib.ip2Region import Ip2Region\n\nROOT_PATH = os.path.abspath(os.path.join(\n os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))\n\nip_reg = Ip2Region(os.path.join(ROOT_PATH, 'conf', 'ip2region.db'))\n\n\n@route(r'/ip/region')\nclass IpRegionHandler(ApiHandler):\n\n @noblock\n def get(self):\n ipaddr = self.get_arg('ip', None)\n data = ip_reg.memorySearch(ipaddr)\n return success(data=data)\n","sub_path":"core/handler/api/ipaddr.py","file_name":"ipaddr.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"149358317","text":"from random import choice, randint\nimport dictogram\nimport sampling\nfrom pprint import pprint\n\ndef gen_markov(words, order=1):\n pairs = make_pairs(words.split(\" \"), order)\n markov_dict = {}\n for word1, word2 in pairs:\n if word1 in markov_dict:\n markov_dict[word1].add_count(word2)\n else:\n markov_dict[word1] = dictogram.Dictogram([word2])\n return markov_dict\n\ndef gen_sentence(chain):\n start_list = []\n for pair in list(chain.keys()):\n if pair[0]==\"[START]\":\n start_list.append(pair)\n word_tuple = start_list[randint(0,len(start_list)-1)]\n return_sentence = ''\n for word in word_tuple[1:]:\n return_sentence += ' ' + word\n while word_tuple[-1]!=\"[STOP]\":\n new_word = sampling.sample(chain[word_tuple])\n return_sentence += ' ' + new_word \n word_list = list(word_tuple[1:])\n word_list.append(new_word)\n word_tuple = tuple(word_list)\n return return_sentence[:-7] + '.'\n\ndef removePuncuation(text):\n punc_list = [',', '\"', ':',\"’\",\"‘\",'(',')']\n new_word = ''\n for _, char in enumerate(text):\n if(char not in punc_list):\n new_word += char\n return new_word\n\ndef make_pairs(corpus, order):\n output = []\n for i in range(len(corpus)-order):\n words = []\n for index in range(order):\n words.append(removePuncuation(corpus[index+i]))\n output.append((tuple(words),removePuncuation(corpus[i+order])))\n return output\n\nif __name__ == '__main__':\n corpus = 
open('ready_player_one.txt')\n text = ''\n for line in corpus:\n text +=line[:-1]+' '\n chain = gen_markov(text, 2)\n # pprint(chain)\n print(gen_sentence(chain))","sub_path":"Code/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"408822246","text":"\"\"\"\nThis Scheduler is quite easy. There are only two calls you need to know about, tick and schedule.\nHere is an example:\n\nimport time\n#Our little framerate (better to use something a little more advanced, but this works just fine.\nfps = 0.03\n\ndef hello(name):\n\t#Just a random little function that prints 'hello name'\n\tprint(\"hello %s\" % name)\n\n#There is one argument that goes in Scheduler, it is the amount you wished each time to be multiplied by. Pygame for example, ticks in incraments of 1000 each second where as time.sleep is in incraments of 1 for 1 second.\n#Default is 1.0, for pygame it would be 0.001.\n\nc = Scheduler()\nc.schedule(function=hello, delay=1, repeats=2, before_delay=True, name=\"Fred\")\n\nwhile c.events:\n\tc.tick(fps)\n\ttime.sleep(fps)\n\"\"\"\n\nclass Scheduler:\n\t\"\"\"Call tick to run a tick and schedular to add an event to a queue\"\"\"\n\n\tdef __init__(self, time_format=1):\n\t\tself.events = set()\n\t\tself.new_events = set()\n\t\tself.time_format = time_format\n\n\n\tdef tick(self, elapsed_time):\n\t\t\"\"\"Call this each iteration of the game loop with an argument of the amount of time that has elapsed in seconds\"\"\"\n\t\tdone_events = []\n\t\tfor event in self.events:\n\t\t\tif event.should_run():\n\t\t\t\tevent.run()\n\t\t\tif event.done:\n\t\t\t\tdone_events.append(event)\n\t\t\tevent.tick(elapsed_time*self.time_format)\n\t\tself.events = self.events | self.new_events\n\t\tself.new_events = set()\n\t\t[self.events.remove(e) for e in done_events]\n\n\tdef schedule(self, function, delay=0, repeats=1, before_delay=False, name=None, *args, **kwargs):\n\t\t\"\"\"function is the name of the callback function that will run with the given arguments, delay is the amount of time to wait (0 for every tick), repeats is the amount of times the event will run (0 or less) for infinent, before_delay says that the function run before the delay, name is the title of the event, and the wrest are arguments for the function\"\"\"\n\t\te = EventMaker(function, delay, repeats, before_delay, name, *args, **kwargs)\n\t\tself.new_events.add(e)\n\n\tdef unschedule(self, event_name):\n\t\t\"\"\"Call this with the event name as a string to remove it from the queue\"\"\"\n\t\tevents = self.events | self.new_events\n\t\tfor i in events:\n\t\t\tif i.name == event_name:\n\t\t\t\ti.done = True\n\nclass EventMaker:\n\t\"\"\"This class is the event. 
It has all the functions to run the event.\"\"\"\n\tdef __init__(self, function, delay, repeats, before_delay, name, *args, **kwargs):\n\t\t\"\"\"function is the name of the callback function that will run with the given arguments, delay is the amount of time to wait (0 for every tick), repeats is the amount of times the event will run (0 or less) for infinent, before_delay says that the function run before the delay, name is the title of the event, and the wrest are arguments for the function\"\"\"\n\t\tself.function = function\n\t\tself.delay = delay\n\t\tself.repeats = repeats\n\t\tself.before_delay = before_delay\n\t\tself.name = name\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\n\t\t#Our operation variables:\n\t\tself.elapsed_time = 0\n\t\tself.done = False\n\n\tdef tick(self, elapsed_time):\n\t\t\"\"\"adds time to the elapsed_time\"\"\"\n\t\tself.elapsed_time += elapsed_time\n\n\tdef should_run(self):\n\t\t\"\"\"Checks if the event should run\"\"\"\n\t\tif self.before_delay and not self.elapsed_time and self.repeats:\n\t\t\tself.repeats += 1\n\t\t\treturn True\n\t\telif self.elapsed_time >= self.delay:\n\t\t\treturn True\n\n\tdef run(self):\n\t\t\"\"\"Runs the event\"\"\"\n\t\tself.repeats -= 1\n\t\tif self.repeats or not self.before_delay:\n\t\t\tself.function(*self.args, **self.kwargs)\n\t\tif self.repeats == 0:\n\t\t\tself.done = True\n\t\tself.elapsed_time = 0\n\n\nif __name__ == '__main__':\n\tdef hello(name):\n\t\tprint(\"hello %s\" % name)\n\n\tc = Scheduler(0.001)\n\tc.schedule(hello, 1, 2, True, name=\"Fred\")\n\n\timport pygame\n\tpygame.init()\n\tfps = 30\n\tfpsClock = pygame.time.Clock()\n\n\twhile c.events:\n\t\tc.tick(fpsClock.tick(fps))","sub_path":"venv/Lib/site-packages/pyaudiogame/ticker.py","file_name":"ticker.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"134663485","text":"import requests\nimport datetime\nimport pickle\nimport json\nimport pandas as pd\n\ndef create_assist_date(datestart = None,dateend = None):\n # 创建日期辅助表\n\n if datestart is None:\n datestart = '20160101'\n if dateend is None:\n dateend = datetime.datetime.now().strftime('%Y%m%d')\n\n # 转为日期格式\n datestart=datetime.datetime.strptime(datestart,'%Y%m%d')\n dateend=datetime.datetime.strptime(dateend,'%Y%m%d')\n date_list = []\n date_list.append(datestart.strftime('%Y%m%d'))\n while datestart\")\n if (pCadastro == 's') or (pCadastro == 'S'):\n ProdutoControl.novoCadastro()\n elif opcaoMenu == '3':\n peditar = 0\n while peditar < 1:\n\n opcaoEditar = ProdutoControl.menuEditarProduto()\n\n if opcaoEditar == '1':\n ProdutoControl.editarNome()\n elif opcaoEditar == '2':\n ProdutoControl.editarPreco()\n elif opcaoEditar == '3':\n ProdutoControl.editarQuantidade()\n elif opcaoEditar == '4':\n ProdutoControl.deletarProduto()\n elif opcaoEditar == '0':\n peditar = 1\n else:\n print(\"---------------------------------------------------------------------------------------\\nComando Inválido!!!!!\\n---------------------------------------------------------------------------------------\")\n peditar = 0\n elif opcaoMenu == '4':\n ProdutoControl.pesquisa()\n elif opcaoMenu == '5':\n ProdutoControl.filtros()\n elif opcaoMenu == '0':\n pmenu = 1\n else:\n print(\"---------------------------------------------------------------------------------------\\nComando Inválido!!!!!\\n---------------------------------------------------------------------------------------\")\n pmenu = 
0\n\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"298377892","text":"'''\n writer: dororongju\n github: https://github.com/djkim1991/pytorchStudy/issues/7\n'''\nfrom example07.MyNeuralNetwork import MyNeuralNetwork\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nclass Example:\n @staticmethod\n def sequential_example():\n network = MyNeuralNetwork()\n train_loader, test_loader = network.load_data()\n\n optimizer = torch.optim.SGD(params=network.parameters(), lr=0.001, momentum=0.9)\n loss_function = nn.CrossEntropyLoss()\n\n epoch_size = 3\n for epoch in range(epoch_size):\n for i, data in enumerate(train_loader):\n inputs, labels = data\n inputs, labels = Variable(inputs), Variable(labels)\n\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n labels = labels.cuda()\n network.cuda()\n\n optimizer.zero_grad()\n out = network.forward(inputs)\n loss = loss_function(out, labels)\n loss.backward()\n optimizer.step()\n\n if(i % 100 == 0):\n print('{0}: loss is {1}'.format(i, loss))\n\n print(\"train over\")\n\n total = 0\n correct = 0\n for _, data in enumerate(test_loader):\n images, labels = data\n\n if torch.cuda.is_available():\n images = images.cuda()\n labels = labels.cuda()\n\n out = network.forward(Variable(images))\n _, predicted = torch.max(out.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\n print('Accuracy is {0}%'.format(100*correct/total))\n\n\nif __name__ == '__main__':\n Example.sequential_example()\n","sub_path":"example07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"121636846","text":"import sys\nscript, input_encoding, error = sys.argv\n\n\ndef main(language_file, encoding, errors):\n line = language_file.readline()\n#readline() will read a row of content of the laguage_file.It will return content when it meet '\\n'.\n if line:\n print_line(line, encoding, errors)\n return main(language_file, encoding, errors)\n#There is a loop.You will find the if-statement keeps this functions from looping forever.\n\ndef print_line(line, encoding, errors):\n next_lang = line.strip()\n#Return a copy of the string with the leading and trailing characters removed. \n#The chars argument is a string specifying the set of characters to be removed. \n#If omitted or None, the chars argument defaults to removing whitespace. \n#The chars argument is not a prefix or suffix; rather, all combinations of its values are stripped:\n#The outermost leading and trailing chars argument values are stripped from the string. \n#Characters are removed from the leading end until reaching a string character that is \n#not contained in the set of characters in chars. A similar action takes place on the \n#trailing end.\n\n raw_bytes = next_lang.encode(encoding, errors=errors)\n#str.encode(encoding=\"utf-8\", errors=\"strict\")\n#Return an encoded version of the string as a bytes object. Default encoding is 'utf-8'. #errors may be given to set a different error handling scheme. The default for errors \n#is 'strict', meaning that encoding errors raise a UnicodeError. Other possible values \n#are 'ignore', 'replace', 'xmlcharrefreplace', 'backslashreplace' and any other name \n#registered via codecs.register_error(), see section Error Handlers. 
For a list of \n#possible encodings, see section Standard Encodings.\n \n cooked_string = raw_bytes.decode(encoding, errors=errors)\n#bytes.decode(encoding=\"utf-8\", errors=\"strict\")\n#It's similiar to str.encoding()\n print(raw_bytes, \"<==>\", cooked_string)\n\n\nlanguages = open(\"languages.txt\", encoding='utf-8')\n\nmain(languages, input_encoding, error)\n","sub_path":"ex23.py","file_name":"ex23.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"3621650","text":"from fancyimpute import SimpleFill\n\nfrom low_rank_data import XY, XY_incomplete, missing_mask\nfrom common import reconstruction_error\nfrom random_initialization import initialize_random_seed\n\n\ndef test_matrix_factorization_with_low_rank_random_matrix():\n initialize_random_seed() # for reproducibility\n solver = SimpleFill(\n fill_method='random')\n initialize_random_seed()\n XY_completed = solver.fit(XY_incomplete, missing_mask)\n _, missing_mae = reconstruction_error(\n XY,\n XY_completed,\n missing_mask,\n name=\"MatrixFactorization\")\n initialize_random_seed()\n XY_completed = solver.transform(XY_incomplete, missing_mask)\n _, missing_mae = reconstruction_error(\n XY,\n XY_completed,\n missing_mask,\n name=\"MatrixFactorization\")\n\nif __name__ == \"__main__\":\n test_matrix_factorization_with_low_rank_random_matrix()\n","sub_path":"test/test_simplefill.py","file_name":"test_simplefill.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"403696240","text":"from resources.helpers import * \nfrom classes.UserDB import UserDB\nfrom classes.Switch import Switch\n\nif __name__==\"__main__\": \n # Variables\n user_db = UserDB()\n user_db.load_user_data() # Load data from json file\n switch = Switch(user_db)\n q = \"\"\n\n while q != 'q':\n choice_messages()\n choice = input(\"\")\n clear()\n switch.check(choice)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"147971395","text":"'''\nCreated: Feb 22, 2017\n\n@author: jes97210\n\nThis file provides a \"course format\", a general structure that all courses on\nthe UBC site share. They typically have a course code, credits, title, and\ndesription. 
They can also have prereqs, equivalents, and coreqs.\nThis file also provides some simple methods.\n'''\n\nimport sys\nimport re\n\ncourse_code = re.compile(\"[A-Z]{4}\\s[0-9]{3}\")\ncourse_credit = re.compile(\"\\(\\d\\)\")\n\n\nclass Course:\n\n def __init__(self, code, credits, title, desc, prq, equ, corq):\n self.code = code\n self.credits = credits\n self.title = title\n self.desc = desc\n self.prq = prq\n self.equ = equ\n self.corq = corq\n\n'''\nThis takes an array of strings, and makes a new Course object.\nTakes: Arr ([String])\n ASSUMES: len(Arr) >= 2\n First entry in Arr has the course code, credits, and title\n Second entry has course description\n Subsequent entries may have prereqs, equivalents, and/or coreqs\nReturns: a Course object\n'''\ndef new_course(Arr):\n a = Arr[0]\n b = Arr[1]\n c = Arr[2:]\n\n # Processing a\n # Typically: 'course_code course_credit title'\n a.strip()\n a1 = course_code.match(a)\n if a1 is None:\n raise IndexError('No course code found!')\n a2 = course_credit.search(a)\n if a2 is None:\n a2 = \"3\" # default if nothing found\n print('No course credit found for: ' + a1.group())\n a3 = a[a1.end():]\n a3.strip()\n else:\n a3 = a[a2.end():]\n a3.strip()\n a2 = a2.group()\n\n a1 = a1.group()\n a1 = a1.replace(\" \", \"\")\n\n # Processing b\n # Basically nothing needed\n b.strip()\n\n # Processing c\n c1 = None # Prerequisite\n c2 = None # Equivalency\n c3 = None # Corequisite\n for i in c:\n if i.startswith('Prerequisite:'):\n c1 = i.replace('Prerequisite:', '', 1).strip()\n elif i.startswith('Corequisite:'):\n c2 = i.replace('Corequisite:', '', 1).strip()\n elif i.startswith('Equivalency:'):\n c3 = i.replace('Equivalency:', '', 1).strip()\n else:\n raise ValueError('Unexpected string: ' + i)\n\n # Make the Course instance\n return Course(a1, a2, a3, b, c1, c2, c3)\n","sub_path":"course_scraper/course_format.py","file_name":"course_format.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"54425573","text":"def _first(iterator):\n try:\n return next(iterator)\n except StopIteration:\n raise ValueError(\"iterable must not be empty to get first element\")\n\n\ndef split(iterable):\n \"\"\"\n Split the head from the rest of the elements in iterable.\n\n Raises:\n ValueError if iterable is empty\n \"\"\"\n it = iter(iterable)\n first = _first(it)\n\n def rest_fn():\n try:\n while True:\n yield next(it)\n except StopIteration:\n pass\n\n rest = rest_fn()\n return first, rest\n\n\ndef first(iterable):\n \"\"\"\n Get the first entry from iterable.\n\n Raises:\n ValueError if iterable is empty\n \"\"\"\n return _first(iter(iterable))\n\n\ndef single(iterable):\n \"\"\"\n Get the single entry from iterable.\n\n Raises:\n ValueError if more than 1 entry found.\n \"\"\"\n it = iter(iterable)\n first = _first(it)\n try:\n next(it)\n raise ValueError(\"iterable had more than one entry\")\n except StopIteration:\n return first\n","sub_path":"shape_tfds/core/collection_utils/iterable.py","file_name":"iterable.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"577956175","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\nimport logging\nimport sys\n\nfrom db.mongo import Mongo\nfrom db.ppasses import get_production_records\nfrom db.usernames import Usernames\nusers = Usernames()\n\nlogging.basicConfig(level=logging.INFO, format='%(message)s')\n\n\ndef 
parse_args(argv):\n parser = argparse.ArgumentParser(description='Display mongo entitlement records for a user')\n parser.add_argument('-a', '--all', action='store_true', default=False, help='All customer record')\n parser.add_argument('-i', '--atv', action='store_true', default=False, help='Apple TV records')\n parser.add_argument('-l', '--list_users', action='store_true', default=False, help='List all QA usernames')\n parser.add_argument('-p', '--prod_user', action='store_true', default=False, help='Production user')\n parser.add_argument('-v', '--vodafone', action='store_true', default=False, help='Vodafone records')\n parser.add_argument('user', action=\"store\", nargs='?', help='Username or profileid')\n\n if len(argv) == 1:\n parser.print_usage()\n exit(1)\n else:\n return parser.parse_args(argv[1:])\n\n\ndef command_line_runner(argv=None):\n if argv is None:\n argv = sys.argv\n\n args = parse_args(argv)\n\n # List all QA users\n if args.list_users:\n print(users.list_usernames())\n return\n\n if args.user:\n profileid = users.get_profileid(args.user)\n if profileid:\n if args.prod_user:\n print(get_production_records(profileid))\n else:\n print(get_records(profileid, args))\n return\n\n\ndef get_records(profileid, include):\n mongo = Mongo(profileid)\n report = ''\n report += mongo.get_accounts()\n report += mongo.get_entitlements()\n\n if include.atv or include.all:\n report += mongo.get_atv_subscriptions()\n\n if include.vodafone or include.all:\n report += mongo.get_vodafone_accounts()\n return report\n\n\nif __name__ == '__main__':\n sys.exit(command_line_runner())\n","sub_path":"db/passes.py","file_name":"passes.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"379529184","text":"MONTHS = {\n 1: 'January',\n 2: 'Febuary',\n 3: 'March',\n 4: 'April',\n 5: 'May',\n 6: 'June',\n 7: 'July',\n 8: 'August',\n 9: 'September',\n 10: 'October',\n 11: 'November',\n 12: 'December'\n}\n\nDAYS = {\n 0: 'Monday',\n 1: 'Tuesday',\n 2: 'Wednesday',\n 3: 'Thursday',\n 4: 'Friday',\n 5: 'Saturday',\n 6: 'Sunday'\n}\n\ndef date(timestamp, pattern):\n output = (pattern\n .replace('%%', '%')\n .replace('%Y', str(timestamp.year))\n .replace('%m', str(timestamp.month))\n .replace('%d', str(timestamp.day))\n .replace('%H', str(timestamp.hour))\n .replace('%M', str(timestamp.minute))\n .replace('%S', str(timestamp.second))\n .replace('%A', month_name(timestamp))\n .replace(\"%B\", weekday_name(timestamp)))\n print(output)\n\n\ndef month_name(timestamp):\n return MONTHS[timestamp.month]\n\n\ndef weekday_name(timestamp):\n return DAYS[timestamp.weekday()]\n","sub_path":"date/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"172648201","text":"n = int(input())\nmy_dict = {}\n\nfor i in range(n):\n order, key = input().split(' ')\n if order == 'insert':\n my_dict[key] = True\n elif order== 'find':\n if key in my_dict.keys():\n print('yes')\n else:\n print('no')\n","sub_path":"Python_codes/p02269/s458956564.py","file_name":"s458956564.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"508983260","text":"#!/usr/bin/python3\nimport ipaddress as ipa\nimport socket, struct\nimport sys,os\npath = 
os.path.abspath(os.path.realpath(__file__)+\"/../..\")\nsys.path.append(path)\nsys.path.append(\"/etc/networkmanagement\")\nimport helpers\nimport server_config\n\nprint(\"#DO NOT EDIT - This file was generated automatically from an MySQL-Database\")\n\ncontexts = helpers.Context.get_all()\n\nfor context in contexts:\n devices = helpers.Device.get_where(\"context=%s AND type = 'dhcp'\",(context.id,))\n if len(devices) is 0 and not context.dhcp:\n continue\n print()\n print(\"#\"+context.description)\n network = ipa.ip_network(context.iprange)\n naddr= str(network.network_address)\n baddr= str(network.broadcast_address)\n first_host = socket.inet_ntoa(struct.pack(\"!L\", struct.unpack(\"!L\", socket.inet_aton(naddr))[0]+1))\n last_host = socket.inet_ntoa(struct.pack(\"!L\", struct.unpack(\"!L\", socket.inet_aton(baddr))[0]-1))\n\n if context.dhcp:\n print(\"dhcp-range=\"+first_host+\",\"+last_host+\",1h\")\n else:\n print(\"dhcp-range=\"+naddr+\",static,1h\")\n\n for device in devices:\n tag = \"internet\" if device.internet else \"nointernet\"\n print(\"dhcp-host=\"+device.identifier+\",\"+device.ip+\",set:\"+tag+\" #\"+device.description+\" (\"+device.hostname+\")\")\n\n\n","sub_path":"generateconfig/generatednsmasqconf.py","file_name":"generatednsmasqconf.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"157650487","text":"# python3\n\n\"\"\"\nIn this class, I did ... Input variables are:\n\n\nexample python code use:\n\n\nAuthor: Phong Nguyen (vietphong.nguyen@gmail.com)\nLast modified: SEP 2019\n\"\"\"\n\n\nimport xlrd\n\nfrom read_LC_table_from_file import to_interval, to_sex, to_age, to_race, to_survival\n\n\ndef read_distant_cancer_table_from_file(file_name):\n print(\"Reading Distant Cancer table from file [\" + file_name + \"] ...\",end=\"\")\n # Summary interval from 1 month to 120 months\n INTERVAL = 120\n # Sex: 0=male 1=female\n SEX = 2\n # Race and origin recode (NHW, NHB, NHAIAN, NHAPI, Hispanic)\n # 0=Non-Hispanic White,\n # 1=Non-Hispanic Black,\n # 2=Hispanic (All Races),\n # 3=Non-Hispanic American Indian/Alaska Native,\n # 4=Non-Hispanic Asian or Pacific Islander\n # 5=Non-Hispanic Unknown Race\n RACE = 6\n # Age deciles (40-49 years, 50-59 years, 60-69 years, 70-79 years, 80+ years)\n AGE = 5\n\n table = [[[[[] for i in range(AGE)] for i in range(RACE)] for i in range(SEX)] for i in range(INTERVAL)]\n\n input_workbook = xlrd.open_workbook(file_name)\n input_worksheet = input_workbook.sheet_by_name(\"Survival rate-Distant cancer\")\n for i in range(1, input_worksheet.nrows):\n interval = to_interval(input_worksheet.cell_value(i, 1))\n sex = to_sex(input_worksheet.cell_value(i, 2))\n race = to_race(input_worksheet.cell_value(i, 3))\n age = to_age(input_worksheet.cell_value(i, 4))\n survival = to_survival(input_worksheet.cell_value(i, 5))\n # print(interval, sex, race, age)\n table[interval - 1][sex][race][age] = survival\n\n print(\"done\")\n return table\n","sub_path":"read_distant_cancer_table_from_file.py","file_name":"read_distant_cancer_table_from_file.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"377559228","text":"#\n# [62] Unique Paths\n#\n# https://leetcode.com/problems/unique-paths/description/\n#\n# algorithms\n# Medium (43.45%)\n# Total Accepted: 200.2K\n# Total Submissions: 460.8K\n# Testcase Example: '3\\n2'\n#\n# A robot is located at the top-left corner 
of a m x n grid (marked 'Start' in\n# the diagram below).\n#\n# The robot can only move either down or right at any point in time. The robot\n# is trying to reach the bottom-right corner of the grid (marked 'Finish' in\n# the diagram below).\n#\n# How many possible unique paths are there?\n#\n#\n# Above is a 7 x 3 grid. How many possible unique paths are there?\n#\n# Note: m and n will be at most 100.\n#\n# Example 1:\n#\n#\n# Input: m = 3, n = 2\n# Output: 3\n# Explanation:\n# From the top-left corner, there are a total of 3 ways to reach the\n# bottom-right corner:\n# 1. Right -> Right -> Down\n# 2. Right -> Down -> Right\n# 3. Down -> Right -> Right\n#\n#\n# Example 2:\n#\n#\n# Input: m = 7, n = 3\n# Output: 28\n#\n#\n\n\nclass Solution:\n def uniquePaths(self, m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n dp = [[1]*m for _ in range(n)]\n\n for i in range(1, m):\n dp[0][i] = 1\n for i in range(1, n):\n dp[i][0] = 1\n\n for ni in range(1, n):\n for mi in range(1, m):\n dp[ni][mi] = dp[ni][mi-1] + dp[ni-1][mi]\n\n return dp[n-1][m-1]\n\n\ndef assert_eq(actual, expected):\n if actual != expected:\n raise AssertionError('expected: %s, actual: %s' % (expected, actual))\n\n\ndef test(input_, output):\n assert_eq(Solution().uniquePaths(*input_), output)\n\n\nif __name__ == '__main__':\n test((1, 1), 1)\n test((3, 2), 3)\n test((7, 3), 28)\n","sub_path":"myleetcode/062_unique_paths/62.unique-paths.python3.py","file_name":"62.unique-paths.python3.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"647508590","text":"# -*- coding: UTF-8 -*-\nfrom urllib import request\nfrom urllib import error\nif __name__==\"__main__\":\n url=input(\"要访问的网址:\")\n req=request.Request(url)\n try:\n response=request.urlopen(req)\n htm=response.read().decode('utf-8')\n print(htm)\n except error.HTTPError as e:\n #服务器正确但是没有那个资源\n print(e.code)\n except error.URLError as e:\n #服务器不存在\n print(e.reason)\n","sub_path":"爬虫/urllib_test04.py","file_name":"urllib_test04.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"394113409","text":"from __future__ import absolute_import\nimport sys\nimport time\nfrom .utils import format_partition, mount\nfrom ...utils import *\nfrom ...config import usr_cfg, setup_logger\n\n\nlogger = setup_logger(__name__)\n\n\ndef partition():\n system('clear')\n print_title(\"Step 4) Manual Partititon (careful in this step)\")\n time.sleep(1)\n\n confirm_drive = cinput('> Please confirm the drive by typing ' \\\n + '{0}: '.format(usr_cfg['drive']), COLORS['OKBLUE'])\n if confirm_drive != usr_cfg['drive']:\n print_error(\"Mismatch in drives. 
Try again.\")\n partition()\n system(\"lsblk {0}\".format(usr_cfg['drive']))\n\n _table_cmd = \"fdisk -l {0} | grep Disklabel | cut -d ' ' -f 3\"\n partition_table = system_output(_table_cmd.format(usr_cfg['drive']))\n\n if partition_table == 'gpt':\n print_info(\"For the GPT partition table, the suggested partition \" \\\n + \"scheme looks likethis:\\nmountpoint partition \" \\\n + \"partition type boot flagsuggested size\\n_________\" \\\n + \"____________________________________________________________\" \\\n + \"____________________\\n/boot /dev/sdx1 EFI\" \\\n + \" System Partition Yes 260-512 MiB\\n\\n[SWAP] \" \\\n + \" /dev/sdx2 Linux swap No More \" \\\n + \"than 512 MiB\\n\\n/ /dev/sdx3 Linux \" \\\n + \"(ext4) No Remainder of the device\\n\\nWARNING\" \\\n + \": If dual-booting with an existing installation of Windows on \" \\\n + \"a UEFI/GPT system,\\navoid reformatting the UEFI partition, as \" \\\n + \"this includes the Windows .efifile required to boot it.\")\n elif partition_table == 'dos':\n print_info(\"For the MBR partition table, the suggested partition \" \\\n + \"scheme looks like this:\\nmountpoint partition \" \\\n + \"partition type boot flag suggested size\\n__________\" \\\n + \"______________________________________________________________\" \\\n + \"_________________\\n[SWAP] /dev/sdx1 Linux \" \\\n + \"swap No More than 512 MiB\\n\\n/ \" \\\n + \" /dev/sdx2 Linux (ext4) Yes \" \\\n + \"Remainder of the device\")\n\n usr_cfg['manual_partition_table'] = partition_table\n\n if query_yes_no('''> I've read this and wish to continue to the partitioner.''', 'yes'):\n sp.call(\"clear\", shell=True)\n sp.call('cfdisk {0}'.format(usr_cfg['drive']), shell=True)\n sp.call(\"clear\", shell=True)\n sp.call('lsblk {0}'.format(usr_cfg['drive']), shell=True)\n if not query_yes_no('''> Are you sure your partitions are set up correctly?''', 'yes'):\n partition()\n else:\n sys.exit()\n\n format_partition()\n mount()\n","sub_path":"asinstaller/partitions/manual/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"583922896","text":"import os\n\n\ndef copy_file():\n\t# 先打开文件,再把这个文件写入到另一个目录下\n\twith open(r'C:\\Users\\Lenovo\\Desktop\\abc.txt', 'rt') as fp:\n\t stream = fp.read()\n\t filepath = fp.name\n\t filename = filepath[filepath.rfind(\"\\\\\")+1:]\n\t # 当前路径\n\t path = os.path.dirname(__file__)\n\t final_filename = os.path.join(path,filename)\n\t with open(final_filename, 'w') as fp:\n\t fp.write(stream)\n\treturn None\n\n\ndef remove_dir():\n\tpath = r'C:\\Users\\Lenovo\\Desktop\\f'\n\t# 要删除这个path下的所有文件,先确保这个文件夹下是不是全为文件。\n\tfilelist = os.listdir(path)\n\tfor file in filelist:\n\t\tos.remove(os.path.join(path,file))\n\telse:\n\t\t# 删除文件夹\n\t\tos.rmdir(path)\n\n\ndef os_api():\n\tpath = r'C:\\Users\\Lenovo\\Desktop\\abc.txt'\n\tprint(os.path.isabs(path))\n\tprint(os.path.abspath(\"./abc.txt\"))\n\tprint(os.path.dirname(__file__))\n\tprint(os.getcwd())\n\tprint(os.path.split(path))\n\tprint(os.path.splitext(path))\n\tprint(os.path.getsize(\"./abc.txt\"),\"字节\")\n\tprint(os.path.isdir(path))\n\n\n\nif __name__ == \"__main__\":\n\tremove_dir()\n\n\t","sub_path":"part03/file_os.py","file_name":"file_os.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"580968309","text":"from django.conf import settings\nfrom django.db import models\nfrom 
django.db.models import Q, Sum, F\nfrom django_extensions.db.fields import AutoSlugField\nfrom ordered_model.models import OrderedModel, OrderedModelQuerySet\nfrom django.utils.translation import ugettext_lazy as _\nfrom embed_video.fields import EmbedVideoField\n\nfrom coapp.academy.courses.models import UserModuleLesson\nfrom coapp.core.actions.models import Actions\nfrom coapp.core.checkpoints.models import Checkpoint, Checkpoints, UserCheckpoint\nfrom coapp.core.content.models import ContentField\nfrom coapp.core.models import TimeStampedBase, OwnedBase, ChoiceEnum\nfrom coapp.core.users.models import User\nfrom coapp.core.utils.date import get_current_time\nfrom coapp.core.utils.videos import VideoService\n\n\nclass Journey(TimeStampedBase, OrderedModel):\n name = models.CharField(\n verbose_name=_('name'),\n max_length=255,\n unique=True\n )\n video_url = EmbedVideoField(\n verbose_name=_('video url'),\n blank=True\n )\n content = ContentField(\n verbose_name=_('content'),\n blank=True,\n )\n\n @property\n def thumbnail(self):\n if self.video_url:\n return VideoService.get_thumbnail(self.video_url)\n return ''\n\n # Reverse: quests\n\n class Meta(OrderedModel.Meta):\n default_related_name = 'journeys'\n\n def __str__(self):\n return self.name\n\n\nclass QuestQuerySet(OrderedModelQuerySet):\n\n def search(self, query):\n queryset = self.filter(Q(name__icontains=query) | Q(content__icontains=query))\n return queryset\n\n def unlocked_by(self, user):\n queryset = self.filter(unlocked_by=user)\n return queryset\n\n def locked_for(self, user):\n queryset = self.exclude(unlocked_by=user)\n return queryset\n\n\ndef get_default_journey():\n return Journey.objects.get_or_create(name='My Legend')[0].pk\n\n\nclass Quest(TimeStampedBase, OrderedModel):\n journey = models.ForeignKey(\n to=Journey,\n on_delete=models.CASCADE,\n default=get_default_journey,\n )\n name = models.CharField(\n verbose_name=_('name'),\n max_length=255,\n )\n\n code = AutoSlugField(\n verbose_name=_('code'),\n populate_from='name',\n editable=True,\n )\n\n intro_video_url = EmbedVideoField(\n verbose_name=_('intro video url'),\n blank=True,\n )\n\n @property\n def intro_thumbnail(self):\n return VideoService.get_thumbnail(self.intro_video_url)\n\n # Rewards\n\n outro_video_url = EmbedVideoField(\n verbose_name=_('outro video url'),\n blank=True\n )\n\n @property\n def outro_thumbnail(self):\n return VideoService.get_thumbnail(self.outro_video_url)\n\n experience_1 = models.PositiveSmallIntegerField(\n verbose_name=_('apples'),\n default=0,\n )\n\n experience_2 = models.PositiveSmallIntegerField(\n verbose_name=_('smileys'),\n default=0,\n )\n\n experience_3 = models.PositiveSmallIntegerField(\n verbose_name=_('suns'),\n default=0,\n )\n\n experience_4 = models.PositiveSmallIntegerField(\n verbose_name=_('hearts'),\n default=0,\n )\n\n experience_5 = models.PositiveSmallIntegerField(\n verbose_name=_('bubbles'),\n default=0,\n )\n\n experience_6 = models.PositiveSmallIntegerField(\n verbose_name=_('bulbs'),\n default=0,\n )\n\n experience_7 = models.PositiveSmallIntegerField(\n verbose_name=_('moons'),\n default=0,\n )\n\n @property\n def objective_experience(self):\n return self.objectives.aggregate(experience=Sum(\n F('experience_1') + F('experience_2') +\n F('experience_3') + F('experience_4') + F('experience_5') +\n F('experience_6') + F('experience_7'))\n ).get('experience') or 0\n\n @property\n def total_experience(self):\n bonus_experience = self.experience_1 + self.experience_2 + self.experience_3 + 
self.experience_4 + self.experience_5 + self.experience_6 + self.experience_7\n objective_experience = self.objective_experience\n return objective_experience + bonus_experience\n\n # Misc\n\n content = ContentField(\n verbose_name=_('content'),\n blank=True,\n )\n\n # Reverse: objectives\n\n unlocked_by = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='unlocked_quests',\n through='UserQuest',\n )\n\n order_with_respect_to = 'journey'\n\n objects = QuestQuerySet.as_manager()\n\n class Meta(OrderedModel.Meta):\n default_related_name = 'quests'\n unique_together = ['journey', 'name']\n\n def __str__(self):\n return self.name\n\n\nclass QuestObjective(TimeStampedBase, OrderedModel):\n quest = models.ForeignKey(\n verbose_name=_('quest'),\n to=Quest,\n on_delete=models.CASCADE,\n )\n\n name = models.CharField(\n verbose_name=_('name'),\n max_length=255,\n )\n\n code = AutoSlugField(\n verbose_name=_('code'),\n populate_from='name',\n editable=True,\n )\n\n video_url = EmbedVideoField(\n verbose_name=_('video url'),\n blank=True,\n )\n\n @property\n def thumbnail(self):\n return VideoService.get_thumbnail(self.video_url)\n\n content = ContentField(\n verbose_name=_('content'),\n blank=True,\n )\n\n checkpoint = models.ForeignKey(\n verbose_name=_('checkpoint'),\n to=Checkpoint,\n on_delete=models.SET_NULL,\n null=True, blank=True,\n help_text=_('The checkpoint is required to complete the obective.'),\n )\n\n # Reward(s?)\n\n outro_text = models.TextField(\n verbose_name=_('outro text'),\n blank=True,\n )\n\n experience_1 = models.PositiveSmallIntegerField(\n verbose_name=_('apples'),\n default=0,\n )\n\n experience_2 = models.PositiveSmallIntegerField(\n verbose_name=_('smileys'),\n default=0,\n )\n\n experience_3 = models.PositiveSmallIntegerField(\n verbose_name=_('suns'),\n default=0,\n )\n\n experience_4 = models.PositiveSmallIntegerField(\n verbose_name=_('hearts'),\n default=0,\n )\n\n experience_5 = models.PositiveSmallIntegerField(\n verbose_name=_('bubbles'),\n default=0,\n )\n\n experience_6 = models.PositiveSmallIntegerField(\n verbose_name=_('bulbs'),\n default=0,\n )\n\n experience_7 = models.PositiveSmallIntegerField(\n verbose_name=_('moons'),\n default=0,\n )\n\n order_with_respect_to = 'quest'\n\n class Meta(OrderedModel.Meta):\n default_related_name = 'objectives'\n\n def __str__(self):\n return self.name\n\n\nclass UserQuest(OwnedBase, TimeStampedBase):\n quest = models.ForeignKey(\n to=Quest,\n on_delete=models.CASCADE,\n )\n\n class Status(ChoiceEnum):\n CURRENT = 'current'\n READY = 'ready' # Ready to be completed (all objectives done)\n DONE = 'done'\n\n @classmethod\n def open(cls):\n return [cls.CURRENT.value]\n\n @classmethod\n def closed(cls):\n return [status.value for status in [cls.DONE, cls.READY]]\n\n status = models.CharField(\n verbose_name=_('status'),\n choices=Status.get_choices(),\n max_length=Status.max_length(),\n default=Status.default(),\n )\n\n completed_on = models.DateTimeField(\n verbose_name=_('completed on'),\n null=True, blank=True,\n )\n\n objectives = models.ManyToManyField(\n to=QuestObjective,\n through='UserQuestObjective',\n )\n\n class Meta:\n default_related_name = 'user_quests'\n unique_together = ['owner', 'quest']\n verbose_name = _(\"user quest\")\n verbose_name_plural = _(\"user quests\")\n ordering = ['owner', 'quest']\n\n def __str__(self):\n return f'[{self.owner}] {self.quest}'\n\n def has_completed_all_objectives(self):\n has_uncompleted_objectives = self.user_quest_objectives.filter(\n 
status__in=UserQuestObjective.Status.open()).exists()\n return not has_uncompleted_objectives\n\n def update_status(self, commit=True):\n \"\"\"\n Checking if the quest is or has been completed.\n If so: Update the status from CURRENT to READY.\n :return: status:\n \"\"\"\n if self.status == self.Status.CURRENT.value:\n if self.has_completed_all_objectives():\n self.status = self.Status.READY.value\n if commit:\n self.save()\n return self.status\n\n def complete(self, commit=True):\n if self.status == self.Status.READY.value:\n\n # Updating the status.\n self.status = self.Status.DONE.value\n self.completed_on = get_current_time()\n if commit:\n self.save()\n\n # Making sure the user gets his next quest if present.\n self.add_next_user_quest()\n\n return True\n return False\n\n def add_next_user_quest(self):\n \"\"\"\n Add the next quest for the owner if there is one.\n This also adds the first objective to that relation if possible.\n :return:\n \"\"\"\n next_quest = self.quest.next()\n if next_quest:\n user_quest, created = UserQuest.objects.get_or_create(\n owner=self.owner,\n quest=next_quest,\n )\n if created:\n # Add the first user quest objective.\n user_quest.add_next_user_quest_objective()\n return user_quest\n\n def add_next_user_quest_objective(self):\n last_objective = self.objectives.last()\n if last_objective:\n next_objective = last_objective.next()\n else:\n next_objective = self.quest.objectives.first()\n\n if next_objective:\n user_quest_objective, created = UserQuestObjective.objects.get_or_create(\n user_quest=self,\n objective=next_objective,\n )\n if created:\n return user_quest_objective\n\n\nclass UserQuestObjective(TimeStampedBase):\n user_quest = models.ForeignKey(\n verbose_name=_('user quest'),\n to=UserQuest,\n on_delete=models.CASCADE,\n )\n\n objective = models.ForeignKey(\n to=QuestObjective,\n on_delete=models.CASCADE,\n )\n\n class Status(ChoiceEnum):\n CURRENT = 'current'\n READY = 'ready' # Requirements are met and ready to be celebrated.\n DONE = 'done'\n\n @classmethod\n def open(cls):\n return [cls.CURRENT.value]\n\n @classmethod\n def closed(cls):\n return [status.value for status in [cls.DONE, cls.READY]]\n\n status = models.CharField(\n verbose_name=_('status'),\n choices=Status.get_choices(),\n max_length=Status.max_length(),\n default=Status.default(),\n )\n\n completed_on = models.DateTimeField(\n verbose_name=_('completed on'),\n null=True, blank=True,\n )\n\n @property\n def owner(self):\n return self.user_quest.owner\n\n class Meta:\n default_related_name = 'user_quest_objectives'\n unique_together = ['user_quest', 'objective']\n verbose_name = _(\"user quest objective\")\n verbose_name_plural = _(\"user quest objectives\")\n ordering = ['user_quest', 'objective']\n\n def __str__(self):\n return f'[{self.user_quest}] {self.objective}'\n\n def complete(self, commit=True):\n if self.status == self.Status.READY.value:\n\n # Updating the status.\n self.status = self.Status.DONE.value\n self.completed_on = get_current_time()\n\n if commit:\n self.save()\n\n # Making sure the user gets his next objective if present.\n next_user_quest_objective = self.user_quest.add_next_user_quest_objective()\n if not next_user_quest_objective:\n self.user_quest.update_status(commit=commit)\n\n return True\n return False\n\n def has_checkpoint(self):\n checkpoint = self.objective.checkpoint\n if checkpoint:\n user = self.user_quest.owner\n return user.checkpoints.filter(pk=checkpoint.pk).exists()\n return False\n\n def update_status(self, commit=True):\n \"\"\"\n 
Checking if the objective requirements are or have been met.\n If so: Updating the status.\n :param commit: Save changes?\n :return: status:\n \"\"\"\n if self.status == self.Status.CURRENT.value:\n # Only check if not already completed.\n if self.check_custom_logic() or self.has_checkpoint():\n self.status = self.Status.READY.value\n if commit:\n self.save()\n return self.status\n\n def check_custom_logic(self):\n \"\"\"\n Version 1: Custom if statements.\n TODO: Version 2: Custom if functions map.\n :return: Boolean\n True: Objective requirement has been completed.\n False: Objective requirement has not been completed.\n \"\"\"\n code = self.objective.code\n user: User = self.owner\n if code == 'temple':\n # User has created his character story.\n return user.has_story\n if code == 'caves':\n user.add_checkpoint('caves')\n # User has harvested his Farm once.\n return user.actions.filter(name=Actions.HARVEST_FARM.value).exists()\n if code == 'fair':\n user.add_checkpoint('fair')\n # User done his first adventure.\n return user.reviewed_adventures.exists()\n if code == 'palace':\n user.add_checkpoint('palace')\n # User has set his first focus.\n return user.focuses.exists()\n if code == 'camp':\n user.add_checkpoint('camp')\n # User has: accepted guidelines, onboard chat and shared his emotion.\n has_guidelines = user.has_checkpoint(Checkpoints.GUIDELINES.value)\n has_chat = bool(user.chat_id)\n has_shared_emotion = user.actions.filter(name=Actions.UPDATE_STATUS.value).exists()\n return has_guidelines and has_chat and has_shared_emotion\n if code == 'studio':\n user.add_checkpoint('studio')\n # User has written his first journal entry.\n return user.journal_entries.exists()\n if code == 'academy':\n user.add_checkpoint('academy')\n # User has completed his first course lesson.\n return user.user_modules.filter(user_module_lessons__status=UserModuleLesson.Status.DONE.value).exists()\n return False\n","sub_path":"backend/coapp/temple/quests/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"465932642","text":"import json\nimport os, math\nimport numpy as np\n\npath = os.path.join(\"save\", 'js', \"model_data.json\")\nwith open(path, 'r') as fi:\n\tdata = json.loads(fi.read())\n\ndef check_dimensions(data, asNp = True):\n\tif not asNp:\n\t\treturn data\n\t\n\tif len(data)==1 and len(data[0])==1:\n\t\treturn np.matrix(data[0])\n\t\treturn np.array(data[0][0])\n\telif len(data)==1:\n\t\treturn np.matrix(data)\n\t\treturn np.array(data[0])\n\telse:\n\t\treturn np.matrix(data)\n\t\nembedding = check_dimensions(data[\"embedding\"]) # 3D\nvar0 = check_dimensions(data[\"cell_variables\"][0]) # 2D\nvar1 = check_dimensions(data[\"cell_variables\"][1]) # 1D !!\nsoftmax_w = check_dimensions(data[\"softmax_w\"])\nsoftmax_b = check_dimensions(data[\"softmax_b\"])\n\ninputE = check_dimensions(data[\"iterations\"][0][\"input_embedded\"]) # 3D\ninputS = check_dimensions(data[\"iterations\"][0][\"input_squeezed\"]) # 3D\ninputS2 = check_dimensions(data[\"iterations\"][1][\"input_squeezed\"]) # 3D\ninitC = check_dimensions(data[\"iterations\"][0][\"init_state_c\"]) # 2D\ninitH = check_dimensions(data[\"iterations\"][0][\"init_state_h\"]) # 2D\nfinalC = check_dimensions(data[\"iterations\"][0][\"final_state_c\"]) # 2D\nfinalH = check_dimensions(data[\"iterations\"][0][\"final_state_h\"]) # 2D\nfinalC2 = check_dimensions(data[\"iterations\"][1][\"final_state_c\"]) # 2D\nfinalH2 = 
check_dimensions(data[\"iterations\"][1][\"final_state_h\"]) # 2D\n\ndef nprint(x):\n\tprint(x.shape)\n\ndef stepprint(x, h, c):\n\tprint()\n\tprint (\"x\", x)\n\tprint (\"h\", h)\n\tprint (\"c\", c)\n\nprint(\"embedding\")\nnprint(embedding)\nprint(\"input\")\nnprint(inputE)\nnprint(inputS)\nprint(\"state\")\nnprint(initC)\nnprint(initH)\nnprint(finalC)\nnprint(finalH)\nprint(\"var\")\nnprint(var0)\nnprint(var1)\n\n\n\"\"\"\ninputE = np.matrix(inputE[0])\ninputS = np.matrix(inputS[0])\n\ninitC = np.matrix(initC)\ninitH = np.matrix(initH)\nfinalC = np.matrix(finalC)\nfinalH = np.matrix(finalH)\n\nvar0 = np.matrix(var0)\nvar1 = np.matrix(var1) # np.vector\n\"\"\"\nsigmoid = lambda x: 1. / (1.+math.exp(-x))\nsigmoidv = np.vectorize(sigmoid)\n\ndst = lambda a, b : np.linalg.norm(np.subtract(a, b))\n\nn = 24\n\n\"\"\"\na = np.array([1,2])\nb = np.array([3,4])\n#c = np.stack([a, b] , axis=None)\nc = np.concatenate((a, b), axis=0)\nprint(a, b, c)\n\"\"\"\n\n#https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/rnn_cell_impl.py\ndef lstm_cell(x, h, c, split_permutation):\n\t\n\t# GITHUB \n\t# c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])\n\t# m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])\n\t# inputs, m_prev\n\tv = np.concatenate((x, h), axis=1)\n\t#print(\"vconcat\")\n\t#nprint(v)\n\n\tv = np.matmul(v, var0)\n\tv = np.add(v, var1)\n\t#print(\"vmultadd\")\n\t#nprint(v)\n\t\n\tsplits = [ \n\t\tv[ :, 0:n ],\n\t\tv[ :, n:2*n ],\n\t\tv[ :, 2*n:3*n ],\n\t\tv[ :, 3*n: ] ];\n\t\n\t_forget_bias =1.0\n\t\n\t# GITHUB i = input_gate, j = new_input, f = forget_gate, o = output_gate\n\t# [ 3, 2, 0, 1 ] o is last, f is 3rd, i is 1st, ctmp is 2nd ---- OK\n\to = sigmoidv(splits[split_permutation[0]])\n\tf = sigmoidv(np.add( splits[split_permutation[1]], _forget_bias)) # GITHUB line.857 c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j))\n\ti = sigmoidv(splits[split_permutation[2]])\n\tctmp = np.tanh(splits[split_permutation[3]])\n\t#print(\"vsplit\", o, f, i, ctmp)\n\n\tcprev = c\n\tcnext = np.add( np.multiply(cprev, f), np.multiply(i, ctmp) )\n\n\thnext = np.multiply(np.tanh(cnext), o)\n\n\treturn hnext, cnext\n\t#return cnext[0,0] - finalC[0,0]\n\t\n\tprint(\"cnext\")\n\tprint(cnext)\n\n\t#print(\"tanh(cnext)\", np.tanh(cnext))\n\t#print(\"o\", o)\n\n\tprint(\"finalC\")\n\tprint(finalC)\n\t\n\tprint(\"hnext\") \n\tprint(hnext)\n\n\tprint(\"finalH\")\n\n\tprint(finalH)\n\n\t#print(cv[\"chars\"])\n\t#print(cv[\"vocab\"])\n\nperm = [ 3, 2, 0, 1 ]\n#perm = [ 2, 3, 0, 1 ]\n\nh, c = initH, initC\n\nx = inputS\nstepprint(x, h, c)\nh, c = lstm_cell(x, h, c, perm)\nprint(\"errc\", dst(c, finalC))\nprint(\"errh\", dst(h, finalH))\n#h, c = finalH, finalC\n\nx = inputS2\nstepprint(x, h, c)\nh, c = lstm_cell(x, h, c, perm)\nprint(\"errc\", dst(c, finalC2))\nprint(\"errh\", dst(h, finalH2))\n\nprint()\n\ndef tryall():\n\tfor a in range(4):\n\t\tfor b in range(4):\n\t\t\tfor c in range(4):\n\t\t\t\tfor d in range(4):\n\t\t\t\t\tp = [ a, b, c, d ]\n\t\t\t\t\tp.sort()\n\t\t\t\t\tr = np.add( np.array(p), np.array([ 0, -1, -2, -3 ]) )\n\t\t\t\t\tr = np.dot(r, r)\n\t\t\t\t\tif r==0:\n\t\t\t\t\t\th2, c2 = lstm_cell(inputS, initH, initC, [ a, b, c, d ])\n\t\t\t\t\t\terr = c2[0,0] - finalC[0,0]\n\t\t\t\t\t\terr = np.linalg.norm(np.subtract(c2, finalC))\n\t\t\t\t\t\tprint([ a, b, c, d ], \"err\", 
err)\n\ntryall()\n","sub_path":"quicktest.py","file_name":"quicktest.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"285720212","text":"import os\nfrom PIL import Image as pil\n# Import python image library to handle images\nimport numpy as np\n# Import numpy library to convert the image to array\n\n# Define a global variable\n#@app.route('/uploads')\ndef load_image():\n im=pil.open(\"uploads/123.jpg\").convert(\"L\")\n # Open one image from folder and convert it to black and white image\n im=im.resize((28,28))\n # Resize the image\n #im.show()\n new_im=im.save(\"uploads/456.jpg\")\n # Save it as a new image with other name\n #im=np.array(im,'f')\n new_im=np.array(im,'f')\n # Convert image to numpy array format\n \n #im=np.reshape[28,28]\n #im=np.reshape(im,(28,28))\n \n\n rows,cols=new_im.shape\n for i in range(rows):\n for j in range(cols):\n if(new_im[i,j]<=128):\n new_im[i,j]=0\n else:\n new_im[i,j]=1\n new_array=np.reshape(new_im,(1,784))\n # Set a new arrar to reshape the original array as one row and 784 cloumns \n #im=pil.fromarray(im)\n\n #im.save(\"6.jpg\")\n print(new_array)\n \n # data=im.getdata()\n #data=np.matrix(data)\n\n #data=np.reshape(data,(28,28))\n #new_im=pil.fromarray(data)\n #print(np.asarray(im))\n #new_im.save(\"aa.jpg\")\n\nif __name__==\"__main__\":\n load_image()\n\n","sub_path":"Resave.py","file_name":"Resave.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"227990744","text":"import spacy\nfrom pprint import pprint\nimport os\nimport argparse\nfrom utils.preprocess_func import load_data, load_emb_vocab, build_vocab, build_embedding, build_data\nfrom utils.preprocess_utils import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--glove_path', type=str, default='data/glove.840B.300d.txt',\n help='Path to stored glove vectors')\nparser.add_argument('--data_dir', type=str, default='./data',\n help='Path to raw dataset, new dataset is written here')\nparser.add_argument('--embedding_dim', type=int, default=300,\n help='Dimensions to use for GLoVE embeddings')\n\nargs = parser.parse_args()\n\npath_to_glove = args.glove_path\ndata_dir = args.data_dir\nEMBED_DIM = args.embedding_dim\n\nNLP = spacy.load('en', disable=['vectors', 'textcat', 'parser'])\n\n# Load data from relevant files located in atgs.data_dir directory (data/ by default)\ntrain_data = load_data(fname=os.path.join(data_dir, 'train-v2.0.json'), train=True)\ndev_data = load_data(fname=os.path.join(data_dir, 'dev-v2.0.json'), train=True)\n# pprint(dev_data.shape)\n\n# Load glove embeddings\nemb_vocab = load_emb_vocab(fname=path_to_glove, dim=EMBED_DIM)\n# pprint(emb_vocab)\n\n# Build vocabulary consisting of all words in the train and dev sets\n# set batch size according to your RAM usage\nvocab, vocab_tag, vocab_ner = build_vocab(train_data + dev_data, emb_vocab, sort_all=True)\n\n# Build the word embeddings using glove vectors for our vocabulary\nemb = build_embedding(fname=path_to_glove, vocab=vocab, dim=EMBED_DIM)\n\n# Store embeddings, vocab for later use\nmeta_path = os.join.path(args.data_dir, f'{args.meta}_{version}.pick')\nmeta = {'vocab': vocab, 'vocab_tag': vocab_tag, 'vocab_ner': vocab_ner, 'embedding': emb}\nwith open(meta_path, 'wb') as f:\n pickle.dump(meta, f)\n\n# Make final data and save in data_dir (default: data/)\nbuild_data(train_data, vocab, vocab_tag, vocab_ner, 
os.path.join(data_dir, f'train_data_{version}.json'),\n is_train=True, NLP=NLP)\nbuild_data(dev_data, vocab, vocab_tag, vocab_ner, os.path.join(data_dir, f'dev_data_{version}.json'),\n is_train=False, NLP=NLP)\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"569212281","text":"\"\"\"\n @Time : 2018/9/2 16:56\n @Author : wangcai\n @File : scikit-learn-cnn.py\n 使用 scikit-learn 实现 K-近邻算法\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets\n\n# 加载 IRIS 数据集合\nscikit_iris = datasets.load_iris()\n# 转换为 padnas 的 DataFrame 结构,以便于观察数据\npd_iris = pd.DataFrame(data= np.c_[scikit_iris['data'], scikit_iris['target']],\n columns= np.append(scikit_iris.feature_names, ['y']))\nprint(pd_iris.head())\n\n# 选择全部特征参与训练\nX = pd_iris[scikit_iris.feature_names]\ny = pd_iris['y']\n\n# (1) 选择模型\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors = 1)\n\n# (2) 训练模型\nknn.fit(X, y)\n# (3) 预测新数据\nknn.predict([[4, 3, 5, 3]])\nprint(knn.predict([[4, 3, 5, 3]]))\n","sub_path":"k-Nearest-Neighbor/scikit-learn-cnn.py","file_name":"scikit-learn-cnn.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"391098773","text":"import http.client\nserver = \"openapi.naver.com\"\nclient_id = \"ZuLj_9774MFbh52EAnsz\"\nclient_secret = \"0fgHPxzAdQ\"\nconn = http.client.HTTPSConnection(server)\nkeyword = \"5g\"\nkeyword = keyword.encode(\"utf-8\")\nconn.request(\"GET\", \"/v1/search/doc.xml?query={0}&display=10&start=1\".format(keyword),\n None,{\"X-Naver-Client-Id\": client_id, \"X-Naver-Client-Secret\": client_secret}) \nreq = conn.getresponse() \nprint(req.status, req.reason)\ncLen = req.getheader(\"Content-Length\")\nprint(req.read(int(cLen)).decode('utf-8'))","sub_path":"네이버 학술검색 api.py","file_name":"네이버 학술검색 api.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"20711171","text":"from selenium import webdriver\nimport pickle, time\n\nuser = \"Ironstone1_\"\npasw = \"Gingko1234\"\nurl = \"steamcommunity.com\"\n\ndef login():\n print(\"logging into '\" + url + \"'!\")\n try:\n # LOAD COOKIES\n driver.get(\"https://\"+ url +\"/login\")\n for cookie in pickle.load(open(url+\".txt\", \"rb\")): driver.add_cookie(cookie)\n\n driver.refresh()\n driver.find_element_by_xpath('//*[@id=\"account_pulldown\"]')\n\n except:\n # LOGIN INTO STEAM COMMUNITY\n driver.find_element_by_xpath('//input[@name=\"username\"]').send_keys(user)\n driver.find_element_by_xpath('//input[@name=\"password\"]').send_keys(pasw)\n driver.find_element_by_xpath('//div[@id=\"login_btn_signin\"]//*').click()\n\n while url+\"/login\" in driver.current_url:\n time.sleep(1)\n\n driver.get(\"https://\" +url)\n pickle.dump(driver.get_cookies(), open(url+\".txt\", \"wb\"))\n\n driver.get(\"https://scrap.tf/login\")\n driver.find_element_by_xpath('//input[@id=\"imageLogin\"]').click()\n while 'steamcommunity.com/openid/login' in driver.current_url: time.sleep(1)\n\ndef scroll():\n elem = '//div[@class=\"panel-body pag-done pag-loading\"]'\n \n driver.get(\"https://scrap.tf/raffles\")\n while driver.find_element_by_xpath(elem).text != \"That's all, no more!\":\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n\ndef 
enter_raffles():\n Raffles = []\n scroll()\n\n link = 'a[href*=\"/raffles/\"]'\n elem = '//i18n[@rel=\"%entered-raffles\"]//var'\n total = int(driver.find_element_by_xpath(elem).text.split(\"/\")[1])\n \n for i in range(total):\n panel = '//div[@class=\"panel-raffle\"]['+str(i+1)+']'\n panel = driver.find_element_by_xpath(panel)\n href = panel.find_element_by_css_selector(link).get_attribute(\"href\")\n \n if panel.get_attribute(\"style\") != 'opacity: 0.6;':\n Raffles.append(href)\n\n for raffle in Raffles: #enter raffles\n try:\n driver.get(raffle)\n button = driver.find_element_by_id(\"raffle-enter\")\n\n if button.text == 'Enter Raffle':\n button.click(), time.sleep(2.5)\n print(\"entered raffle '\" + raffle + \"'!\")\n\n except:\n print(\"failed to enter raffle '\" + raffle + \"'!\")\n\ndef start():\n print(\"entering raffles...\")\n enter_raffles()\n driver.get(\"https://scrap.tf/raffles\")\n\n print(time.strftime(\"[+] Raffles Check: %e %B %H:%M\"))\n print(driver.find_element_by_xpath('//i18n[@rel=\"%entered-raffles\"]').text)\n\nprint(open(\"start_banner.txt\", encoding=\"utf-8\").read()[1:]) # display banner\nprint(\"loading webdriver...\")\ndriver = webdriver.Chrome()\n\nlogin()\n\nwhile True:\n try:\n start()\n time.sleep(300)\n except:\n time.sleep(120)\n","sub_path":"Python/!PYTHON/Programming/game/TF2/bot 5/scrap.tf/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"517359111","text":"from math import cos, pi, sqrt, sin, hypot, atan2\nfrom itertools import repeat\nfrom random import randrange\nimport numpy as np\nfrom matplotlib.widgets import Slider\nfrom matplotlib import pyplot as plt\n\n\nN = 1024\nTEST_AMPLITUDES = [1, 3, 5, 8, 10, 12, 16]\nTEST_PHASES = [pi / 6, pi / 4, pi / 3, pi / 2, 3 * pi / 4, pi]\n\nERROR_LEVEL = 0.001\nPOLY_COUNT = 30\n\n\ndef signal_point(x, N):\n return 10 * cos((2 * pi * x) / N)\n\n\ndef fourier_path(func, sequence, j, N):\n result = sum(x * func(2 * pi * i * j / N) for i, x in enumerate(sequence))\n return (2 / N) * result\n\n\ndef fourier_spectrum(sequence):\n N = len(sequence)\n spectrum_list = []\n for j in range(N):\n cosine = fourier_path(cos, sequence, j, N)\n sine = fourier_path(sin, sequence, j, N)\n amplitude = hypot(sine, cosine)\n phase = atan2(sine, cosine)\n spectrum_list.append((amplitude, phase if abs(amplitude) > ERROR_LEVEL else 0))\n\n return spectrum_list\n\n\ndef fast_fourier_spectrum(x):\n x = np.asarray(x, dtype=float)\n N = x.shape[0]\n\n if N == 1:\n return x\n\n X_even = fast_fourier_spectrum(x[::2])\n X_odd = fast_fourier_spectrum(x[1::2])\n factor = np.exp(-2j * np.pi * np.arange(N) / N)\n result = np.concatenate([X_even + factor[:N // 2] * X_odd, X_even + factor[N // 2:] * X_odd])\n return result\n\n\ndef get_phase(x):\n return - np.arctan2(np.imag(x), np.real(x))\n\n\ndef get_fast_fourier_spectrum(spectrum):\n fft_result = fast_fourier_spectrum(spectrum)\n N = len(spectrum)\n amplitudes = [abs(x) * 2 / N for x in fft_result]\n phases = [get_phase(x) if amplitudes[i] > ERROR_LEVEL else 0 for i, x in enumerate(fft_result)]\n return list(zip(amplitudes, phases))\n\n\ndef spectrum_point(index, j, N, amplitude, phase):\n return amplitude * cos((2 * pi * index * j / N) - phase)\n\n\ndef polyharmonic(index, N, repeats, spectrum):\n return sum(spectrum_point(index, j, N, spectrum[j][0], spectrum[j][1]) for j in range(repeats))\n\n\ndef restore_signal(spectrum):\n sequence = []\n N = 
len(spectrum)\n for i in range(N):\n sequence.append(polyharmonic(i, N, N // 2 - 1, spectrum))\n\n return sequence\n\n\ndef randoms_from(values, length=None):\n _range = range(length) if length is not None else repeat(0)\n values_len = len(values)\n for _ in _range:\n yield values[randrange(0, values_len)]\n\n\ndef filter_signal(spectrum, filter_predicate):\n length = len(spectrum)\n half_length = length // 2\n\n sequence = []\n for item in enumerate(spectrum):\n index, value = item\n if index > half_length:\n index = length - index\n\n sequence.append(value if filter_predicate(index) else (0, 0))\n\n return list(sequence)\n\n\ndef print_plot(signals, labels, w, h):\n colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'darkorange', 'sienna', 'navy', 'blueviolet', 'gray']\n plots = []\n fig = plt.figure(figsize=(15, 7))\n plt.grid(True)\n\n for i, unit in enumerate(signals):\n plt.subplot(h, w, i + 1)\n tempPlot, = plt.plot(unit, color=colors[i])\n plots.append(tempPlot)\n\n plt.figlegend(plots, labels, loc='upper left')\n return plots\n\n\ndef main():\n labels = [\"Оригинальный сигнал\", \"Восстановленый сигнал\", \"Амплитудный спектр\", \"Фазовый спектр\"]\n\n # Task 2\n original_signal = [signal_point(i, N) for i in range(N)]\n\n spectrum = fourier_spectrum(original_signal)\n amplitude, phase = zip(*spectrum)\n restored_signal = restore_signal(spectrum)\n plots = print_plot([original_signal, restored_signal, amplitude, phase], labels, 2, 2)\n\n\n # Prepase polyharmonic signal\n test_spectrum = [(0, 0)] + list( zip(list(randoms_from(TEST_AMPLITUDES, POLY_COUNT)), list(randoms_from(TEST_PHASES, POLY_COUNT))))\n polyharmonic_original_signal = [polyharmonic(i, N, len(test_spectrum), test_spectrum) for i in range(N)]\n\n\n # Task 3\n spectrum = fourier_spectrum(polyharmonic_original_signal)\n amplitude, phase = zip(*spectrum)\n restored_signal = restore_signal(spectrum)\n\n plots = print_plot([polyharmonic_original_signal, restored_signal, amplitude, phase], labels, 2, 2)\n\n\n # Task 4\n spectrum = get_fast_fourier_spectrum(polyharmonic_original_signal)\n amplitude, phase = zip(*spectrum)\n restored_signal = restore_signal(spectrum)\n\n plots = print_plot([polyharmonic_original_signal, restored_signal, amplitude, phase], labels, 2, 2)\n\n # Task 5\n spectrum = fourier_spectrum(polyharmonic_original_signal)\n amplitude, phase = zip(*spectrum)\n\n spectrum_high = filter_signal(spectrum, lambda x: x < 15)\n spectrum_low = filter_signal(spectrum, lambda x: x > 15)\n spectrum_medium = filter_signal(spectrum, lambda x: (x > 5) and (x < 15))\n\n amplitude_hign, phase_high = zip(*spectrum_high)\n amplitude_low, phase_low = zip(*spectrum_low)\n amplitude_medium, phase_medium = zip(*spectrum_medium)\n\n signal_high = restore_signal(spectrum_high)\n signal_low = restore_signal(spectrum_low)\n signal_medium = restore_signal(spectrum_medium)\n\n labels = [\"Оригинальный сигнал\", \"Спектр амплитудный\", \"Спектр фазовый\", \"ВЧ-фильтр\", \"ВЧ-спектр амплитудный\", \"ВЧ-спектр фазовый\"]\n plots = print_plot([polyharmonic_original_signal, amplitude, phase, signal_high, amplitude_hign, phase_high], labels, 3, 2)\n\n labels = [\"НЧ-фильтр\", \"НЧ-спектр амплитудный\", \"НЧ-спектр фазовый\", \"Фильтр(5:15) - сигнал\", \"Фильтр(5:15) - спектр амплитудный\", \"Фильтр(5:15) - спектр фазовый\"]\n plots = print_plot([signal_low, amplitude_low, phase_low, signal_medium, amplitude_medium, phase_medium], labels, 3, 2)\n\n plt.show()\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"Lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"507376232","text":"import tvm,os\nfrom tvm import relay\nfrom tvm.relay import testing\nfrom tvm.contrib import utils as util\nfrom tvm.contrib import graph_runtime\nimport numpy as np\n\nbatch = 2\nidim = 10\nodim = 20\ndshape=(batch,idim)\nwshape=(idim, odim)\nbshape=(odim,)\nconst_data=np.full(dshape,1.0,\"float32\")\ndata=relay.var(\"data\", relay.TensorType(dshape, \"float32\"))\nweight=relay.var(\"weight\", relay.TensorType(wshape, \"float32\"))\nbias=relay.var(\"bias\", relay.TensorType(bshape, \"float32\"))\n\nconst=relay.const(const_data, \"float32\")\nnet=relay.add(data, const)\nnet=relay.nn.dense(net,weight)\nf=relay.Function(relay.analysis.free_vars(net),net)\nm, p=testing.create_workload(f)\n\nbyoc = \"zendnn\"\n@tvm.ir.register_op_attr(\"add\", \"target.\"+byoc)\ndef _support(attr):\n return True\n\n@tvm.ir.register_op_attr(\"nn.dense\", \"target.\"+byoc)\ndef _support(attr):\n return True\n\npm=relay.transform.AnnotateTarget(byoc)(m)\nmod=relay.transform.PartitionGraph()(pm)\n\ngraph, lib, params =relay.build(mod, \"llvm\")\n\nfor m in lib.imported_modules:\n print(m.get_source())\n\ndef update_lib(lib):\n test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))\n source_dir = os.path.join(test_dir, \"..\" )\n contrib_path = os.path.join(source_dir, \"src\", \"runtime\", \"contrib\")\n print(contrib_path)\n kwargs={}\n # Setup the gcc flag to compile ZEDNN code.\n #kwargs[\"options\"] = [\"-DLIBM_ENABLE=1\", \"-std=c++17\", \"-O3\", \"-fPIC\", \"-fopenmp\",\n # \"-DBIAS_ENABLED=1\", \"-DZENDNN_ENABLE=1\",\n # \"-I/scratch/staff/huaj/tvm/zendnn/ZenDNN/inc\",\n # \"-I/scratch/staff/huaj/tvm/zendnn/ZenDNN/aocl-linux-gcc-4.0/amd-blis/include\",\n # \"-I/scratch/staff/huaj/tvm/zendnn/ZenDNN/aocl-linux-gcc-4.0/amd-libm/include\",\n # \"-L/scratch/staff/huaj/tvm/zendnn/ZenDNN/_out/lib\",\n # \"-lamdZenDNN\",\n # \"-L/scratch/staff/huaj/tvm/zendnn/ZenDNN/aocl-linux-gcc-4.0/amd-blis/lib\",\n # \"-lblis-mt\",\n # \"-L/scratch/staff/huaj/tvm/zendnn/ZenDNN/aocl-linux-gcc-4.0/amd-libm/lib\",\n # \"-lalm\",\n kwargs[\"options\"] = [ \"-std=c++17\", \"-O3\",\"-I\" + contrib_path,\n \"-I/scratch/staff/huaj/tvm/zendnn/ZenDNN/inc\"]\n\n tmp_path = util.tempdir()\n lib_name = 'lib.so'\n lib_path = tmp_path.relpath(lib_name)\n\n # The generated C code with DNNL APIs is compiled to a binary lib.so.\n lib.export_library(lib_path, fcompile=False, **kwargs)\n\n # Load the lib.so back to a runtime module.\n lib = tvm.runtime.load_module(lib_path)\n return lib\n\nlib = update_lib(lib)\nmod = graph_runtime.create(graph, lib, tvm.cpu(0))\n\nidata =np.full(dshape,1.0, \"float32\")\nwdata =np.full(wshape,2.0, \"float32\")\nmod.set_input(\"data\", idata)\nmod.set_input(\"weight\", wdata)\nmod.set_input(**params)\nmod.run()\nout=mod.get_output(0)\nprint(out)\n","sub_path":"zendnn/testzednn.py","file_name":"testzednn.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"387679418","text":"import json\nimport sys\nimport pygame\nimport game_classes.control as gc\n\n# opens game's onfig file\nwith open('config.json') as game_setup_file:\n game_setup = json.load(game_setup_file)\n\n# some generic values to update the game settings more easealy\nwindow_size = game_setup[\"screen\"][\"width\"], 
game_setup[\"screen\"][\"heigh\"]\n# sets the size of the screen of pygamge\nscreen = pygame.display.set_mode(window_size)\n\n# sets values of the background\nred = game_setup[\"background\"][\"red\"]\ngreen = game_setup[\"background\"][\"green\"]\nblue = game_setup[\"background\"][\"blue\"]\nscreen.fill((red, green, blue))\nangle = 0\n\n# caracters\nmacgyver = game_setup[\"macgyver\"][\"img_path\"]\nenemy = game_setup[\"enemy\"][\"img_path\"]\n\n# walls & ground\nwalls_path = \"./ressource/structures.png\"\ntiles_path = \"./ressource/floor-tiles-20x20.png\"\n\n# objects to collect\ntube_path = \"./ressource/tube_plastique2.png\"\npointer_path = \"./ressource/aiguille.png\"\nneedle_path = \"./ressource/seringue.png\"\nether_path = \"./ressource/ether.png\"\n\n# indicators and decoratiom\nitems_path = \"./ressource/items.png\"\ndeco_path = \"./ressource/decorations.png\"\ngear_path = \"./ressource/equipment-32x32.png\"\nlifebar_path = \"./ressource/lifebar-32x32.png\"\npeople_path = \"./ressource/personnages.png\"\ncrusader_path = \"./ressource/tile-crusader-logo.png\"\n\ngameObjects = []\n\n# initiate all objects\nbackground = gc.Background(walls_path, screen.get_width(), screen.get_height())\n\n# initiate player (only moving object) \nplayer = gc.Player(macgyver, 1, (0,0,32,43))\n\n# we add it to a list so we can iterate through objects in order to display them\ngameObjects.append(player)\n\n# initiate gardian\nenemy = gc.Enemy(enemy, 1, (0,0,32,36))\ngameObjects.append(enemy)\n\n# static elements which are displayed on top right corner and indicate status to player\nitems = gc.ObjectS(items_path, 2, (32,0, 32, 32), 600, 0)\ngameObjects.append(items)\n\npeople = gc.ObjectS(people_path, 2, (227, 3, 24, 30), 662, 2)\ngameObjects.append(people)\n\nlifebar = gc.ObjectS(lifebar_path, 2, (123, 3, 3, 30), 720, 2)\ngameObjects.append(lifebar)\n\n# display and distribute objects which will be collected\ntube = gc.ObjectS(tube_path, 1, (0, 0, 259, 158), 390, 200)\ngameObjects.append(tube)\n\npointer = gc.ObjectS(pointer_path, 1, (0, 0, 545, 720), 190, 400)\ngameObjects.append(pointer)\n\nneedle = gc.ObjectS(needle_path, 1, (0, 0, 90, 90), 90, 500)\ngameObjects.append(needle)\n\nether = gc.ObjectS(ether_path, 1, (18, 18, 225, 210), 90, 100)\ngameObjects.append(ether)\n\n# we add to the collision group the objects which can collide with player\nplayer.collisionGroup.append(enemy)\nplayer.collisionGroup.append(tube)\nplayer.collisionGroup.append(needle)\nplayer.collisionGroup.append(ether)\n\n# refresh display\npygame.display.flip()\n\n# initiate some timing for better display performances\nclock = pygame.time.Clock()\n\n# game main loop\nwhile not pygame.key.get_pressed()[pygame.K_q]:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n \n #update objects\n for gameObject in gameObjects:\n gameObject.update()\n\n # render objects\n if player.collision:\n screen.fill((255,0,0))\n else:\n screen.blit(background.image, (0,0))\n \n for gameObject in gameObjects:\n screen.blit(gameObject.image, (gameObject.rect.x, gameObject.rect.y))\n\n pygame.display.flip()\n\n # defines frame rate\n clock.tick(30)\n ","sub_path":"pygame_img.py","file_name":"pygame_img.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"449947717","text":"################################################################################\n# Code for managing and training a Variational Collaborative Generative Loop. 
#\n# #\n# Note: This is ongoing research and very much in flux. #\n################################################################################\n\n# basic python\nimport numpy as np\nimport numpy.random as npr\nfrom collections import OrderedDict\n\n# theano business\nimport theano\nimport theano.tensor as T\n#from theano.tensor.shared_randomstreams import RandomStreams as RandStream\nfrom theano.sandbox.cuda.rng_curand import CURAND_RandomStreams as RandStream\n\n# phil's sweetness\nfrom NetLayers import HiddenLayer, DiscLayer, safe_log, softplus_actfun, \\\n apply_mask\nfrom DKCode import get_adam_updates, get_adadelta_updates\nfrom GIPair import GIPair\n\n#################\n# FOR PROFILING #\n#################\n#from theano import ProfileMode\n#profmode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())\n\n#############################\n# SOME HANDY LOSS FUNCTIONS #\n#############################\n\ndef logreg_loss(Y, class_sign):\n \"\"\"\n Simple binomial deviance (i.e. logistic regression) loss.\n\n This assumes that all predictions in Y have the same target class, which\n is indicated by class_sign, which should be in {-1, +1}. Note: this does\n not \"normalize\" for the number of predictions in Y.\n \"\"\"\n loss = T.sum(softplus_actfun(-class_sign * Y))\n return loss\n\ndef ns_nce_pos(f, k=1.0):\n \"\"\"\n Negative-sampling noise contrastive estimation, for target distribution.\n \"\"\"\n loss = T.sum(T.log(1.0 + k*T.exp(-f)))\n return loss\n\ndef ns_nce_neg(f, k=1.0):\n \"\"\"\n Negative-sampling noise contrastive estimation, for base distribution.\n \"\"\"\n loss = T.sum(f + T.log(1.0 + k*T.exp(-f)))\n return loss\n\ndef lsq_loss(Yh, Yt=0.0):\n \"\"\"\n Least-squares loss for predictions in Yh, given target Yt.\n \"\"\"\n loss = T.sum((Yh - Yt)**2.0)\n return loss\n\ndef hinge_loss(Yh, Yt=0.0):\n \"\"\"\n Unilateral hinge loss for Yh, given target Yt.\n \"\"\"\n residual = Yt - Yh\n loss = T.sum((residual * (residual > 0.0)))\n return loss\n\ndef hinge_sq_loss(Yh, Yt=0.0):\n \"\"\"\n Unilateral squared-hinge loss for Yh, given target Yt.\n \"\"\"\n residual = Yt - Yh\n loss = T.sum((residual * (residual > 0.0))**2.0)\n return loss\n\ndef ulh_loss(Yh, Yt=0.0, delta=0.5):\n \"\"\"\n Unilateral Huberized least-squares loss for Yh, given target Yt.\n \"\"\"\n residual = Yt - Yh\n quad_loss = residual**2.0\n line_loss = (2.0 * delta * abs(residual)) - delta**2.0\n # Construct mask for quadratic loss region\n quad_mask = (abs(residual) < delta) * (residual > 0.0)\n # Construct mask for linear loss region\n line_mask = (abs(residual) >= delta) * (residual > 0.0)\n # Combine the quadratic and linear losses\n loss = T.sum((quad_loss * quad_mask) + (line_loss * line_mask))\n return loss\n\ndef cat_entropy(p):\n \"\"\"\n Compute the entropy of (row-wise) categorical distributions in p.\n \"\"\"\n row_ents = -T.sum((p * safe_log(p)), axis=1, keepdims=True)\n return row_ents\n\ndef cat_prior_dir(p, alpha=0.1):\n \"\"\"\n Log probability under a dirichlet prior, with dirichlet parameter alpha.\n \"\"\"\n log_prob = T.sum((1.0 - alpha) * safe_log(p))\n return log_prob\n\ndef cat_prior_ent(p, ent_weight=1.0):\n \"\"\"\n Log probability under an \"entropy-type\" prior, with some \"weight\".\n \"\"\"\n log_prob = -cat_entropy * ent_weight\n return log_prob\n\ndef binarize_data(X):\n \"\"\"\n Make a sample of bernoulli variables with probabilities given by X.\n \"\"\"\n X_shape = X.shape\n probs = npr.rand(*X_shape)\n X_binary = 1.0 * (probs < X)\n return 
X_binary.astype(theano.config.floatX)\n\ndef sample_masks(X, drop_prob=0.3):\n \"\"\"\n Sample a binary mask to apply to the matrix X, with rate mask_prob.\n \"\"\"\n probs = npr.rand(*X.shape)\n mask = 1.0 * (probs > drop_prob)\n return mask.astype(theano.config.floatX)\n\ndef sample_patch_masks(X, im_shape, patch_shape):\n \"\"\"\n Sample a random patch mask for each image in X.\n \"\"\"\n obs_count = X.shape[0]\n rs = patch_shape[0]\n cs = patch_shape[1]\n off_row = npr.randint(1,high=(im_shape[0]-rs-1), size=(obs_count,))\n off_col = npr.randint(1,high=(im_shape[1]-cs-1), size=(obs_count,))\n dummy = np.zeros(im_shape)\n mask = np.zeros(X.shape)\n for i in range(obs_count):\n dummy = (0.0 * dummy) + 1.0\n dummy[off_row[i]:(off_row[i]+rs), off_col[i]:(off_col[i]+cs)] = 0.0\n mask[i,:] = dummy.ravel()\n return mask.astype(theano.config.floatX)\n\nclass VCGLoop(object):\n \"\"\"\n Controller for training a self-looping VAE using guidance provided by a\n classifier. The classifier tries to discriminate between samples generated\n by the looped VAE while the VAE minimizes a variational generative model\n objective and also shifts mass away from regions where the classifier can\n discern that the generated data is denser than the training data.\n\n This class can also train \"policies\" for reconstructing partially masked\n inputs. A reconstruction policy can readily be trained to share the same\n parameters as a policy for generating transitions while self-looping.\n\n The generator must be an instance of the GenNet class implemented in\n \"GenNet.py\". The discriminator must be an instance of the PeaNet class,\n as implemented in \"PeaNet.py\". The inferencer must be an instance of the\n InfNet class implemented in \"InfNet.py\".\n\n Parameters:\n rng: numpy.random.RandomState (for reproducibility)\n Xd: symbolic var for providing points for starting the Markov Chain\n Xt: symbolic var for providing samples from the target distribution\n i_net: The InfNet instance that will serve as the inferencer\n g_net: The GenNet instance that will serve as the generator\n d_net: The PeaNet instance that will serve as the discriminator\n chain_len: number of steps to unroll the VAE Markov Chain\n data_dim: dimension of the generated data\n prior_dim: dimension of the model prior\n params: a dict of parameters for controlling various costs\n lam_l2d: regularization on squared discriminator output\n \"\"\"\n def __init__(self, rng=None, Xd=None, Xc=None, Xm=None, Xt=None, \\\n i_net=None, g_net=None, d_net=None, chain_len=None, \\\n data_dim=None, prior_dim=None, params=None):\n # Do some stuff!\n self.rng = RandStream(rng.randint(100000))\n self.data_dim = data_dim\n self.prior_dim = prior_dim\n if params is None:\n self.params = {}\n else:\n self.params = params\n if 'cost_decay' in self.params:\n self.cost_decay = self.params['cost_decay']\n else:\n self.cost_decay = 0.1\n if 'chain_type' in params:\n assert((params['chain_type'] == 'walkback') or \\\n (params['chain_type'] == 'walkout'))\n self.chain_type = params['chain_type']\n else:\n self.chain_type = 'walkout'\n\n # symbolic var for inputting samples for initializing the VAE chain\n self.Xd = Xd\n # symbolic var for masking subsets of the state variables\n self.Xm = Xm\n # symbolic var for controlling subsets of the state variables\n self.Xc = Xc\n # symbolic var for inputting samples from the target distribution\n self.Xt = Xt\n # integer number of times to cycle the VAE loop\n self.chain_len = chain_len\n # symbolic matrix of indices for data 
inputs\n self.It = T.arange(self.Xt.shape[0])\n # symbolic matrix of indices for noise/generated inputs\n self.Id = T.arange(self.chain_len * self.Xd.shape[0]) + self.Xt.shape[0]\n\n # get a clone of the desired VAE, for easy access\n self.GIP = GIPair(rng=rng, Xd=self.Xd, Xc=self.Xc, Xm=self.Xm, \\\n g_net=g_net, i_net=i_net, data_dim=self.data_dim, \\\n prior_dim=self.prior_dim, params=None, shared_param_dicts=None)\n self.IN = self.GIP.IN\n self.GN = self.GIP.GN\n self.kld2_scale = self.IN.kld2_scale\n self.use_encoder = self.IN.use_encoder\n assert(self.use_encoder == self.GN.use_decoder)\n # self-loop some clones of the main VAE into a chain.\n # ** All VAEs in the chain share the same Xc and Xm, which are the\n # symbolic inputs for providing the observed portion of the input\n # and a mask indicating which part of the input is \"observed\".\n # These inputs are used for training \"reconstruction\" policies.\n self.IN_chain = []\n self.GN_chain = []\n self.Xg_chain = []\n _Xd = self.Xd\n for i in range(self.chain_len):\n if (i == 0):\n # start the chain with data provided by user\n _IN = self.IN.shared_param_clone(rng=rng, \\\n Xd=apply_mask(Xd=_Xd, Xc=self.Xc, Xm=self.Xm))\n _GN = self.GN.shared_param_clone(rng=rng, Xp=_IN.output)\n else:\n # continue the chain with samples from previous VAE\n _IN = self.IN.shared_param_clone(rng=rng, \\\n Xd=apply_mask(Xd=_Xd, Xc=self.Xc, Xm=self.Xm))\n _GN = self.GN.shared_param_clone(rng=rng, Xp=_IN.output)\n if self.use_encoder:\n # use the \"decoded\" output of the previous generator as input\n # to the next inferencer, which will re-encode it prior to\n # inference\n _Xd = _GN.output_decoded\n else:\n # use the \"encoded\" output of the previous generator as input\n # to the next inferencer, as the inferencer won't try to \n # re-encode it prior to inference\n _Xd = _GN.output\n self.IN_chain.append(_IN)\n self.GN_chain.append(_GN)\n self.Xg_chain.append(_Xd)\n\n # make a clone of the desired discriminator network, which will try\n # to discriminate between samples from the training data and samples\n # generated by the self-looped VAE chain.\n self.DN = d_net.shared_param_clone(rng=rng, \\\n Xd=T.vertical_stack(self.Xt, *self.Xg_chain))\n\n zero_ary = np.zeros((1,)).astype(theano.config.floatX)\n # init shared var for weighting nll of data given posterior sample\n self.lam_chain_nll = theano.shared(value=zero_ary, name='vcg_lam_chain_nll')\n self.set_lam_chain_nll(lam_chain_nll=1.0)\n # init shared var for weighting posterior KL-div from prior\n self.lam_chain_kld = theano.shared(value=zero_ary, name='vcg_lam_chain_kld')\n self.set_lam_chain_kld(lam_chain_kld=1.0)\n # init shared var for weighting chain diffusion rate (a.k.a. 
velocity)\n self.lam_chain_vel = theano.shared(value=zero_ary, name='vcg_lam_chain_vel')\n self.set_lam_chain_vel(lam_chain_vel=1.0)\n # init shared var for weighting nll of data given posterior sample\n self.lam_mask_nll = theano.shared(value=zero_ary, name='vcg_lam_mask_nll')\n self.set_lam_mask_nll(lam_mask_nll=0.0)\n # init shared var for weighting posterior KL-div from prior\n self.lam_mask_kld = theano.shared(value=zero_ary, name='vcg_lam_mask_kld')\n self.set_lam_mask_kld(lam_mask_kld=0.0)\n # init shared var for controlling l2 regularization on params\n self.lam_l2w = theano.shared(value=zero_ary, name='vcg_lam_l2w')\n self.set_lam_l2w(lam_l2w=1e-4)\n # shared var learning rates for all networks\n self.lr_dn = theano.shared(value=zero_ary, name='vcg_lr_dn')\n self.lr_gn = theano.shared(value=zero_ary, name='vcg_lr_gn')\n self.lr_in = theano.shared(value=zero_ary, name='vcg_lr_in')\n # shared var momentum parameters for all networks\n self.mom_1 = theano.shared(value=zero_ary, name='vcg_mom_1')\n self.mom_2 = theano.shared(value=zero_ary, name='vcg_mom_2')\n self.it_count = theano.shared(value=zero_ary, name='vcg_it_count')\n # shared var weights for adversarial classification objective\n self.dw_dn = theano.shared(value=zero_ary, name='vcg_dw_dn')\n self.dw_gn = theano.shared(value=zero_ary, name='vcg_dw_gn')\n # init parameters for controlling learning dynamics\n self.set_all_sgd_params()\n \n self.set_disc_weights() # init adversarial cost weights for GN/DN\n # set a shared var for regularizing the output of the discriminator\n self.lam_l2d = theano.shared(value=(zero_ary + params['lam_l2d']), \\\n name='vcg_lam_l2d')\n\n # setup weights for weighting the quality of the reconstruction\n # differently over multiple steps of reconstruction.\n nll_weights = np.linspace(0.0, 5.0, num=self.chain_len)\n nll_weights = nll_weights / np.sum(nll_weights)\n nll_weights = nll_weights.astype(theano.config.floatX)\n self.mask_nll_weights = theano.shared(value=nll_weights, \\\n name='vcg_mask_nll_weights')\n\n # Grab the full set of \"optimizable\" parameters from the generator\n # and discriminator networks that we'll be working with. We need to\n # ignore parameters in the final layers of the proto-networks in the\n # discriminator network (a generalized pseudo-ensemble). We ignore them\n # because the VCGair requires that they be \"bypassed\" in favor of some\n # binary classification layers that will be managed by this VCGair.\n self.dn_params = []\n for pn in self.DN.proto_nets:\n for pnl in pn[0:-1]:\n self.dn_params.extend(pnl.params)\n self.in_params = [p for p in self.IN.mlp_params]\n self.gn_params = [p for p in self.GN.mlp_params]\n self.joint_params = self.dn_params + self.in_params + self.gn_params\n\n # Now construct a binary discriminator layer for each proto-net in the\n # discriminator network. 
And, add their params to optimization list.\n self._construct_disc_layers(rng)\n self.disc_reg_cost = self.lam_l2d[0] * \\\n T.sum([dl.act_l2_sum for dl in self.disc_layers])\n\n # Construct costs for the generator and discriminator networks based \n # on adversarial binary classification\n self.disc_cost_dn, self.disc_cost_gn = self._construct_disc_costs()\n\n # first, build the cost to be optimized by the discriminator network,\n # in general this will be treated somewhat indepedently of the\n # optimization of the generator and inferencer networks.\n self.dn_cost = self.disc_cost_dn + self.DN.act_reg_cost + \\\n self.disc_reg_cost\n\n # construct costs relevant to the optimization of the generator and\n # discriminator networks\n self.chain_nll_cost = self.lam_chain_nll[0] * \\\n self._construct_chain_nll_cost(cost_decay=self.cost_decay)\n self.chain_kld_cost = self.lam_chain_kld[0] * \\\n self._construct_chain_kld_cost(cost_decay=self.cost_decay, \\\n kld2_scale=self.kld2_scale)\n self.chain_vel_cost = self.lam_chain_vel[0] * \\\n self._construct_chain_vel_cost()\n self.mask_nll_cost = self.lam_mask_nll[0] * \\\n self._construct_mask_nll_cost()\n self.mask_kld_cost = self.lam_mask_kld[0] * \\\n self._construct_mask_kld_cost(kld2_scale=self.kld2_scale)\n self.other_reg_cost = self._construct_other_reg_cost()\n self.gip_cost = self.disc_cost_gn + self.chain_nll_cost + \\\n self.chain_kld_cost + self.chain_vel_cost + \\\n self.mask_nll_cost + self.mask_kld_cost + \\\n self.other_reg_cost\n # compute total cost on the discriminator and VB generator/inferencer\n self.joint_cost = self.dn_cost + self.gip_cost\n\n # grab the gradients for all parameters to optimize\n self.joint_grads = OrderedDict()\n for p in self.dn_params:\n # grads for discriminator network params use a separate cost\n self.joint_grads[p] = T.grad(self.dn_cost, p).clip(-0.1,0.1)\n for p in self.in_params:\n # grads for generator network use the GIPair's cost\n self.joint_grads[p] = T.grad(self.gip_cost, p).clip(-0.1,0.1)\n for p in self.gn_params:\n # grads for generator network use the GIPair's cost\n self.joint_grads[p] = T.grad(self.gip_cost, p).clip(-0.1,0.1)\n\n # construct the updates for the discriminator, generator and \n # inferencer networks. all networks share the same first/second\n # moment momentum and iteration count. 
the networks each have their\n # own learning rates, which lets you turn their learning on/off.\n self.dn_updates = get_adam_updates(params=self.dn_params, \\\n grads=self.joint_grads, alpha=self.lr_dn, \\\n beta1=self.mom_1, beta2=self.mom_2, it_count=self.it_count, \\\n mom2_init=1e-3, smoothing=1e-8)\n self.gn_updates = get_adam_updates(params=self.gn_params, \\\n grads=self.joint_grads, alpha=self.lr_gn, \\\n beta1=self.mom_1, beta2=self.mom_2, it_count=self.it_count, \\\n mom2_init=1e-3, smoothing=1e-8)\n self.in_updates = get_adam_updates(params=self.in_params, \\\n grads=self.joint_grads, alpha=self.lr_in, \\\n beta1=self.mom_1, beta2=self.mom_2, it_count=self.it_count, \\\n mom2_init=1e-3, smoothing=1e-8)\n #self.dn_updates = get_adadelta_updates(params=self.dn_params, \\\n # grads=self.joint_grads, alpha=self.lr_dn, beta1=0.98)\n #self.gn_updates = get_adadelta_updates(params=self.gn_params, \\\n # grads=self.joint_grads, alpha=self.lr_gn, beta1=0.98)\n #self.in_updates = get_adadelta_updates(params=self.in_params, \\\n # grads=self.joint_grads, alpha=self.lr_in, beta1=0.98)\n\n # bag up all the updates required for training\n self.joint_updates = OrderedDict()\n for k in self.dn_updates:\n self.joint_updates[k] = self.dn_updates[k]\n for k in self.gn_updates:\n self.joint_updates[k] = self.gn_updates[k]\n for k in self.in_updates:\n self.joint_updates[k] = self.in_updates[k]\n # construct an update for tracking the mean KL divergence of\n # approximate posteriors for this chain\n new_kld_mean = (0.98 * self.IN.kld_mean) + ((0.02 / self.chain_len) * \\\n sum([T.mean(I_N.kld_cost) for I_N in self.IN_chain]))\n self.joint_updates[self.IN.kld_mean] = T.cast(new_kld_mean, 'floatX')\n\n # construct the function for training on training data\n self.train_joint = self._construct_train_joint()\n return\n\n def set_dn_sgd_params(self, learn_rate=0.01):\n \"\"\"\n Set learning rate for the discriminator network.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lr = zero_ary + learn_rate\n self.lr_dn.set_value(new_lr.astype(theano.config.floatX))\n return\n\n def set_in_sgd_params(self, learn_rate=0.01):\n \"\"\"\n Set learning rate for the inferencer network.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lr = zero_ary + learn_rate\n self.lr_in.set_value(new_lr.astype(theano.config.floatX))\n return\n\n def set_gn_sgd_params(self, learn_rate=0.01):\n \"\"\"\n Set learning rate for the generator network.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lr = zero_ary + learn_rate\n self.lr_gn.set_value(new_lr.astype(theano.config.floatX))\n return\n\n def set_all_sgd_params(self, learn_rate=0.01, mom_1=0.9, mom_2=0.999):\n \"\"\"\n Set learning rate and momentum parameter for all updates.\n \"\"\"\n zero_ary = np.zeros((1,))\n # set learning rates to the same value\n new_lr = zero_ary + learn_rate\n self.lr_dn.set_value(new_lr.astype(theano.config.floatX))\n self.lr_gn.set_value(new_lr.astype(theano.config.floatX))\n self.lr_in.set_value(new_lr.astype(theano.config.floatX))\n # set the first/second moment momentum parameters\n new_mom_1 = zero_ary + mom_1\n new_mom_2 = zero_ary + mom_2\n self.mom_1.set_value(new_mom_1.astype(theano.config.floatX))\n self.mom_2.set_value(new_mom_2.astype(theano.config.floatX))\n return\n\n def set_disc_weights(self, dweight_gn=1.0, dweight_dn=1.0):\n \"\"\"\n Set weights for the adversarial classification cost.\n \"\"\"\n zero_ary = np.zeros((1,)).astype(theano.config.floatX)\n new_dw_dn = zero_ary + dweight_dn\n self.dw_dn.set_value(new_dw_dn)\n new_dw_gn = zero_ary + 
dweight_gn\n self.dw_gn.set_value(new_dw_gn)\n return\n\n def set_lam_chain_nll(self, lam_chain_nll=1.0):\n \"\"\"\n Set weight for controlling the influence of the data likelihood.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_chain_nll\n self.lam_chain_nll.set_value(new_lam.astype(theano.config.floatX))\n return\n\n def set_lam_chain_kld(self, lam_chain_kld=1.0):\n \"\"\"\n Set the strength of regularization on KL-divergence for continuous\n posterior variables. When set to 1.0, this reproduces the standard\n role of KL(posterior || prior) in variational learning.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_chain_kld\n self.lam_chain_kld.set_value(new_lam.astype(theano.config.floatX))\n return\n\n def set_lam_chain_vel(self, lam_chain_vel=1.0):\n \"\"\"\n Set the strength of regularization on Markov Chain velocity.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_chain_vel\n self.lam_chain_vel.set_value(new_lam.astype(theano.config.floatX))\n return\n\n def set_lam_mask_nll(self, lam_mask_nll=0.0):\n \"\"\"\n Set weight for controlling the influence of the data likelihood.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_mask_nll\n self.lam_mask_nll.set_value(new_lam.astype(theano.config.floatX))\n return\n\n def set_lam_mask_kld(self, lam_mask_kld=1.0):\n \"\"\"\n Set the strength of regularization on KL-divergence for continuous\n posterior variables. When set to 1.0, this reproduces the standard\n role of KL(posterior || prior) in variational learning.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_mask_kld\n self.lam_mask_kld.set_value(new_lam.astype(theano.config.floatX))\n return\n\n def set_lam_l2w(self, lam_l2w=1e-3):\n \"\"\"\n Set the relative strength of l2 regularization on network params.\n \"\"\"\n zero_ary = np.zeros((1,))\n new_lam = zero_ary + lam_l2w\n self.lam_l2w.set_value(new_lam.astype(theano.config.floatX))\n return\n\n def _construct_disc_layers(self, rng):\n \"\"\"\n Construct binary discrimination layers for each spawn-net in the\n underlying discrimnator pseudo-ensemble. All spawn-nets spawned from\n the same proto-net will use the same disc-layer parameters.\n \"\"\"\n self.disc_layers = []\n self.disc_outputs = []\n dn_init_scale = self.DN.init_scale\n for sn in self.DN.spawn_nets:\n # construct a \"binary discriminator\" layer to sit on top of each\n # spawn net in the discriminator pseudo-ensemble\n sn_fl = sn[-1]\n init_scale = dn_init_scale * (1. 
/ np.sqrt(sn_fl.in_dim))\n self.disc_layers.append(DiscLayer(rng=rng, \\\n input=sn_fl.noisy_input, in_dim=sn_fl.in_dim, \\\n W_scale=dn_init_scale))\n # capture the (linear) output of the DiscLayer, for possible reuse\n self.disc_outputs.append(self.disc_layers[-1].linear_output)\n # get the params of this DiscLayer, for convenient optimization\n self.dn_params.extend(self.disc_layers[-1].params)\n return\n\n def _construct_disc_costs(self):\n \"\"\"\n Construct the generator and discriminator adversarial costs.\n \"\"\"\n gn_costs = []\n dn_costs = []\n for dl_output in self.disc_outputs:\n data_preds = dl_output.take(self.It, axis=0)\n noise_preds = dl_output.take(self.Id, axis=0)\n # compute the cost with respect to which we will be optimizing\n # the parameters of the discriminator network\n data_size = T.cast(self.It.size, 'floatX')\n noise_size = T.cast(self.Id.size, 'floatX')\n dnl_dn_cost = (logreg_loss(data_preds, 1.0) / data_size) + \\\n (logreg_loss(noise_preds, -1.0) / noise_size)\n # compute the cost with respect to which we will be optimizing\n # the parameters of the generative model\n dnl_gn_cost = (hinge_loss(noise_preds, 0.0) + hinge_sq_loss(noise_preds, 0.0)) / (2.0 * noise_size)\n dn_costs.append(dnl_dn_cost)\n gn_costs.append(dnl_gn_cost)\n dn_cost = self.dw_dn[0] * T.sum(dn_costs)\n gn_cost = self.dw_gn[0] * T.sum(gn_costs)\n return [dn_cost, gn_cost]\n\n def _construct_chain_nll_cost(self, cost_decay=0.1):\n \"\"\"\n Construct the negative log-likelihood part of cost to minimize.\n\n This is for operation in \"free chain\" mode, where a seed point is used\n to initialize a long(ish) running markov chain.\n \"\"\"\n assert((cost_decay > 0.0) and (cost_decay < 1.0))\n obs_count = T.cast(self.Xd.shape[0], 'floatX')\n nll_costs = []\n step_weight = 1.0\n step_weights = []\n step_decay = cost_decay\n for i in range(self.chain_len):\n if self.chain_type == 'walkback':\n # train with walkback roll-outs -- reconstruct initial point\n IN_i = self.IN_chain[0]\n else:\n # train with walkout roll-outs -- reconstruct previous point\n IN_i = self.IN_chain[i]\n GN_i = self.GN_chain[i]\n if self.use_encoder:\n # compare encoded output of the generator with the encoded\n # non-control input to the inferencer\n c = -T.sum(GN_i.compute_log_prob(Xd=IN_i.Xd_encoded)) / obs_count\n else:\n # compare encoded output of the generator with the unencoded\n # non-control input to the inferencer\n c = -T.sum(GN_i.compute_log_prob(Xd=IN_i.Xd)) / obs_count\n nll_costs.append(step_weight * c)\n step_weights.append(step_weight)\n step_weight = step_weight * step_decay\n nll_cost = sum(nll_costs) / sum(step_weights)\n return nll_cost\n\n def _construct_chain_kld_cost(self, cost_decay=0.1, kld2_scale=0.0):\n \"\"\"\n Construct the posterior KL-d from prior part of cost to minimize.\n\n This is for operation in \"free chain\" mode, where a seed point is used\n to initialize a long(ish) running markov chain.\n \"\"\"\n assert((cost_decay > 0.0) and (cost_decay < 1.0))\n obs_count = T.cast(self.Xd.shape[0], 'floatX')\n kld_mean = self.IN.kld_mean[0]\n kld_costs = []\n step_weight = 1.0\n step_weights = []\n step_decay = cost_decay\n for i in range(self.chain_len):\n IN_i = self.IN_chain[i]\n # basic variational term on KL divergence between post and prior\n kld_cost_1 = IN_i.kld_cost\n # extra term for the squre of KLd in excess of the mean\n kld_too_big = theano.gradient.consider_constant( \\\n (IN_i.kld_cost > kld_mean))\n kld_cost_2 = kld2_scale * \\\n (kld_too_big * (IN_i.kld_cost - 
kld_mean))**2.0\n # combine the two types of KLd costs\n c = T.sum(kld_cost_1 + kld_cost_2) / obs_count\n kld_costs.append(step_weight * c)\n step_weights.append(step_weight)\n step_weight = step_weight * step_decay\n kld_cost = sum(kld_costs) / sum(step_weights)\n return kld_cost\n\n def _construct_chain_vel_cost(self):\n \"\"\"\n Construct the Markov Chain velocity part of cost to minimize.\n\n This is for operation in \"free chain\" mode, where a seed point is used\n to initialize a long(ish) running markov chain.\n \"\"\"\n obs_count = T.cast(self.Xd.shape[0], 'floatX')\n IN_start = self.IN_chain[0]\n GN_end = self.GN_chain[-1]\n vel_cost = T.sum(GN_end.compute_log_prob(Xd=IN_start.Xd)) / obs_count\n return vel_cost\n\n def _construct_mask_nll_cost(self):\n \"\"\"\n Construct the negative log-likelihood part of cost to minimize.\n\n This is for \"iterative reconstruction\" when the seed input is subject\n to partial masking.\n \"\"\"\n obs_count = T.cast(self.Xd.shape[0], 'floatX')\n nll_costs = []\n for i in range(self.chain_len):\n IN_i = self.IN_chain[i]\n GN_i = self.GN_chain[i]\n if self.use_encoder:\n # compare encoded output of the generator to the encoded\n # representation of control input to the inferencer\n c = -T.sum(GN_i.compute_log_prob(Xd=IN_i.Xd_encoded)) / obs_count\n else:\n # compare encoded output of the generator to the unencoded\n # control input to the inferencer, but only measure NLL for\n # input dimensions that are not part of the \"control set\"\n c = -T.sum(GN_i.masked_log_prob(Xc=self.Xc, Xm=self.Xm)) \\\n / obs_count\n nll_costs.append(self.mask_nll_weights[i] * c)\n nll_cost = sum(nll_costs)\n return nll_cost\n\n def _construct_mask_kld_cost(self, kld2_scale=0.0):\n \"\"\"\n Construct the posterior KL-d from prior part of cost to minimize.\n\n This is for \"iterative reconstruction\" when the seed input is subject\n to partial masking.\n \"\"\"\n obs_count = T.cast(self.Xd.shape[0], 'floatX')\n kld_mean = self.IN.kld_mean[0]\n kld_costs = []\n for i in range(self.chain_len):\n IN_i = self.IN_chain[i]\n # basic variational term on KL divergence between post and prior\n kld_cost_1 = IN_i.kld_cost\n # extra term for the squre of KLd in excess of the mean\n kld_too_big = theano.gradient.consider_constant( \\\n (IN_i.kld_cost > kld_mean))\n kld_cost_2 = kld2_scale * \\\n (kld_too_big * (IN_i.kld_cost - kld_mean))**2.0\n # combine the two types of KLd costs\n c = T.sum(kld_cost_1 + kld_cost_2) / obs_count\n kld_costs.append(c)\n kld_cost = sum(kld_costs) / float(self.chain_len)\n return kld_cost\n\n def _construct_other_reg_cost(self):\n \"\"\"\n Construct the cost for low-level basic regularization. E.g. 
for\n applying l2 regularization to the network activations and parameters.\n \"\"\"\n gp_cost = sum([T.sum(par**2.0) for par in self.gn_params])\n ip_cost = sum([T.sum(par**2.0) for par in self.in_params])\n other_reg_cost = self.lam_l2w[0] * (gp_cost + ip_cost)\n return other_reg_cost\n\n def _construct_train_joint(self):\n \"\"\"\n Construct theano function to train generator and discriminator jointly.\n \"\"\"\n outputs = [self.joint_cost, self.chain_nll_cost, self.chain_kld_cost, \\\n self.chain_vel_cost, self.mask_nll_cost, self.mask_kld_cost, \\\n self.disc_cost_gn, self.disc_cost_dn, self.other_reg_cost]\n func = theano.function(inputs=[ self.Xd, self.Xc, self.Xm, self.Xt ], \\\n outputs=outputs, updates=self.joint_updates) # , \\\n #mode=profmode)\n return func\n\n def sample_from_chain(self, X_d, X_c=None, X_m=None, loop_iters=5, \\\n sigma_scale=None):\n \"\"\"\n Sample for several rounds through the I<->G loop, initialized with the\n the \"data variable\" samples in X_d.\n \"\"\"\n result = self.GIP.sample_from_chain(X_d, X_c=X_c, X_m=X_m, \\\n loop_iters=loop_iters, sigma_scale=sigma_scale)\n return result\n\n def sample_from_prior(self, samp_count, sigma=None):\n \"\"\"\n Draw independent samples from the model's prior, using the gaussian\n continuous prior of the underlying GenNet. Use a user-defined sigma.\n \"\"\"\n if sigma is None:\n sigma = self.GN.prior_sigma\n # sample from the GenNet, with either the GenNet's prior sigma or some\n # user-defined sigma\n Xs = self.GN.scaled_sampler(samp_count, sigma)\n return Xs\n\nif __name__==\"__main__\":\n # TEST CODE IS ELSEWHERE\n print(\"NO TEST CODE HERE!\")\n\n\n\n\n\n##############\n# EYE BUFFER #\n##############\n","sub_path":"generative_models/VCGLoop.py","file_name":"VCGLoop.py","file_ext":"py","file_size_in_byte":33257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"33997689","text":"# https://atcoder.jp/contests/abc134/tasks/abc134_b\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\ndef resolve():\n from math import ceil\n n,d=map(int,input().split())\n print(ceil(n/(2*d+1)))\nresolve()\n","sub_path":"ABC134/b_golden_apple.py","file_name":"b_golden_apple.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"146887363","text":"import socket\n\nHOST = \"127.0.0.1\"\nPORT = 27000\n\nwith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect((HOST, PORT))\n s.sendall(b\"{change: 'admin', data: 'green'}\")\n data = s.recv(1024)\n\nprint('Received', repr(data))\n","sub_path":"SocketConnection/python/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"345246972","text":"import random\nfrom common import SheerID\n\n\n# If you want to see the webhook recieve a response this is set up to use https://ngrok.com/\n# Just visit this site, register and follow the docs for getting the tunnel up and running. \n# You should see the webhook get called in your terminal.\nACCESS_TOKEN = \"cc3faec8ef1841e9a59de1019c8bb70e\"\nPROGRAM_ID = \"5d72a016a34e721fa1af11ee\"\nSHEER_HOST = \"https://services.sheerid.com/rest/v2\"\nNGROK_ID = \"wallydev\"\n\nsid = SheerID(ACCESS_TOKEN)\n\n\n# GET BASIS INFO FROM SHEERID ABOUT THE BUILD\nsid.get(SHEER_HOST + \"/info\")\n\n\n# SET A WEBHOOK. 
EACH PROGRAM GETS ONE. SHEERID CAN ADD MORE ON REQUEST. ONLY REQUIRED ONE TIME PER PROGRAM\nsid.post(SHEER_HOST + \"/program/\"+PROGRAM_ID+\"/webhook\", {\"callbackUri\":\"http://\"+NGROK_ID+\".ngrok.io\"})\n\n\n# CREATE A NEW VERIFICATION. OPTIONAL FIRST STEP\nres = sid.post(SHEER_HOST + \"/verification\", {\"programId\": PROGRAM_ID})\nverification_id = res[\"verificationId\"]\n\n\n# GET AN ORG TO QUERY AGAINST\norg_res = sid.get(SHEER_HOST + \"/program/\"+PROGRAM_ID+\"/organization?name=oregon\")\norg = random.choice(org_res)\nprint(\"Choosing \" + org[\"name\"] + \" with id \" + str(org[\"id\"]))\n\n\n# SEND INFO FOR INSTANT VERIFICATION\napproved_info = {\n \"firstName\": \"rejected\", # use rejected to fail instant verification. Use any other value to succeed.\n \"lastName\": \"person\", # In test mode, this is an easy value to change to work around the same-person-limit\n \"birthDate\": \"1991-01-01\", # 1900-01-01 is the earliest date we accept\n \"email\": \"walt.norblad+studenttest@sheerid.edu\",\n \"status\": \"POLICE\",\n \"organization\": {\n \"id\": org[\"id\"],\n \"name\": org[\"name\"]\n },\n}\nres = sid.post(res[\"submissionUrl\"], approved_info)\n\n\n# SEND FILE FOR DOC UPLOAD\nfilename = \"approve.jpg\"\n#filename = \"needs_review.jpg\"\n#filename = \"reject.jpg\"\nres = sid.file_post(res[\"submissionUrl\"], filename)\n\n\n# Status TO CHECK ON STATUS OF A VERIFICATION - safe for browsers\nsid.get(SHEER_HOST + \"/verification/\"+verification_id)\n\n\n# DETAILS TO GET ALL THE INFO ABOUT A VERIFICATION ON STATUS OF DOCUMENT UPLOAD - !!Unsafe for browsers\nsid.get(SHEER_HOST + \"/verification/\"+verification_id+\"/details\")\n","sub_path":"scripts/first-responder.py","file_name":"first-responder.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"120606485","text":"\"\"\"The WaveBlocks Project\n\nPlot the coefficients $c_i$ of each component of a homogeneous or\ninhomogeneous Hagedorn wavepacket for all timesteps during the\ntime propagation.\n\n@author: R. Bourquin\n@copyright: Copyright (C) 2012 R. 
Bourquin\n@license: Modified BSD License\n\"\"\"\n\nimport sys\nfrom numpy import abs, angle, array\nfrom matplotlib.pyplot import *\n\nfrom WaveBlocksND import IOManager\nfrom WaveBlocksND import BlockFactory\nfrom WaveBlocksND.Plot import stemcf\n\nimport GraphicsDefaults as GD\n\n\ndef read_all_datablocks(iom):\n r\"\"\"Read the data from all blocks that contain any usable data.\n\n :param iom: An :py:class:`IOManager` instance providing the simulation data.\n \"\"\"\n # Iterate over all blocks and plot their data\n for blockid in iom.get_block_ids():\n if iom.has_wavepacket(blockid=blockid):\n read_data_homogeneous(iom, blockid=blockid)\n elif iom.has_inhomogwavepacket(blockid=blockid):\n read_data_inhomogeneous(iom, blockid=blockid)\n else:\n print(\"Warning: Not plotting wavepacket coefficients in block '\"+str(blockid)+\"'!\")\n\n\ndef read_data_homogeneous(iom, blockid=0):\n r\"\"\"\n :param iom: An :py:class:`IOManager` instance providing the simulation data.\n :param blockid: The data block from which the values are read.\n \"\"\"\n BF = BlockFactory()\n\n parameters = iom.load_parameters()\n timegrid = iom.load_wavepacket_timegrid(blockid=blockid)\n\n # Basis shapes\n bsdescr = iom.load_wavepacket_basisshapes(blockid=blockid)\n BS = {}\n for ahash, descr in bsdescr.iteritems():\n BS[ahash] = BF.create_basis_shape(descr)\n\n # Plot the coefficients for all timesteps\n for j, step in enumerate(timegrid):\n hashes, coeffs = iom.load_wavepacket_coefficients(timestep=step, blockid=blockid, get_hashes=True)\n\n k = []\n\n for i in xrange(parameters[\"ncomponents\"]):\n bs = BS[int(hashes[i])]\n ki = array([bs[node] for node in bs.get_node_iterator()])\n k.append(ki)\n\n plot_coefficients(k, coeffs, step, parameters[\"dt\"], index=blockid)\n\n\ndef read_data_inhomogeneous(iom, blockid=0):\n r\"\"\"\n :param iom: An :py:class:`IOManager` instance providing the simulation data.\n :param blockid: The data block from which the values are read.\n \"\"\"\n BF = BlockFactory()\n\n parameters = iom.load_parameters()\n timegrid = iom.load_inhomogwavepacket_timegrid(blockid=blockid)\n\n # Basis shapes\n bsdescr = iom.load_inhomogwavepacket_basisshapes(blockid=blockid)\n BS = {}\n for ahash, descr in bsdescr.iteritems():\n BS[ahash] = BF.create_basis_shape(descr)\n\n # Plot the coefficients for all timesteps\n for j, step in enumerate(timegrid):\n hashes, coeffs = iom.load_inhomogwavepacket_coefficients(timestep=step, blockid=blockid, get_hashes=True)\n\n k = []\n\n for i in xrange(parameters[\"ncomponents\"]):\n bs = BS[int(hashes[i])]\n ki = array([bs[node] for node in bs.get_node_iterator()])\n k.append(ki)\n\n plot_coefficients(k, coeffs, step, parameters[\"dt\"], index=blockid)\n\n\ndef plot_coefficients(k, c, step, dt, index=0):\n \"\"\"\n :param parameters: A :py:class:`ParameterProvider` instance.\n :param timegrid: The timegrid that belongs to the coefficient values.\n :param coeffs: The coefficient values.\n :param imgsize: The size of the plot. 
For a large number of plotted\n coefficients, we might have to increase this value.\n \"\"\"\n print(\"Plotting the coefficients of data block '\"+str(index)+\"' at timestep \"+str(step))\n\n N = len(k)\n\n fig = figure()\n\n for n in xrange(N):\n ax = fig.add_subplot(N,1,n+1)\n\n stemcf(k[n], angle(c[n]), abs(c[n]))\n\n # axis formatting:\n m = max(abs(c[n]))\n ax.set_xlim(-1, max(k[n])+1)\n ax.set_ylim(-0.1*m, 1.1*m)\n\n ax.set_xlabel(r\"$k$\")\n ax.set_ylabel(r\"$c_k$\")\n\n fig.suptitle(r\"Coefficients $c_k$ at time $t=\"+str(step*dt)+r\"$\")\n\n fig.savefig(\"wavepacket_coefficients_block\"+str(index)+\"_timestep_\"+(5-len(str(step)))*\"0\"+str(step)+GD.output_format)\n close(fig)\n\n\n\n\nif __name__ == \"__main__\":\n iom = IOManager()\n\n # Read file with simulation data\n try:\n iom.open_file(filename=sys.argv[1])\n except IndexError:\n iom.open_file()\n\n # Read the data and plot it, one plot for each data block.\n read_all_datablocks(iom)\n\n iom.finalize()\n","sub_path":"src/plotters/PlotWavepacketCoefficientsStem.py","file_name":"PlotWavepacketCoefficientsStem.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"589980220","text":"\r\n#!/usr/bin/env python3\r\n\r\n###############\r\n# Author: Yi Herng Ong\r\n# Purpose: Kinova 3-fingered gripper in mujoco environment\r\n# Summer 2019\r\n\r\n###############\r\n\r\n#TODO: Remove unecesssary commented lines\r\n#TODO: Make a brief description of each function commented at the top of it\r\n\r\nfrom gym import utils, spaces\r\nimport gym\r\nimport glfw\r\nfrom gym.utils import seeding\r\n# from gym.envs.mujoco import mujoco_env\r\nimport numpy as np\r\nfrom mujoco_py import MjViewer, load_model_from_path, MjSim\r\nimport mujoco_py\r\n# from PID_Kinova_MJ import *\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport os, sys\r\nfrom scipy.spatial.transform import Rotation as R\r\nimport random\r\nimport pickle\r\nimport pdb\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport xml.etree.ElementTree as ET\r\nfrom classifier_network import LinearNetwork, ReducedLinearNetwork\r\nimport re\r\nfrom scipy.stats import triang\r\nimport csv\r\nimport pandas as pd\r\nfrom pathlib import Path\r\nimport threading #oh boy this might get messy\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\nclass KinovaGripper_Env(gym.Env):\r\n metadata = {'render.modes': ['human']}\r\n def __init__(self, arm_or_end_effector=\"hand\", frame_skip=4):\r\n self.file_dir = os.path.dirname(os.path.realpath(__file__))\r\n self.arm_or_hand=arm_or_end_effector\r\n if arm_or_end_effector == \"arm\":\r\n self._model = load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300.xml\")\r\n full_path = self.file_dir + \"/kinova_description/j2s7s300.xml\"\r\n self.filename= \"/kinova_description/j2s7s300.xml\"\r\n elif arm_or_end_effector == \"hand\":\r\n pass\r\n #self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1.xml\"\r\n #self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_scyl.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1_scyl.xml\"\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + 
\"/kinova_description/j2s7s300_end_effector_v1_mbox.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mbox.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mcyl.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mcyl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bcyl.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bcyl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bbox.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bbox.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_shg.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_shg.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mhg.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mhg.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bhg.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bhg.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_svase.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_svase.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mvase.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mvase.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bvase.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bvase.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bcap.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bcap.xml\"\r\n #full_path = file_dir + \"/kinova_description/j2s7s300_end_effector_v1.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_blemon.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_blemon.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bbottle.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bbottle.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_btbottle.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_btbottle.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_slemon.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_slemon.xml\"\r\n 
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_sbottle.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_sbottle.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_stbottle.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_stbottle.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mlemon.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mlemon.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mbottle.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mbottle.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_msphere.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_sphere.xml\"\r\n #self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + \"/kinova_description/DisplayStuff.xml\"),'s',\"/kinova_description/DisplayStuff.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bcone1.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bcone1.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mcone1.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mcone1.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_scone1.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1_scone1.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bcone2.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bcone2.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mcone2.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mcone2.xml\"\r\n #self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_scone2.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1_scone2.xml\"\r\n\r\n 
else:\r\n print(\"CHOOSE EITHER HAND OR ARM\")\r\n raise ValueError\r\n\r\n self._sim = MjSim(self._model) # The simulator. This holds all the information about object locations and orientations\r\n self.Grasp_Reward=False #This variable says whether or not a grasp reward has been given this run\r\n self._viewer = None # The render window\r\n self.contacts=self._sim.data.ncon # The number of contacts in the simulation environment\r\n self.Tfw=np.zeros([4,4]) # The transfer matrix that gets us from the world frame to the local frame\r\n self.wrist_pose=np.zeros(3) # The wrist position in world coordinates\r\n self.thetas=[0,0,0,0,0,0,0] # The angles of the joints of a real robot arm used for calculating the jacobian of the hand\r\n self._timestep = self._sim.model.opt.timestep\r\n self.pid=False\r\n self.step_coords='global'\r\n self._torque = [0,0,0,0] #Unused\r\n self._velocity = [0,0,0,0] #Unused\r\n self._jointAngle = [5,0,0,0] #Unused\r\n self._positions = [] # ??\r\n self._numSteps = 0\r\n self._simulator = \"Mujoco\"\r\n self.action_scale = 0.0333\r\n self.max_episode_steps = 150\r\n self.site_count=0\r\n # Parameters for cost function\r\n self.state_des = 0.20\r\n self.initial_state = np.array([0.0, 0.0, 0.0, 0.0])\r\n self.action_space = spaces.Box(low=np.array([-0.8, -0.8, -0.8, -0.8]), high=np.array([0.8, 0.8, 0.8, 0.8]), dtype=np.float32) # Velocity action space\r\n self.const_T=np.array([[0,-1,0,0],[0,0,-1,0],[1,0,0,0],[0,0,0,1]]) #Transfer matrix from world frame to un-modified hand frame\r\n self.frame_skip = frame_skip # Used in step. Number of frames you go through before you reach the next step\r\n self.all_states = None # This is the variable we use to save the states before they are sent to the simulator when we are resetting.\r\n\r\n self.state_rep = \"local\" # change accordingly\r\n\r\n self.obj_coords = [0,0,0]\r\n self.objects = {}\r\n self.obj_keys = list()\r\n\r\n # Originally used for defining min/max ranges of state input (currently not being used)\r\n min_hand_xyz = [-0.1, -0.1, 0.0, -0.1, -0.1, 0.0, -0.1, -0.1, 0.0,-0.1, -0.1, 0.0, -0.1, -0.1, 0.0,-0.1, -0.1, 0.0, -0.1, -0.1, 0.0]\r\n min_obj_xyz = [-0.1, -0.01, 0.0]\r\n min_joint_states = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\r\n min_obj_size = [0.0, 0.0, 0.0]\r\n min_finger_obj_dist = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\r\n min_obj_dot_prod = [0.0]\r\n min_f_dot_prod = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\r\n\r\n max_hand_xyz = [0.1, 0.1, 0.5, 0.1, 0.1, 0.5, 0.1, 0.1, 0.5,0.1, 0.1, 0.5, 0.1, 0.1, 0.5,0.1, 0.1, 0.5, 0.1, 0.1, 0.5]\r\n max_obj_xyz = [0.1, 0.7, 0.5]\r\n max_joint_states = [0.2, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\r\n max_obj_size = [0.5, 0.5, 0.5]\r\n max_finger_obj_dist = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]\r\n max_obj_dot_prod = [1.0]\r\n max_f_dot_prod = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\r\n # print()\r\n if self.state_rep == \"global\" or self.state_rep == \"local\":\r\n\r\n obs_min = min_hand_xyz + min_obj_xyz + min_joint_states + min_obj_size + min_finger_obj_dist + min_obj_dot_prod #+ min_f_dot_prod\r\n obs_min = np.array(obs_min)\r\n # print(len(obs_min))\r\n\r\n obs_max = max_hand_xyz + max_obj_xyz + max_joint_states + max_obj_size + max_finger_obj_dist + max_obj_dot_prod #+ max_f_dot_prod\r\n obs_max = np.array(obs_max)\r\n # print(len(obs_max))\r\n\r\n self.observation_space = spaces.Box(low=obs_min , high=obs_max, dtype=np.float32)\r\n elif self.state_rep == \"metric\":\r\n obs_min = list(np.zeros(17)) + [-0.1, -0.1, 0.0] + min_obj_xyz + 
min_joint_states + min_obj_size + min_finger_obj_dist + min_dot_prod\r\n obs_max = list(np.full(17, np.inf)) + [0.1, 0.1, 0.5] + max_obj_xyz + max_joint_states + max_obj_size + max_finger_obj_dist + max_dot_prod\r\n self.observation_space = spaces.Box(low=np.array(obs_min) , high=np.array(obs_max), dtype=np.float32)\r\n\r\n elif self.state_rep == \"joint_states\":\r\n obs_min = min_joint_states + min_obj_xyz + min_obj_size + min_dot_prod\r\n obs_max = max_joint_states + max_obj_xyz + max_obj_size + max_dot_prod\r\n self.observation_space = spaces.Box(low=np.array(obs_min) , high=np.array(obs_max), dtype=np.float32)\r\n # <---- end of unused section\r\n\r\n #self.Grasp_net = LinearNetwork().to(device) # This loads the grasp classifier\r\n #trained_model = \"/home/orochi/KinovaGrasping/gym-kinova-gripper/trained_model_05_28_20_2105local.pt\"\r\n #trained_model = \"/home/orochi/KinovaGrasping/gym-kinova-gripper/trained_model_01_23_20_2052local.pt\"\r\n # self.Grasp_net = GraspValid_net(54).to(device)\r\n # trained_model = \"/home/graspinglab/NCS_data/ExpertTrainedNet_01_04_20_0250.pt\"\r\n #model = torch.load(trained_model)\r\n #self.Grasp_net.load_state_dict(model)\r\n #self.Grasp_net.eval()\r\n\r\n\r\n obj_list=['Coords_try1.txt','Coords_CubeM.txt','Coords_try1.txt','Coords_CubeB.txt','Coords_CubeM.txt','Coords_CubeS.txt']\r\n self.random_poses=[[],[],[],[],[],[]]\r\n for i in range(len(obj_list)):\r\n random_poses_file=open(obj_list[i],\"r\")\r\n #temp=random_poses_file.read()\r\n lines_list = random_poses_file.readlines()\r\n temp = [[float(val) for val in line.split()] for line in lines_list[1:]]\r\n self.random_poses[i]=temp\r\n random_poses_file.close()\r\n self.instance=0#int(np.random.uniform(low=0,high=100))\r\n\r\n\r\n\r\n # Function to get 3D transformation matrix of the palm and get the wrist position and update both those variables\r\n def _get_trans_mat_wrist_pose(self): #WHY MUST YOU HATE ME WHEN I GIVE YOU NOTHING BUT LOVE?\r\n self.wrist_pose=np.copy(self._sim.data.get_geom_xpos('palm')) \r\n Rfa=np.copy(self._sim.data.get_geom_xmat('palm'))\r\n temp=np.matmul(Rfa,np.array([[0,0,1],[-1,0,0],[0,-1,0]]))\r\n temp=np.transpose(temp)\r\n Tfa=np.zeros([4,4])\r\n Tfa[0:3,0:3]=temp\r\n Tfa[3,3]=1 \r\n Tfw=np.zeros([4,4])\r\n Tfw[0:3,0:3]=temp\r\n Tfw[3,3]=1\r\n self.wrist_pose=self.wrist_pose+np.matmul(np.transpose(Tfw[0:3,0:3]),[-0.009,0.048,0.0])\r\n Tfw[0:3,3]=np.matmul(-(Tfw[0:3,0:3]),np.transpose(self.wrist_pose))\r\n self.Tfw=Tfw \r\n self.Twf=np.linalg.inv(Tfw)\r\n\r\n def experimental_sensor(self,rangedata,finger_pose,gravity):\r\n #print('flimflam')\r\n #finger_joints = [\"f1_prox\", \"f2_prox\", \"f3_prox\", \"f1_dist\", \"f2_dist\", \"f3_dist\"]\r\n finger_pose=np.array(finger_pose)\r\n\r\n s1=finger_pose[0:3]-finger_pose[6:9]\r\n s2=finger_pose[0:3]-finger_pose[3:6]\r\n #print(finger_pose)\r\n front_area=np.linalg.norm(np.cross(s1,s2))/2\r\n #print('front area',front_area)\r\n top1=np.linalg.norm(np.cross(finger_pose[0:3],finger_pose[9:12]))/2\r\n top2=np.linalg.norm(np.cross(finger_pose[9:12],finger_pose[12:15]))/2\r\n top3=np.linalg.norm(np.cross(finger_pose[3:6],finger_pose[12:15]))/2\r\n top4=np.linalg.norm(np.cross(finger_pose[6:9],finger_pose[15:18]))/2\r\n top5=np.linalg.norm(np.cross(finger_pose[9:12],finger_pose[15:18]))/2\r\n total1=top1+top2+top3\r\n total2=top1+top4+top5\r\n top_area=max(total1,total2)\r\n #print('front',front_area,'top',top_area)\r\n\r\n sites=[\"palm\",\"palm_1\",\"palm_2\",\"palm_3\",\"palm_4\"]\r\n obj_pose=[]#np.zeros([5,3])\r\n 
xs=[]\r\n ys=[]\r\n zs=[]\r\n for i in range(len(sites)):\r\n temp=self._sim.data.get_site_xpos(sites[i])\r\n temp=np.append(temp,1)\r\n temp=np.matmul(self.Tfw,temp)\r\n temp=temp[0:3]\r\n if rangedata[i] < 0.06:\r\n temp[1]+=rangedata[i]\r\n obj_pose=np.append(obj_pose,temp)\r\n #obj_pose[i,:]=temp\r\n for i in range(int(len(obj_pose)/3)):\r\n xs=np.append(xs,obj_pose[i*3])\r\n ys=np.append(ys,obj_pose[i*3+1])\r\n zs=np.append(zs,obj_pose[i*3+2])\r\n if xs ==[]:\r\n sensor_pose=[0.2,0.2,0.2]\r\n else:\r\n sensor_pose=[np.average(xs),np.average(ys),np.average(zs)]\r\n obj_size=np.copy(self._get_obj_size())\r\n if np.argmax(np.abs(gravity))==2:\r\n front_part=np.abs(obj_size[0]*obj_size[2])/front_area\r\n top_part=np.abs(obj_size[0]*obj_size[1])/top_area\r\n elif np.argmax(np.abs(gravity))==1:\r\n front_part=np.abs(obj_size[0]*obj_size[2])/front_area\r\n top_part=np.abs(obj_size[1]*obj_size[2])/top_area\r\n else:\r\n front_part=np.abs(obj_size[0]*obj_size[1])/front_area\r\n top_part=np.abs(obj_size[0]*obj_size[2])/top_area\r\n\r\n return sensor_pose,front_part, top_part\r\n\r\n\r\n def get_sim_state(self): #this gives you the whole damn qpos\r\n return np.copy(self._sim.data.qpos)\r\n\r\n def set_sim_state(self,qpos,obj_state):#this just sets all the qpos of the simulation manually. Is it bad? Probably. Do I care at this point? Not really\r\n self._sim.data.set_joint_qpos(\"object\", [obj_state[0], obj_state[1], obj_state[2], 1.0, 0.0, 0.0, 0.0])\r\n for i in range(len(self._sim.data.qpos)):\r\n self._sim.data.qpos[i]=qpos[i]\r\n self._sim.forward()\r\n\r\n # Function to get the state of all the joints, including sliders\r\n def _get_joint_states(self):\r\n arr = []\r\n for i in range(len(self._sim.data.sensordata)-17):\r\n arr.append(self._sim.data.sensordata[i])\r\n arr[0]=-arr[0]\r\n arr[1]=-arr[1]\r\n return arr # it is a list\r\n\r\n\r\n def obs_test(self):\r\n obj_pose = self._get_obj_pose()\r\n obj_pose = np.copy(obj_pose)\r\n tests_passed=[]\r\n self._sim.data.site_xpos[0]=obj_pose\r\n self._sim.data.site_xpos[1]=obj_pose\r\n print(self._sim.data.qpos)\r\n print('object position', obj_pose)\r\n temp=True\r\n while temp:\r\n ans=input('do the red bars line up with the object center Y/N?')\r\n if ans.lower()=='n':\r\n print('Recording first test as failure')\r\n tests_passed.append(False)\r\n temp=False\r\n elif ans.lower()=='y':\r\n print('Recording first test as success')\r\n tests_passed.append(True)\r\n temp=False\r\n else:\r\n print('input not recognized, please input either Y or N. do the red bars line up with the object center Y/N?')\r\n print('Next test, finger positions')\r\n \r\n finger_joints = [\"f1_prox\", \"f2_prox\", \"f3_prox\", \"f1_dist\", \"f2_dist\", \"f3_dist\"]\r\n fingers_6D_pose = []\r\n for joint in finger_joints:\r\n trans = self._sim.data.get_geom_xpos(joint)\r\n trans = list(trans)\r\n for i in range(3):\r\n fingers_6D_pose.append(trans[i])\r\n for i in range(6):\r\n self._sim.data.site_xpos[0]=fingers_6D_pose[i*3:i*3+3]\r\n self._sim.data.site_xpos[1]=fingers_6D_pose[i*3:i*3+3]\r\n temp=True\r\n while temp:\r\n ans=input(f'do the red bars line up with the {finger_joints[i]} Y/N?')\r\n if ans.lower()=='n':\r\n print('Recording test as failure')\r\n tests_passed.append(False)\r\n temp=False\r\n elif ans.lower()=='y':\r\n print('Recording test as success')\r\n tests_passed.append(True)\r\n temp=False\r\n else:\r\n print(f'input not recognized, please input either Y or N. 
do the red bars line up with the {finger_joints[i]} Y/N?')\r\n print('Next test, wrist position')\r\n self._sim.data.site_xpos[0]=self.wrist_pose\r\n self._sim.data.site_xpos[1]=self.wrist_pose\r\n temp=True\r\n while temp:\r\n ans=input('do the red bars line up with the wrist position Y/N?')\r\n if ans.lower()=='n':\r\n print('Recording first test as failure')\r\n tests_passed.append(False)\r\n temp=False\r\n elif ans.lower()=='y':\r\n print('Recording first test as success')\r\n tests_passed.append(True)\r\n temp=False\r\n else:\r\n print('input not recognized, please input either Y or N. do the red bars line up with the wrist position Y/N?')\r\n passed=np.sum(tests_passed)\r\n failed=np.sum(np.invert(tests_passed))\r\n print('out of', np.shape(tests_passed), f'tests, {passed} tests passed and {failed} tests failed')\r\n print('tests passed')\r\n print('object pose:',tests_passed[0])\r\n print('wrist pose:',tests_passed[7])\r\n for i in range(6):\r\n print(finger_joints[i], 'pose:',tests_passed[i+1])\r\n\r\n \r\n \r\n \r\n # Function to return global or local transformation matrix\r\n def _get_obs(self, state_rep=None, test = False): #TODO: Add or subtract elements of this to match the discussions with Ravi and Cindy\r\n '''\r\n Local obs, all in local coordinates (from the center of the palm)\r\n (18,) Finger Pos 0-18\r\n (3,) Wrist Pos 18-21\r\n (3,) Obj Pos 21-24\r\n (9,) Joint States 24-33\r\n (3,) Obj Size 33-36\r\n (12,) Finger Object Distance 36-48\r\n (2,) X and Z angle 48-50\r\n (17,) Rangefinder data 50-67\r\n (3,) Gravity vector in local coordinates 67-70\r\n (3,) Object location based on rangefinder data 70-73\r\n (1,) Ratio of the area of the side of the shape to the open portion of the side of the hand 73\r\n (1,) Ratio of the area of the top of the shape to the open portion of the top of the hand 74\r\n (6, ) Finger dot product 75) \"f1_prox\", 76) \"f2_prox\", 77) \"f3_prox\", 78) \"f1_dist\", 79) \"f2_dist\", 80) \"f3_dist\" 75-80\r\n (1, ) Dot product (wrist) 81\r\n '''\r\n '''\r\n Global obs, all in global coordinates (from simulator 0,0,0)\r\n (18,) Finger Pos 0-18\r\n (3,) Wrist Pos 18-21\r\n (3,) Obj Pos 21-24\r\n (9,) Joint States 24-33\r\n (3,) Obj Size 33-36\r\n (12,) Finger Object Distance 36-48\r\n (2,) X and Z angle 48-50\r\n (17,) Rangefinder data 50-67\r\n '''\r\n \r\n if state_rep == None:\r\n state_rep = self.state_rep\r\n # states rep\r\n obj_pose = self._get_obj_pose()\r\n obj_pose = np.copy(obj_pose)\r\n self._get_trans_mat_wrist_pose()\r\n x_angle,z_angle = self._get_angles()\r\n joint_states = self._get_joint_states()\r\n obj_size = self._get_obj_size()\r\n finger_obj_dist = self._get_finger_obj_dist()\r\n range_data=self._get_rangefinder_data()\r\n finger_joints = [\"f1_prox\", \"f2_prox\", \"f3_prox\", \"f1_dist\", \"f2_dist\", \"f3_dist\"]\r\n gravity=[0,0,-1]\r\n dot_prod=self._get_dot_product()\r\n fingers_6D_pose = []\r\n if test:\r\n x=threading.Thread(target=self.obs_test)\r\n x.start()\r\n while x.is_alive():\r\n self.render()\r\n print('')\r\n if state_rep == \"global\":#NOTE: only use local coordinates! 
global coordinates suck\r\n finger_dot_prod=[]\r\n for joint in finger_joints:\r\n trans = self._sim.data.get_geom_xpos(joint)\r\n trans = list(trans)\r\n for i in range(3):\r\n fingers_6D_pose.append(trans[i])\r\n finger_dot_prod=self._get_fingers_dot_product(fingers_6D_pose)\r\n\r\n fingers_6D_pose = fingers_6D_pose + list(self.wrist_pose) + list(obj_pose) + joint_states + [obj_size[0], obj_size[1], obj_size[2]*2] + finger_obj_dist + [x_angle, z_angle] + range_data +finger_dot_prod+ [dot_prod]#+ [self.obj_shape]\r\n\r\n elif state_rep == \"local\":\r\n finger_dot_prod=[]\r\n for joint in finger_joints:\r\n trans = np.copy(self._sim.data.get_geom_xpos(joint))\r\n dot_prod_coords=list(trans)\r\n trans_for_roation=np.append(trans,1)\r\n trans_for_roation=np.matmul(self.Tfw,trans_for_roation)\r\n trans = trans_for_roation[0:3]\r\n trans = list(trans)\r\n temp_dot_prod=self._get_dot_product(dot_prod_coords)\r\n finger_dot_prod.append(temp_dot_prod)\r\n for i in range(3):\r\n fingers_6D_pose.append(trans[i])\r\n wrist_for_rotation=np.append(self.wrist_pose,1)\r\n wrist_for_rotation=np.matmul(self.Tfw,wrist_for_rotation)\r\n \r\n wrist_pose = wrist_for_rotation[0:3]\r\n obj_for_roation=np.append(obj_pose,1)\r\n obj_for_roation=np.matmul(self.Tfw,obj_for_roation)\r\n obj_pose = obj_for_roation[0:3]\r\n gravity=np.matmul(self.Tfw[0:3,0:3],gravity)\r\n sensor_pos,front_thing,top_thing=self.experimental_sensor(range_data,fingers_6D_pose,gravity)\r\n\r\n fingers_6D_pose = fingers_6D_pose + list(wrist_pose) + list(obj_pose) + joint_states + [obj_size[0], obj_size[1], obj_size[2]*2] + finger_obj_dist + [x_angle, z_angle] + range_data + [gravity[0],gravity[1],gravity[2]] + [sensor_pos[0],sensor_pos[1],sensor_pos[2]] + [front_thing, top_thing] + finger_dot_prod + [dot_prod]#+ [self.obj_shape]\r\n\r\n if self.pid:\r\n fingers_6D_pose = fingers_6D_pose+ [self._get_dot_product()]\r\n elif state_rep == \"joint_states\":\r\n fingers_6D_pose = joint_states + list(obj_pose) + [obj_size[0], obj_size[1], obj_size[2]*2] + [x_angle, z_angle] #+ fingers_dot_prod\r\n return fingers_6D_pose\r\n\r\n # Function to get the distance between the digits on the fingers and the object center\r\n # NOTE! This only takes into account the x and y differences. 
We might want to consider taking z into account as well for other orientations\r\n def _get_finger_obj_dist(self): \r\n finger_joints = [\"f1_prox\", \"f1_prox_1\", \"f2_prox\", \"f2_prox_1\", \"f3_prox\", \"f3_prox_1\",\"f1_dist\", \"f1_dist_1\", \"f2_dist\", \"f2_dist_1\", \"f3_dist\", \"f3_dist_1\"]\r\n\r\n obj = self._get_obj_pose()\r\n dists = []\r\n for i in finger_joints:\r\n pos = self._sim.data.get_site_xpos(i)\r\n dist = np.absolute(pos[0:3] - obj[0:3])\r\n temp = np.linalg.norm(dist)\r\n dists.append(temp)\r\n return dists\r\n\r\n # get range data from 1 step of time\r\n # Uncertainty: rangefinder could only detect distance to the nearest geom, therefore it could detect geom that is not object\r\n def _get_rangefinder_data(self):\r\n range_data = []\r\n for i in range(17):\r\n if self._sim.data.sensordata[i+len(self._sim.data.sensordata)-17]==-1:\r\n a=6\r\n else:\r\n a=self._sim.data.sensordata[i+len(self._sim.data.sensordata)-17]\r\n range_data.append(a)\r\n\r\n return range_data\r\n\r\n # Function to return the object position in world coordinates\r\n def _get_obj_pose(self):\r\n arr = self._sim.data.get_geom_xpos(\"object\")\r\n return arr\r\n\r\n # Function to return the angles between the palm normal and the object location\r\n def _get_angles(self):\r\n #t=time.time()\r\n obj_pose = self._get_obj_pose()\r\n self._get_trans_mat_wrist_pose()\r\n local_obj_pos=np.copy(obj_pose)\r\n local_obj_pos=np.append(local_obj_pos,1)\r\n local_obj_pos=np.matmul(self.Tfw,local_obj_pos)\r\n obj_wrist = local_obj_pos[0:3]/np.linalg.norm(local_obj_pos[0:3])\r\n center_line = np.array([0,1,0])\r\n z_dot = np.dot(obj_wrist[0:2],center_line[0:2])\r\n z_angle = np.arccos(z_dot/np.linalg.norm(obj_wrist[0:2]))\r\n x_dot = np.dot(obj_wrist[1:3],center_line[1:3])\r\n x_angle = np.arccos(x_dot/np.linalg.norm(obj_wrist[1:3]))\r\n return x_angle,z_angle\r\n \r\n def _get_fingers_dot_product(self, fingers_6D_pose):\r\n fingers_dot_product = []\r\n for i in range(6):\r\n fingers_dot_product.append(self._get_dot_product(fingers_6D_pose[3*i:3*i+3]))\r\n return fingers_dot_product\r\n \r\n #function to get the dot product. Only used for the pid controller\r\n def _get_dot_product(self,obj_state=None):\r\n if obj_state==None:\r\n obj_state=self._get_obj_pose()\r\n hand_pose = self._sim.data.get_body_xpos(\"j2s7s300_link_7\")\r\n obj_state_x = abs(obj_state[0] - hand_pose[0])\r\n obj_state_y = abs(obj_state[1] - hand_pose[1])\r\n obj_vec = np.array([obj_state_x, obj_state_y])\r\n obj_vec_norm = np.linalg.norm(obj_vec)\r\n obj_unit_vec = obj_vec / obj_vec_norm\r\n\r\n center_x = abs(0.0 - hand_pose[0])\r\n center_y = abs(0.0 - hand_pose[1])\r\n center_vec = np.array([center_x, center_y])\r\n center_vec_norm = np.linalg.norm(center_vec)\r\n center_unit_vec = center_vec / center_vec_norm\r\n\r\n dot_prod = np.dot(obj_unit_vec, center_unit_vec)\r\n return dot_prod**20 # cuspy to get distinct reward\r\n\r\n\r\n # Function to get rewards based only on the lift reward. This is primarily used to generate data for the grasp classifier\r\n def _get_reward_DataCollection(self):\r\n obj_target = 0.2\r\n obs = self._get_obs(state_rep=\"global\")\r\n # TODO: change obs[23] and obs[5] to the simulator height object\r\n if abs(obs[23] - obj_target) < 0.005 or (obs[23] >= obj_target): #Check to make sure that obs[23] is still the object height. 
Also local coordinates are a thing\r\n lift_reward = 1\r\n done = True\r\n elif obs[20]>obj_target+0.2:\r\n lift_reward=0.0\r\n done=True\r\n else:\r\n lift_reward = 0\r\n done = False\r\n\r\n info = {\"lift_reward\":lift_reward}\r\n return lift_reward, info, done\r\n\r\n # Function to get rewards for RL training\r\n def _get_reward(self, test = False): # TODO: change obs[23] and obs[5] to the simulator height object and stop using _get_obs\r\n #TODO: Make sure this works with the new grasp classifier\r\n\r\n # object height target\r\n obj_target = 0.2\r\n\r\n # Grasp reward\r\n grasp_reward = 0.0\r\n obs = self._get_obs(state_rep=\"global\")\r\n #loc_obs=self._get_obs()\r\n\r\n # network_inputs=obs[0:5]\r\n # network_inputs=np.append(network_inputs,obs[6:23])\r\n # network_inputs=np.append(network_inputs,obs[24:])\r\n # inputs = torch.FloatTensor(np.array(network_inputs)).to(device)\r\n\r\n # WITHOUT GRASP CLASSIFIER\r\n #if np.max(np.array(obs[41:47])) < 0.035 or np.max(np.array(obs[35:41])) < 0.015:\r\n # outputs = self.Grasp_net(inputs).cpu().data.numpy().flatten()\r\n # if (outputs >=0.3) & (not self.Grasp_Reward):\r\n # grasp_reward = 5.0\r\n # self.Grasp_Reward=True\r\n # else:\r\n # grasp_reward = 0.0\r\n\r\n if not test:\r\n if abs(obs[23] - obj_target) < 0.005 or (obs[23] >= obj_target):\r\n lift_reward = 50.0\r\n done = True\r\n else:\r\n lift_reward = 0.0\r\n done = False\r\n\r\n finger_reward = -np.sum((np.array(obs[41:47])) + (np.array(obs[35:41])))\r\n\r\n else:\r\n lift_reward = 0.0\r\n done = False\r\n finger_reward = -12.0\r\n\r\n #finger_reward = -np.sum((np.array(obs[41:47])) + (np.array(obs[35:41])))\r\n\r\n reward = 0.2*finger_reward + lift_reward + grasp_reward\r\n\r\n info = {\"lift_reward\":lift_reward}\r\n\r\n if test:\r\n if reward == -2.4000000000000004:\r\n print(\"Reward function Working Properly\")\r\n else:\r\n print(\"Reward function is not Working Properly\")\r\n\r\n return reward, info, done\r\n\r\n # only set proximal joints, cuz this is an underactuated hand\r\n #we have a problem here (a binomial in the denomiator)\r\n #ill use the quotient rule\r\n def _set_state(self, states):\r\n #print('sensor data',self._sim.data.sensordata[0:9])\r\n #print('qpos',self._sim.data.qpos[0:9])\r\n #print('states',states)\r\n self._sim.data.qpos[0] = states[0]\r\n self._sim.data.qpos[1] = states[1]\r\n self._sim.data.qpos[2] = states[2]\r\n self._sim.data.qpos[3] = states[3]\r\n self._sim.data.qpos[5] = states[4]\r\n self._sim.data.qpos[7] = states[5]\r\n self._sim.data.set_joint_qpos(\"object\", [states[6], states[7], states[8], 1.0, 0.0, 0.0, 0.0])\r\n self._sim.forward()\r\n\r\n # Function to get the dimensions of the object\r\n def _get_obj_size(self):\r\n #TODO: fix this shit\r\n num_of_geoms=np.shape(self._sim.model.geom_size)\r\n final_size=[0,0,0]\r\n #print(self._sim.model.geom_size)\r\n #print(num_of_geoms[0]-8)\r\n for i in range(num_of_geoms[0]-8):\r\n size=np.copy(self._sim.model.geom_size[-1-i])\r\n diffs=[0,0,0]\r\n if size[2]==0:\r\n size[2]=size[1]\r\n size[1]=size[0]\r\n diffs[0]=abs(size[0]-size[1])\r\n diffs[1]=abs(size[1]-size[2])\r\n diffs[2]=abs(size[0]-size[2])\r\n if ('lemon' in self.filename)|(np.argmin(diffs)!=0):\r\n temp=size[0]\r\n size[0]=size[2]\r\n size[2]=temp\r\n\r\n if 'Bowl' in self.filename:\r\n if 'Rect' in self.filename:\r\n final_size[0]=0.17\r\n final_size[1]=0.17\r\n final_size[2]=0.075\r\n else:\r\n final_size[0]=0.175\r\n final_size[1]=0.175\r\n final_size[2]=0.07\r\n if self.obj_size=='m':\r\n for j in range(3):\r\n 
final_size[j]=final_size[j]*0.85\r\n elif self.obj_size=='s':\r\n for j in range(3):\r\n final_size[j]=final_size[j]*0.7\r\n else:\r\n final_size[0]=max(size[0],final_size[0])\r\n final_size[1]=max(size[1],final_size[1])\r\n final_size[2]+=size[2]\r\n #print(final_size)\r\n return final_size\r\n\r\n def set_obj_coords(self,x,y,z):\r\n self.obj_coords[0] = x\r\n self.obj_coords[1] = y\r\n self.obj_coords[2] = z\r\n\r\n def get_obj_coords(self):\r\n return self.obj_coords\r\n\r\n # Function to run all the experiments for RL training\r\n def experiment(self, shape_keys): #TODO: Talk to people thursday about adding the hourglass and bottles to this dataset.\r\n all_objects = {}\r\n # Cube\r\n all_objects[\"CubeS\"] = \"/kinova_description/j2s7s300_end_effector_v1_CubeS.xml\"\r\n all_objects[\"CubeM\"] = \"/kinova_description/j2s7s300_end_effector_v1_CubeM.xml\"\r\n all_objects[\"CubeB\"] = \"/kinova_description/j2s7s300_end_effector_v1_CubeB.xml\"\r\n # Cylinder\r\n all_objects[\"CylinderS\"] = \"/kinova_description/j2s7s300_end_effector_v1_CylinderS.xml\"\r\n all_objects[\"CylinderM\"] = \"/kinova_description/j2s7s300_end_effector_v1_CylinderM.xml\"\r\n all_objects[\"CylinderB\"] = \"/kinova_description/j2s7s300_end_effector_v1_CylinderB.xml\"\r\n # Cube rotated by 45 degrees\r\n all_objects[\"Cube45S\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cube45S.xml\"\r\n all_objects[\"Cube45M\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cube45M.xml\"\r\n all_objects[\"Cube45B\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cube45B.xml\"\r\n # Vase 1\r\n all_objects[\"Vase1S\"] = \"/kinova_description/j2s7s300_end_effector_v1_Vase1S.xml\"\r\n all_objects[\"Vase1M\"] = \"/kinova_description/j2s7s300_end_effector_v1_Vase1M.xml\"\r\n all_objects[\"Vase1B\"] = \"/kinova_description/j2s7s300_end_effector_v1_Vase1B.xml\"\r\n # Vase 2\r\n all_objects[\"Vase2S\"] = \"/kinova_description/j2s7s300_end_effector_v1_Vase2S.xml\"\r\n all_objects[\"Vase2M\"] = \"/kinova_description/j2s7s300_end_effector_v1_Vase2M.xml\"\r\n all_objects[\"Vase2B\"] = \"/kinova_description/j2s7s300_end_effector_v1_Vase2B.xml\"\r\n # Cone 1\r\n all_objects[\"Cone1S\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cone1S.xml\"\r\n all_objects[\"Cone1M\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cone1M.xml\"\r\n all_objects[\"Cone1B\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cone1B.xml\"\r\n # Cone 2\r\n all_objects[\"Cone2S\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cone2S.xml\"\r\n all_objects[\"Cone2M\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cone2M.xml\"\r\n all_objects[\"Cone2B\"] = \"/kinova_description/j2s7s300_end_effector_v1_Cone2B.xml\"\r\n\r\n ## Nigel's Shapes ##\r\n # Hourglass\r\n all_objects[\"HourB\"] = \"/kinova_description/j2s7s300_end_effector_v1_bhg.xml\"\r\n all_objects[\"HourM\"] = \"/kinova_description/j2s7s300_end_effector_v1_mhg.xml\"\r\n all_objects[\"HourS\"] = \"/kinova_description/j2s7s300_end_effector_v1_shg.xml\"\r\n # Vase\r\n all_objects[\"VaseB\"] = \"/kinova_description/j2s7s300_end_effector_v1_bvase.xml\"\r\n all_objects[\"VaseM\"] = \"/kinova_description/j2s7s300_end_effector_v1_mvase.xml\"\r\n all_objects[\"VaseS\"] = \"/kinova_description/j2s7s300_end_effector_v1_svase.xml\"\r\n # Bottle\r\n all_objects[\"BottleB\"] = \"/kinova_description/j2s7s300_end_effector_v1_bbottle.xml\"\r\n all_objects[\"BottleM\"] = \"/kinova_description/j2s7s300_end_effector_v1_mbottle.xml\"\r\n all_objects[\"BottleS\"] = 
\"/kinova_description/j2s7s300_end_effector_v1_sbottle.xml\"\r\n # Bowl\r\n all_objects[\"BowlB\"] = \"/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml\"\r\n all_objects[\"BowlM\"] = \"/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml\"\r\n all_objects[\"BowlS\"] = \"/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml\"\r\n # Lemon\r\n all_objects[\"LemonB\"] = \"/kinova_description/j2s7s300_end_effector_v1_blemon.xml\"\r\n all_objects[\"LemonM\"] = \"/kinova_description/j2s7s300_end_effector_v1_mlemon.xml\"\r\n all_objects[\"LemonS\"] = \"/kinova_description/j2s7s300_end_effector_v1_slemon.xml\"\r\n # TBottle\r\n all_objects[\"TBottleB\"] = \"/kinova_description/j2s7s300_end_effector_v1_btbottle.xml\"\r\n all_objects[\"TBottleM\"] = \"/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml\"\r\n all_objects[\"TBottleS\"] = \"/kinova_description/j2s7s300_end_effector_v1_stbottle.xml\"\r\n # RBowl\r\n all_objects[\"RBowlB\"] = \"/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml\"\r\n all_objects[\"RBowlM\"] = \"/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml\"\r\n all_objects[\"RBowlS\"] = \"/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml\"\r\n\r\n for key in shape_keys:\r\n self.objects[key] = all_objects[key]\r\n\r\n if len(shape_keys) == 0:\r\n print(\"No shape keys\")\r\n raise ValueError\r\n elif len(shape_keys) != len(self.objects):\r\n print(\"Invlaid shape key requested\")\r\n raise ValueError\r\n return self.objects\r\n\r\n #Function to randomize the position of the object for grasp classifier data collection\r\n def randomize_initial_pos_data_collection(self,orientation=\"side\"):\r\n print('ya done messed up A-A-ron')\r\n size=self._get_obj_size()\r\n #The old way to generate random poses\r\n if orientation=='side':\r\n '''\r\n temp=self.random_poses[obj][self.instance]\r\n rand_x=temp[0]\r\n rand_y=temp[1]\r\n z=temp[2]\r\n self.instance+=1\r\n '''\r\n rand_x=triang.rvs(0.5)\r\n rand_x=(rand_x-0.5)*(0.16-2*size[0])\r\n rand_y=np.random.uniform()\r\n if rand_x>=0:\r\n rand_y=rand_y*(-(0.07-size[0]*np.sqrt(2))/(0.08-size[0])*rand_x-(-0.03-size[0]))\r\n else:\r\n rand_y=rand_y*((0.07-size[0]*np.sqrt(2))/(0.08-size[0])*rand_x-(-0.03-size[0]))\r\n elif orientation=='rotated':\r\n rand_x=0\r\n rand_y=0\r\n else:\r\n theta=np.random.uniform(low=0,high=2*np.pi)\r\n r=np.random.uniform(low=0,high=size[0]/2)\r\n rand_x=np.sin(theta)*r\r\n rand_y=np.cos(theta)*r\r\n z = size[-1]/2\r\n return rand_x, rand_y, z \r\n\r\n def write_xml(self,new_rotation): #This function takes in a rotation vector [roll, pitch, yaw] and sets the hand rotation in the\r\n #self.file_dir and self.filename to that rotation. 
It then sets up the simulator with the object \r\n #incredibly far from the hand to prevent collisions and recalculates the rotation matrices of the hand\r\n xml_file=open(self.file_dir+self.filename,\"r\")\r\n xml_contents=xml_file.read()\r\n xml_file.close()\r\n starting_point=xml_contents.find(']+\",xml_contents[euler_point:])\r\n c_start=contents.start()\r\n c_end=contents.end()\r\n starting_point=xml_contents.find('joint name=\"j2s7s300_joint_7\" type')\r\n axis_point=xml_contents.find('axis=',starting_point)\r\n contents=re.search(\"[^\\s]+\\s[^\\s]+\\s[^>]+\",xml_contents[axis_point:])\r\n starting_point=xml_contents.find('site name=\"local_origin_site\" type=\"cylinder\" size=\"0.0075 0.005\" rgba=\"25 0.5 0.0 1\"')\r\n site_point=xml_contents.find('pos=',starting_point)\r\n contents=re.search(\"[^\\s]+\\s[^\\s]+\\s[^>]+\",xml_contents[starting_point:])\r\n wrist_pose=self.wrist_pose\r\n new_thing= str(wrist_pose[0]) + \" \" + str(wrist_pose[1]) + \" \" + str(wrist_pose[2])\r\n p1=str(new_rotation[0])\r\n p2=str(new_rotation[1])\r\n p3=str(new_rotation[2])\r\n xml_contents=xml_contents[:euler_point+c_start+7] + p1[0:min(5,len(p1))]+ \" \"+p2[0:min(5,len(p2))] +\" \"+ p3[0:min(5,len(p3))] \\\r\n + xml_contents[euler_point+c_end-1:]# + new_thing + xml_contents[site_point+c2_end:]\r\n xml_file=open(self.file_dir+self.filename,\"w\")\r\n xml_file.write(xml_contents)\r\n xml_file.close()\r\n self._model = load_model_from_path(self.file_dir + self.filename)\r\n self._sim = MjSim(self._model) \r\n self._set_state(np.array([0, 0, 0, 0, 0, 0, 10, 10, 10]))\r\n self._get_trans_mat_wrist_pose()\r\n \r\n # Steph Added\r\n def check_obj_file_empty(self,filename):\r\n if os.path.exists(filename) == False:\r\n return False\r\n with open(filename, 'r') as read_obj:\r\n # read first character\r\n one_char = read_obj.read(1)\r\n # if not fetched then file is empty\r\n if not one_char:\r\n return True\r\n return False\r\n\r\n def Generate_Latin_Square(self,max_elements,filename,shape_keys, test = False):\r\n print(\"GENERATE LATIN SQUARE\")\r\n #print(\"shape keys: \",shape_keys)\r\n ### Choose an experiment ###\r\n self.objects = self.experiment(shape_keys)\r\n\r\n # TEMPORARY - Only uncomment for quicker testing\r\n # max_elements = 1000\r\n\r\n # n is the number of object types (sbox, bbox, bcyl, etc.)\r\n num_elements = 0\r\n elem_gen_done = 0\r\n printed_row = 0\r\n\r\n while num_elements < max_elements:\r\n n = len(self.objects.keys())-1\r\n #print(\"This is n: \",n)\r\n k = n\r\n # Loop to prrows\r\n for i in range(0, n+1, 1):\r\n # This loops runs only after first iteration of outer loop\r\n # Prints nummbers from n to k\r\n keys = list(self.objects.keys())\r\n temp = k\r\n\r\n while (temp <= n) :\r\n if printed_row <= n: # Just used to print out one row instead of all of them\r\n printed_row += 1\r\n\r\n key_name = str(keys[temp])\r\n self.obj_keys.append(key_name)\r\n temp += 1\r\n num_elements +=1\r\n if num_elements == max_elements:\r\n elem_gen_done = 1\r\n break\r\n if elem_gen_done:\r\n break\r\n\r\n # This loop prints numbers from 1 to k-1.\r\n for j in range(0, k):\r\n key_name = str(keys[j])\r\n self.obj_keys.append(key_name)\r\n num_elements +=1\r\n if num_elements == max_elements:\r\n elem_gen_done = 1\r\n break\r\n if elem_gen_done:\r\n break\r\n k -= 1\r\n\r\n ########## Function Testing Code########\r\n if test:\r\n test_key = self.obj_keys\r\n if len(test_key) == max_elements:\r\n test_key.sort()\r\n num_elem_test = 1\r\n for i in range(len(test_key)-2):\r\n if test_key[i] != 
test_key[i+1]:\r\n num_elem_test += 1\r\n\r\n if num_elem_test == len(shape_keys):\r\n print(\"Latin Square function is Generating Perfect Distribution\")\r\n else:\r\n print(\"Latin Square function is not Generating Perfect Distribution\")\r\n ########## Ends Here ###############\r\n\r\n with open(filename, \"w\", newline=\"\") as outfile:\r\n writer = csv.writer(outfile)\r\n for key in self.obj_keys:\r\n writer.writerow(key)\r\n\r\n def objects_file_to_list(self,filename, num_objects,shape_keys):\r\n print(\"FILENAME: \",filename)\r\n\r\n my_file = Path(filename)\r\n if my_file.is_file() is True:\r\n if os.stat(filename).st_size == 0:\r\n print(\"Object file is empty!\")\r\n self.Generate_Latin_Square(num_objects,filename,shape_keys)\r\n else:\r\n self.Generate_Latin_Square(num_objects, filename, shape_keys)\r\n\r\n with open(filename, newline='') as csvfile:\r\n reader = csv.reader(csvfile)\r\n for row in reader:\r\n row = ''.join(row)\r\n self.obj_keys.append(row)\r\n #print('LAST OBJECT KEYS',self.obj_keys)\r\n def get_obj_keys(self):\r\n return self.obj_keys\r\n\r\n def get_object(self,filename):\r\n # Get random shape\r\n random_shape = self.obj_keys.pop()\r\n\r\n # remove current object file contents\r\n f = open(filename, \"w\")\r\n f.truncate()\r\n f.close()\r\n\r\n # write new object keys to file so new env will have updated list\r\n with open(filename, \"w\", newline=\"\") as outfile:\r\n writer = csv.writer(outfile)\r\n for key in self.obj_keys:\r\n writer.writerow(key)\r\n\r\n # Load model\r\n self._model = load_model_from_path(self.file_dir + self.objects[random_shape])\r\n self._sim = MjSim(self._model)\r\n\r\n print(\"random_shape: \",random_shape)\r\n\r\n return random_shape, self.objects[random_shape]\r\n\r\n # Get the initial object position\r\n def sample_initial_valid_object_pos(self,shapeName,coords_filename):\r\n data = []\r\n with open(coords_filename) as csvfile:\r\n checker=csvfile.readline()\r\n if ',' in checker:\r\n delim=','\r\n else:\r\n delim=' '\r\n reader = csv.reader(csvfile, delimiter=delim)\r\n for i in reader:\r\n data.append([float(i[0]), float(i[1]), float(i[2])])\r\n rand_coord = random.choice(data)\r\n x = rand_coord[0]\r\n y = rand_coord[1]\r\n z = rand_coord[2]\r\n\r\n return x, y, z\r\n\r\n def obj_shape_generator(self,obj_params):\r\n if obj_params[0] == \"Cube\":\r\n if obj_params[1] == \"B\":\r\n obj=0\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bbox.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bbox.xml\"\r\n elif obj_params[1] == \"M\":\r\n obj=1\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mbox.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mbox.xml\"\r\n elif obj_params[1] == \"S\":\r\n obj=2\r\n self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1.xml\"\r\n elif obj_params[0] == \"Cylinder\":\r\n if obj_params[1] == \"B\":\r\n obj=3\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bcyl.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bcyl.xml\" \r\n elif obj_params[1] == \"M\":\r\n obj=4\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + 
\"/kinova_description/j2s7s300_end_effector_v1_mcyl.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mcyl.xml\"\r\n elif obj_params[1] == \"S\":\r\n obj=5\r\n self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_scyl.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1_scyl.xml\"\r\n elif obj_params[0] == \"Hour\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bhg.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bhg.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mhg.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mhg.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_shg.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_shg.xml\"\r\n if obj_params[0] == \"Vase\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bvase.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bvase.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mvase.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mvase.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_svase.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_svase.xml\"\r\n elif obj_params[0] == \"Bottle\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bbottle.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bbottle.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mbottle.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mbottle.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_sbottle.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_sbottle.xml\"\r\n elif obj_params[0] == \"Bowl\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml\"\r\n if obj_params[0] == \"Lemon\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + 
\"/kinova_description/j2s7s300_end_effector_v1_blemon.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_blemon.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mlemon.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mlemon.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_slemon.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_slemon.xml\"\r\n elif obj_params[0] == \"TBottle\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_btbottle.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_btbottle.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_stbottle.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_stbottle.xml\"\r\n elif obj_params[0] == \"RBowl\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml\"), 'b',\"/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml\"), 'm',\"/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml\"), 's',\"/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml\"\r\n elif obj_params[0] == \"Cone1\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bcone1.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bcone1.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mcone1.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mcone1.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_scone1.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1_scone1.xml\"\r\n elif obj_params[0] == \"Cone2\":\r\n if obj_params[1] == \"B\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_bcone2.xml\"),'b',\"/kinova_description/j2s7s300_end_effector_v1_bcone2.xml\"\r\n elif obj_params[1] == \"M\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + \"/kinova_description/j2s7s300_end_effector_v1_mcone2.xml\"),'m',\"/kinova_description/j2s7s300_end_effector_v1_mcone2.xml\"\r\n elif obj_params[1] == \"S\":\r\n self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + 
\"/kinova_description/j2s7s300_end_effector_v1_scone2.xml\"),'s',\"/kinova_description/j2s7s300_end_effector_v1_scone2.xml\"\r\n elif obj_params[0]=='display':\r\n self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + \"/kinova_description/DisplayStuff.xml\"),'s',\"/kinova_description/DisplayStuff.xml\"\r\n return obj_params[0]+obj_params[1]\r\n\r\n def reset(self,env_name=\"env\",shape_keys=[\"CubeS\"],hand_orientation=\"normal\",mode=\"train\",start_pos=None,obj_params=None,coords='global',qpos=None, test = False):\r\n # All possible shape keys - default shape keys will be used for expert data generation\r\n # shape_keys=[\"CubeS\",\"CubeB\",\"CylinderS\",\"CylinderB\",\"Cube45S\",\"Cube45B\",\"Cone1S\",\"Cone1B\",\"Cone2S\",\"Cone2B\",\"Vase1S\",\"Vase1B\",\"Vase2S\",\"Vase2B\"]\r\n\r\n # x, y = self.randomize_initial_pose(False, \"s\") # for RL training\r\n #x, y = self.randomize_initial_pose(True) # for data collection\r\n\r\n # Steph new code\r\n obj_list_filename = \"\"\r\n num_objects = 200\r\n if env_name == \"env\":\r\n obj_list_filename = \"objects.csv\"\r\n num_objects = 20000\r\n else:\r\n obj_list_filename = \"eval_objects.csv\"\r\n num_objects = 200\r\n\r\n if len(self.objects) == 0:\r\n self.objects = self.experiment(shape_keys)\r\n if len(self.obj_keys) == 0:\r\n print(\"Objects file to list Reset call\")\r\n self.objects_file_to_list(obj_list_filename,num_objects,shape_keys)\r\n \r\n if obj_params==None:\r\n random_shape, self.filename = self.get_object(obj_list_filename)\r\n else:\r\n random_shape = self.obj_shape_generator(obj_params)\r\n\r\n shapes=list(self.objects.keys())\r\n #print(shapes[0])\r\n #self._get_jacobian()\r\n\r\n hand_rotation= 0 #np.random.normal(-0.087,0.087,3)\r\n obj=0\r\n\r\n # Orientation is initialized as Normal\r\n orientation_type = 0.330\r\n\r\n #-1.57,0,-1.57 is side normal\r\n #-1.57, 0, 0 is side tilted\r\n #0,0,-1.57 is top down\r\n\r\n if self.filename==\"/kinova_description/j2s7s300_end_effector.xml\":\r\n new_rotation=np.array([0,0,0])+hand_rotation\r\n if hand_orientation == 'random':\r\n orientation_type=np.random.rand()\r\n print(\"orientation_type: \",orientation_type)\r\n if orientation_type <0.333:\r\n new_rotation=np.array([0,0,0])+hand_rotation\r\n elif orientation_type >0.667:\r\n new_rotation=np.array([0,0,0])+hand_rotation\r\n else:\r\n new_rotation=np.array([1.2,0,0])+hand_rotation\r\n else:\r\n # Normal hand orientation\r\n new_rotation=np.array([0,0,0])+hand_rotation\r\n\r\n else:\r\n if hand_orientation == 'random':\r\n if not test:\r\n orientation_type=np.random.rand()\r\n else:\r\n orientation_type= 0.111\r\n print(\"orientation_type: \",orientation_type)\r\n\r\n # Initial position\r\n if orientation_type <0.333:\r\n new_rotation=np.array([-1.57,0,-1.57])+hand_rotation\r\n coords_filename = \"gym_kinova_gripper/envs/kinova_description/\"+mode+\"_coords/Normal/\" + random_shape + \".txt\"\r\n # Top orientation\r\n elif orientation_type >0.667:\r\n new_rotation=np.array([0,0,0])+hand_rotation\r\n coords_filename = \"gym_kinova_gripper/envs/kinova_description/\"+mode+\"_coords/Top/\" + random_shape + \".txt\"\r\n # Sideways orientation\r\n else:\r\n new_rotation=np.array([-1.2,0,0])+hand_rotation\r\n coords_filename = \"gym_kinova_gripper/envs/kinova_description/\"+mode+\"_coords/Side/\" + random_shape + \".txt\"\r\n else:\r\n # Normal hand orientations\r\n new_rotation=np.array([-1.57,0,-1.57])+hand_rotation\r\n coords_filename = 
\"gym_kinova_gripper/envs/kinova_description/\"+mode+\"_coords/Normal/\" + random_shape + \".txt\"\r\n #print(\"COORDS FILENAME: \",coords_filename)\r\n if test & np.shape(self._sim.data.site_xpos)[0]<19:\r\n self.add_site([0,0,0])\r\n self.write_xml(new_rotation)\r\n\r\n if orientation_type < 0.333:\r\n self.orientation = 'normal'\r\n xloc,yloc,zloc,f1prox,f2prox,f3prox=0,0,0,0,0,0\r\n elif orientation_type > 0.667:\r\n self.orientation = 'top'\r\n size=self._get_obj_size()\r\n \r\n if self.obj_size=='b':\r\n Z=0.15\r\n elif self.obj_size=='m':\r\n Z=0.14\r\n elif self.obj_size=='s':\r\n Z=0.13\r\n stuff=np.matmul(self.Tfw[0:3,0:3],[-0.005,-0.155,Z+0.06])\r\n #stuff=np.matmul(self.Tfw[0:3,0:3],[0,-0.15,0.1+size[-1]*1.8])\r\n xloc,yloc,zloc,f1prox,f2prox,f3prox=-stuff[0],-stuff[1],stuff[2],0,0,0\r\n else:\r\n self.orientation = 'rotated'\r\n temp=np.matmul(self.Tfw[0:3,0:3],np.array([0.051,-0.075,0.06]))\r\n #print('temp',temp)\r\n xloc,yloc,zloc,f1prox,f2prox,f3prox=-temp[0],-temp[1],temp[2],0,0,0\r\n if qpos is None:\r\n if start_pos is None:\r\n if orientation_type >0.667:\r\n # Check for coords text file\r\n if self.check_obj_file_empty(coords_filename) == False:\r\n x, y, z = self.sample_initial_valid_object_pos(random_shape,coords_filename)\r\n else:\r\n x, y, z = self.randomize_initial_pos_data_collection(orientation='top')\r\n else:\r\n if self.check_obj_file_empty(coords_filename) == False:\r\n x, y, z = self.sample_initial_valid_object_pos(random_shape,coords_filename)\r\n else:\r\n x, y, z = self.randomize_initial_pos_data_collection()\r\n elif len(start_pos)==3:\r\n x, y, z = start_pos[0], start_pos[1], start_pos[2]\r\n elif len(start_pos)==2:\r\n x, y = start_pos[0], start_pos[1]\r\n z = self._get_obj_size()[-1]\r\n else:\r\n xloc,yloc,zloc,f1prox,f2prox,f3prox=start_pos[0], start_pos[1], start_pos[2],start_pos[3], start_pos[4], start_pos[5]\r\n x, y, z = start_pos[6], start_pos[7], start_pos[8]\r\n\r\n #all_states should be in the following format [xloc,yloc,zloc,f1prox,f2prox,f3prox,objx,objy,objz]\r\n self.all_states_1 = np.array([xloc, yloc, zloc, f1prox, f2prox, f3prox, x, y, z])\r\n #if coords=='local':\r\n # world_coords=np.matmul(self.Twf[0:3,0:3],np.array([x,y,z]))\r\n # self.all_states_1=np.array([xloc, yloc, zloc, f1prox, f2prox, f3prox, world_coords[0], world_coords[1], world_coords[2]])\r\n self.Grasp_Reward=False\r\n self.all_states_2 = np.array([xloc, yloc, zloc, f1prox, f2prox, f3prox, 0.00, 0.0, 0.055])\r\n self.all_states = [self.all_states_1 , self.all_states_2]\r\n\r\n self._set_state(self.all_states[0])\r\n else:\r\n self.set_sim_state(qpos,start_pos)\r\n x, y, z = start_pos[0], start_pos[1], start_pos[2]\r\n\r\n states = self._get_obs(test=False)\r\n obj_pose=self._get_obj_pose()\r\n deltas=[x-obj_pose[0],y-obj_pose[1],z-obj_pose[2]]\r\n #print('deltas',deltas)\r\n\r\n if np.linalg.norm(deltas)>0.05:\r\n self.all_states_1=np.array([xloc, yloc, zloc, f1prox, f2prox, f3prox, x+deltas[0], y+deltas[1], z+deltas[2]])\r\n self.all_states=[self.all_states_1,self.all_states_2]\r\n self._set_state(self.all_states[0])\r\n states = self._get_obs(test=False)\r\n\r\n #These two varriables are used when the action space is in joint states\r\n self.t_vel = 0\r\n self.prev_obs = []\r\n \r\n # Sets the object coordinates for heatmap tracking and plotting\r\n self.set_obj_coords(x,y,z)\r\n self._get_trans_mat_wrist_pose()\r\n\r\n ##Testing Code\r\n if test:\r\n if [xloc, yloc, zloc, f1prox, f2prox, f3prox] == [0,0,0,0,0,0]: \r\n if coords_filename == 
\"gym_kinova_gripper/envs/kinova_description/\"+mode+\"_coords/Normal/\" + random_shape + \".txt\":\r\n print(\"Reset function is working Properly Check the render\")\r\n self.render()\r\n else:\r\n print(\"Reset function is not working Properly Check the render\")\r\n self.render()\r\n\r\n return states\r\n\r\n #Function to display the current state in a video. The video is always paused when it first starts up.\r\n def render(self, mode='human'): #TODO: Fix the rendering issue where a new window gets built every time the environment is reset or the window freezes when it is reset\r\n a=False\r\n if self._viewer is None:\r\n self._viewer = MjViewer(self._sim)\r\n self._viewer._paused = True\r\n a=True\r\n self._viewer.render()\r\n if a:\r\n self._viewer._paused=True\r\n\r\n #Function to close the rendering window\r\n def close(self): #This doesn't work right now\r\n if self._viewer is not None:\r\n self._viewer = None\r\n\r\n #Function to pause the rendering video\r\n def pause(self):\r\n self._viewer._paused=True\r\n\r\n\r\n def seed(self, seed=None):\r\n self.np_random, seed = seeding.np_random(seed)\r\n return [seed]\r\n\r\n ###################################################\r\n ##### ---- Action space : Joint Velocity ---- #####\r\n ###################################################\r\n #Function to step the simulator forward in time\r\n def step(self, action, graspnetwork = False, testfun = False): #TODO: fix this so that we can rotate the hand\r\n total_reward = 0\r\n self._get_trans_mat_wrist_pose()\r\n if len(action)==4:\r\n action=[0,0,action[0],action[1],action[2],action[3]]\r\n\r\n if self.arm_or_hand==\"hand\":\r\n mass=0.732\r\n gear=25\r\n stuff=np.matmul(self.Tfw[0:3,0:3],[0,0,mass*10/gear])\r\n stuff[0]=-stuff[0]\r\n stuff[1]=-stuff[1]\r\n for _ in range(self.frame_skip):\r\n #print('tfw',self.Tfw)\r\n if self.step_coords=='global':\r\n slide_vector=np.matmul(self.Tfw[0:3,0:3],action[0:3])\r\n if (self.orientation == 'rotated') & (action[2]<=0):\r\n slide_vector=[-slide_vector[0],-slide_vector[1],slide_vector[2]]\r\n else:\r\n slide_vector=[-slide_vector[0],-slide_vector[1],slide_vector[2]]\r\n else:\r\n\r\n if (self.orientation == 'rotated')&(action[2]<=0):\r\n slide_vector=[-slide_vector[0],-slide_vector[1],slide_vector[2]]\r\n else:\r\n slide_vector=[-action[0],-action[1],action[2]]#np.matmul(self.Twf[0:3,0:3],action[0:3])\r\n\r\n for i in range(3):\r\n self._sim.data.ctrl[(i)*2] = slide_vector[i]\r\n if self.step_coords=='rotated':\r\n self._sim.data.ctrl[i+6] = action[i+3]+0.05\r\n else:\r\n self._sim.data.ctrl[i+6] = action[i+3]\r\n self._sim.data.ctrl[i*2+1]=stuff[i]\r\n self._sim.step()\r\n #print('slide vector',slide_vector)\r\n else:\r\n for _ in range(self.frame_skip):\r\n joint_velocities = action[0:7]\r\n finger_velocities=action[7:]\r\n\r\n for i in range(len(joint_velocities)):\r\n\r\n self._sim.data.ctrl[i+10] = joint_velocities[i]\r\n for i in range(len(finger_velocities)):\r\n self._sim.data.ctrl[i+7] = finger_velocities[i]\r\n self._sim.step()\r\n\r\n obs = self._get_obs(test=False)\r\n\r\n if not graspnetwork:\r\n if not testfun:\r\n ### Get this reward for RL training ###\r\n #print(\"Done is set with get_reward()\")\r\n total_reward, info, done = self._get_reward()\r\n else:\r\n print(\"Done is set with get_reward() TEST\")\r\n total_reward, info, done = self._get_reward(test = True)\r\n else:\r\n ### Get this reward for grasp classifier collection ###\r\n print(\"Done is set with get_reward_DataCollection()\")\r\n total_reward, info, done = 
self._get_reward_DataCollection()\r\n\r\n return obs, total_reward, done, info\r\n\r\n def add_site(self,world_site_coords,keep_sites=False):\r\n if not(keep_sites):\r\n self.site_count=0\r\n xml_file=open(self.file_dir+self.filename,\"r\")\r\n xml_contents=xml_file.read()\r\n xml_file.close()\r\n a=xml_contents.find('\\n')\r\n if a!=-1:\r\n starting_point=xml_contents.find('')\r\n site_point=xml_contents.find('\\n',starting_point)\r\n site_text=f' \\n'\r\n self.site_count+=1\r\n second_site_text=f' \\n'\r\n self.site_count+=1\r\n new_thing=xml_contents[0:site_point+1]+site_text+second_site_text\r\n new_thing=new_thing+xml_contents[site_point+1:]\r\n xml_file=open(self.file_dir+self.filename,\"w\")\r\n xml_file.write(new_thing)\r\n xml_file.close()\r\n\r\n self._model = load_model_from_path(self.file_dir + self.filename)\r\n self._sim = MjSim(self._model)\r\n object_location=self._get_obj_size()\r\n states=[self._sim.data.qpos[0],self._sim.data.qpos[1],self._sim.data.qpos[2],self._sim.data.qpos[3],self._sim.data.qpos[5],self._sim.data.qpos[7],object_location[0],object_location[1],object_location[2]]\r\n self._set_state(np.array(states))\r\n self._get_trans_mat_wrist_pose()\r\n \r\n def test_self(self):\r\n shapes=['Cube','Cylinder','Cone1','Cone2','Bowl','Rbowl','Bottle','TBottle','Hour','Vase','Lemon']\r\n sizes=['S','M','B']\r\n keys=[\"CubeS\",\"CubeB\",\"CylinderS\",\"CylinderB\",\"Cone1S\",\"Cone1B\",\"Cone2S\",\"Cone2B\",\"Vase1S\",\"Vase1B\",\"Vase2S\",\"Vase2B\"]\r\n key=random.choice(keys)\r\n self.reset(obj_params=[key[0:-1],key[-1]])\r\n print('testing shape',key)\r\n self._get_obs(test = True)\r\n print('testing step in global coords')\r\n action=[0,0,0,0]\r\n self.step_coords='global'\r\n start_obs=self._get_obs(state_rep='global')\r\n for i in range(150):\r\n action[0]=np.random.rand()-0.2\r\n self.step(action)\r\n end_obs=self._get_obs(state_rep='global') \r\n if (abs(start_obs[18]-end_obs[18])>0.001)|(abs(start_obs[19]-end_obs[19])>0.001):\r\n print('test failed. x/y position changed when it should not have, check step function')\r\n else:\r\n print('test passed')\r\n print('printing test step in local coords')\r\n self.reset(obj_params=[key[0:-1],key[-1]])\r\n self.step_coords='local'\r\n start_obs=self._get_obs()\r\n for i in range(150):\r\n action[0]=np.random.rand()-0.2\r\n self.step(action)\r\n end_obs=self._get_obs()\r\n if (abs(start_obs[18]-end_obs[18])>0.001)|(abs(start_obs[19]-end_obs[19])>0.001):\r\n print('test failed. x/y position changed when it should not have, check step function')\r\n else:\r\n print('test passed')\r\n print('no current test for 6 axis motion, step tests finished.')\r\n print('begining shape test')\r\n bad_shapes=[]\r\n for shape in shapes: \r\n for size in sizes:\r\n self.reset(obj_params=[shape,size])\r\n self.render()\r\n a=input('obj shape and size',shape,size,'. Is this correct y/n?')\r\n if a.lower()=='y':\r\n print('shape passed')\r\n else:\r\n print('shape failed. recording')\r\n bad_shapes.append([shape,size])\r\n if bad_shapes==[]:\r\n print('all shapes and sizes are accurate, tests finished')\r\n else:\r\n print('the following are shapes that were not correct. 
Look at the xml files.')\r\n print(bad_shapes) \r\n #TODO: Make a config file that makes it easy to switch action spaces and set global varriables correctly\r\n\r\n #####################################################\r\n\r\n ###################################################\r\n ##### ---- Action space : Joint Angle ---- ########\r\n ###################################################\r\n # def step(self, action):\r\n # total_reward = 0\r\n # for _ in range(self.frame_skip):\r\n # self.pos_control(action)\r\n # self._sim.step()\r\n\r\n # obs = self._get_obs()\r\n # total_reward, info, done = self._get_reward()\r\n # self.t_vel += 1\r\n # self.prev_obs.append(obs)\r\n # # print(self._sim.data.qpos[0], self._sim.data.qpos[1], self._sim.data.qpos[3], self._sim.data.qpos[5])\r\n # return obs, total_reward, done, info\r\n\r\n # def pos_control(self, action):\r\n # # position\r\n # # print(action)\r\n\r\n # self._sim.data.ctrl[0] = (action[0] / 1.5) * 0.2\r\n # self._sim.data.ctrl[1] = action[1]\r\n # self._sim.data.ctrl[2] = action[2]\r\n # self._sim.data.ctrl[3] = action[3]\r\n # # velocity\r\n # if abs(action[0] - 0.0) < 0.0001:\r\n # self._sim.data.ctrl[4] = 0.0\r\n # else:\r\n # self._sim.data.ctrl[4] = 0.1\r\n # # self._sim.data.ctrl[4] = (action[0] - self.prev_action[0] / 25)\r\n\r\n # if abs(action[1] - 0.0) < 0.001:\r\n # self._sim.data.ctrl[5] = 0.0\r\n # else:\r\n # self._sim.data.ctrl[5] = 0.01069\r\n # # self._sim.data.ctrl[5] = (action[1] - self.prev_action[1] / 25)\r\n\r\n # if abs(action[2] - 0.0) < 0.001:\r\n # self._sim.data.ctrl[6] = 0.0\r\n # else:\r\n # self._sim.data.ctrl[6] = 0.01069\r\n # # self._sim.data.ctrl[6] = (action[2] - self.prev_action[2] / 25)\r\n\r\n # if abs(action[3] - 0.0) < 0.001:\r\n # self._sim.data.ctrl[7] = 0.0\r\n # else:\r\n # self._sim.data.ctrl[7] = 0.01069\r\n # # self._sim.data.ctrl[7] = (action[3] - self.prev_action[3] / 25)\r\n\r\n # self.prev_action = np.array([self._sim.data.qpos[0], self._sim.data.qpos[1], self._sim.data.qpos[3], self._sim.data.qpos[5]])\r\n # self.prev_action = np.array([self._sim.data.qpos[0], self._sim.data.qpos[1], self._sim.data.qpos[3], self._sim.data.qpos[5]])\r\n\r\n #####################################################\r\n\r\n\r\nclass GraspValid_net(nn.Module):\r\n def __init__(self, state_dim):\r\n super(GraspValid_net, self).__init__()\r\n self.l1 = nn.Linear(state_dim, 256)\r\n self.l2 = nn.Linear(256, 256)\r\n self.l3 = nn.Linear(256, 1)\r\n\r\n def forward(self, state):\r\n # pdb.set_trace()\r\n\r\n a = F.relu(self.l1(state))\r\n a = F.relu(self.l2(a))\r\n a = torch.sigmoid(self.l3(a))\r\n return a\r\n \r\n","sub_path":"gym-kinova-gripper/gym_kinova_gripper/envs/kinova_gripper_env.py","file_name":"kinova_gripper_env.py","file_ext":"py","file_size_in_byte":79198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"552141665","text":"num = int(input('Enter a number: '))\nnum_original=num\n\ndef check_armstrong(num):\n sum1 = 0\n num2=num\n cnt=0\n while(num>0):\n cnt=cnt+1\n num=num//10\n\n while num2>0:\n rem = num2% 10\n sum1 += rem ** cnt\n num2//= 10\n return sum1\n\nsum_1=check_armstrong(num)\n\nif(num_original==sum_1):\n print('Armstrong Number')\nelse:\n print('Not an Armstrong Number')\n","sub_path":"day 06/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"395190003","text":"from sympy import Symbol, Eq\nimport numpy as 
np\n\nfrom simnet.solver import Solver\nfrom simnet.dataset import TrainDomain, ValidationDomain\nfrom simnet.data import Validation\nfrom simnet.sympy_utils.geometry_1d import Point1D\nfrom simnet.controller import SimNetController\nfrom simnet.plot_utils.vtk import var_to_vtk\n\nfrom spring_mass_ode import SpringMass\n\n# define time variable and range\nt_max = 10.0\nt_symbol = Symbol('t')\nx = Symbol('x')\ntime_range = {t_symbol: (0, t_max)}\n\ngeo = Point1D(0)\n\nclass SpringMassTrain(TrainDomain):\n def __init__(self, **config):\n super(SpringMassTrain, self).__init__()\n\n # initial conditions\n IC = geo.boundary_bc(outvar_sympy={'x1': 1.,\n 'x2': 0,\n 'x3': 0,\n 'x1__t': 0,\n 'x2__t': 0,\n 'x3__t': 0},\n param_ranges={t_symbol: 0},\n batch_size_per_area=1)\n self.add(IC, name=\"IC\")\n\n # solve over given time period\n interior = geo.boundary_bc(outvar_sympy={'ode_x1': 0.0,\n 'ode_x2': 0.0,\n 'ode_x3': 0.0}, \n param_ranges=time_range,\n batch_size_per_area=500)\n self.add(interior, name=\"Interior\")\n\nclass SpringMassVal(ValidationDomain):\n def __init__(self, **config):\n super(SpringMassVal, self).__init__()\n deltaT = 0.001\n t = np.arange(0, t_max, deltaT)\n t = np.expand_dims(t, axis=-1) \n invar_numpy = {'t': t}\n outvar_numpy = {'x1': (1/6)*np.cos(t) + (1/2)*np.cos(np.sqrt(3)*t) + (1/3)*np.cos(2*t),\n 'x2': (2/6)*np.cos(t) + (0/2)*np.cos(np.sqrt(3)*t) - (1/3)*np.cos(2*t),\n 'x3': (1/6)*np.cos(t) - (1/2)*np.cos(np.sqrt(3)*t) + (1/3)*np.cos(2*t)} \n val = Validation.from_numpy(invar_numpy, outvar_numpy)\n self.add(val, name=\"Val\")\n\nclass SpringMassSolver(Solver):\n train_domain = SpringMassTrain\n val_domain = SpringMassVal\n\n def __init__(self, **config):\n super(SpringMassSolver, self).__init__(**config)\n\n self.equations = SpringMass(k=(2, 1, 1, 2), m=(1, 1, 1)).make_node()\n\n spring_net = self.arch.make_node(name='spring_net',\n inputs=['t'],\n outputs=['x1','x2','x3'])\n self.nets = [spring_net]\n\n @classmethod # Explain This\n def update_defaults(cls, defaults):\n defaults.update({\n 'network_dir': './network_checkpoint_spring_mass',\n 'max_steps': 10000,\n 'decay_steps': 100,\n 'nr_layers': 6,\n 'layer_size': 256,\n 'xla': True,\n })\n\n\nif __name__ == '__main__':\n ctr = SimNetController(SpringMassSolver)\n ctr.run()\n","sub_path":"hpc_ai/PINN/English/python/source_code/spring_mass/spring_mass_solver.py","file_name":"spring_mass_solver.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"531890198","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models\n\n\nnclasses = 20 \n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n \n def forward(self, x):\n return x\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.featurizer = models.resnet152(pretrained=True)\n num_ftrs = self.featurizer.fc.in_features\n for param in self.featurizer.parameters():\n param.requires_grad = False\n self.featurizer.fc = Identity()\n self.dp = nn.Dropout(0.15)\n self.classifier = nn.Linear(num_ftrs, nclasses)\n\n def forward(self, x):\n feats = self.featurizer(x)\n drops = self.dp(feats)\n return self.classifier(drops)\n","sub_path":"models/model_resnet152_dropout.py","file_name":"model_resnet152_dropout.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} 
+{"seq_id":"401494992","text":"import numpy as np\nfrom trajopt.envs.continual_reacher_env import ContinualReacher7DOFEnv\nimport imageio\n\n\nif __name__ == \"__main__\":\n env = ContinualReacher7DOFEnv()\n dat = np.load(\"data/continual_reacher.npz\")\n obs = dat['obs']\n\n sim = env.sim\n ims = []\n for i in range(obs.shape[1]):\n sim.data.qpos[:7] = obs[3, i, :7]\n sim.forward()\n im = sim.render(256, 256)\n ims.append(im)\n\n imageio.mimwrite(\"test.gif\", ims)\n","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"569215515","text":" # -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 02 20:17:11 2017\n \n@author: nsuri\n\"\"\"\nimport pandas as panda\n\n\nclass AllFunctionsForDsAnalytics:\n \n def SemisterOrderOfaStudent(self,CustomSortOrder,EnrolledClassData):\n PerStudentSemisterOrder={}\n for row in EnrolledClassData.itertuples():\n if(PerStudentSemisterOrder.has_key(row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1])):\n if(row[EnrolledClassData.columns.get_loc('ACAD_TERM_CD')+1] in PerStudentSemisterOrder[row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]]):\n pass\n else:\n PerStudentSemisterOrder[row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]].append(row[EnrolledClassData.columns.get_loc('ACAD_TERM_CD')+1])\n else:\n PerStudentSemisterOrder[row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]]=[row[EnrolledClassData.columns.get_loc('ACAD_TERM_CD')+1]]\n return(PerStudentSemisterOrder)\n \n def SortPerStudentSemisterOrder(self,PerStudentSemisterOrder,CustomSortOrder):\n for key,Currentvalues in PerStudentSemisterOrder.iteritems():\n SortedListOfCurrentStudnet =CustomSortOrder\n disjuntValues = list(set(SortedListOfCurrentStudnet).difference(set(Currentvalues)))\n #print disjuntValues\n FinalVals = [val for val in SortedListOfCurrentStudnet if val not in disjuntValues] #SortedListOfCurrentStudnet.remove(disjuntValues)\n #disjuntValues.remove(SortedListOfCurrentStudnet)\n PerStudentSemisterOrder[key] = FinalVals\n return(PerStudentSemisterOrder)\n \n def insertIntoCoursesTakenInSequence(self,row,Index,CoursesTakenInSequence,columns,EnrolledClassData):\n # check if the dataframe already has that paricular student in it\n #print row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]\n #print CoursesTakenInSequence['PRSN_UNIV_ID Crypted']\n if any( CoursesTakenInSequence['PRSN_UNIV_ID Crypted'] == row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]): \n rowindex = CoursesTakenInSequence.loc[row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]==CoursesTakenInSequence['PRSN_UNIV_ID Crypted']].index.tolist() \n CoursesTakenInSequence.ix[rowindex[0],columns[Index]] = row[EnrolledClassData.columns.get_loc('Course_Code')+1]\n else: \n CoursesTakenInSequence.loc[CoursesTakenInSequence.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]]+[0] * 16\n rowindex = CoursesTakenInSequence.loc[row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]==CoursesTakenInSequence['PRSN_UNIV_ID Crypted']].index.tolist() \n CoursesTakenInSequence.ix[rowindex[0],columns[Index]] = row[EnrolledClassData.columns.get_loc('Course_Code')+1]\n \n def StudentsCourseSemisterBins(self,EnrolledClassData,PerStudentSemisterOrder,mode):\n SemOne = panda.DataFrame(columns=['PRSN_UNIV_ID Crypted', 'Course_Code'])\n SemTwo = 
panda.DataFrame(columns=['PRSN_UNIV_ID Crypted', 'Course_Code'])\n SemThree = panda.DataFrame(columns=['PRSN_UNIV_ID Crypted', 'Course_Code'])\n SemFour = panda.DataFrame(columns=['PRSN_UNIV_ID Crypted', 'Course_Code']) \n Dfcolumns=['PRSN_UNIV_ID Crypted', 'Gender','Semester1','Semester2','Semester3','Semester4','Semester5','Semester6','Semester7','Semester8','Semester9','Semester10','Semester11','Semester12','Semester13','Semester14','Semester15'] \n CoursesTakenInSequence = panda.DataFrame(columns=Dfcolumns)\n \n if mode==0: \n for row in EnrolledClassData.itertuples():\n Index = (PerStudentSemisterOrder[row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]].index(row[EnrolledClassData.columns.get_loc('ACAD_TERM_CD')+1]) )+1\n self.insertIntoCoursesTakenInSequence(row,Index,CoursesTakenInSequence,Dfcolumns,EnrolledClassData) \n binIndex = (Index % 4) \n if(binIndex == 1):\n SemOne.loc[SemOne.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , 'Sem1-' + row[EnrolledClassData.columns.get_loc('Course_Code')+1]] \n elif(binIndex == 2):\n SemTwo.loc[SemTwo.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , 'Sem2-'+ row[EnrolledClassData.columns.get_loc('Course_Code')+1]]\n elif(binIndex == 3):\n SemThree.loc[SemThree.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , 'Sem3-' + row[EnrolledClassData.columns.get_loc('Course_Code')+1]]\n else:\n SemFour.loc[SemFour.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , 'Sem4-' +row[EnrolledClassData.columns.get_loc('Course_Code')+1]]\n elif mode==1: \n for row in EnrolledClassData.itertuples():\n Index = (PerStudentSemisterOrder[row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1]].index(row[EnrolledClassData.columns.get_loc('ACAD_TERM_CD')+1]) )+1\n self.insertIntoCoursesTakenInSequence(row,Index,CoursesTakenInSequence,Dfcolumns,EnrolledClassData) \n binIndex = (Index % 4) \n if(binIndex == 1):\n SemOne.loc[SemOne.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , row[EnrolledClassData.columns.get_loc('Course_Code')+1]] \n elif(binIndex == 2):\n SemTwo.loc[SemTwo.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , row[EnrolledClassData.columns.get_loc('Course_Code')+1]]\n elif(binIndex == 3):\n SemThree.loc[SemThree.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , row[EnrolledClassData.columns.get_loc('Course_Code')+1]]\n else:\n SemFour.loc[SemFour.shape[0]+1] = [row[EnrolledClassData.columns.get_loc('PRSN_UNIV_ID Crypted')+1] , row[EnrolledClassData.columns.get_loc('Course_Code')+1]]\n else:\n print (\"Please check the mode of semester level binning\")\n \n return(SemOne,SemTwo,SemThree,SemFour,CoursesTakenInSequence)\n \n def SequenceOfCoursesTaken(self,SemOne,SemTwo,SemThree,SemFour):\n \n Sem1_Sem2_Outer12 = SemOne.merge(SemTwo, left_on='PRSN_UNIV_ID Crypted', right_on='PRSN_UNIV_ID Crypted', how='outer')\n Sem1_Sem2_Outer = Sem1_Sem2_Outer12.ix[(Sem1_Sem2_Outer12['PRSN_UNIV_ID Crypted'].notnull())&(Sem1_Sem2_Outer12['Course_Code_x'].notnull() )&( Sem1_Sem2_Outer12['Course_Code_y'].notnull() )]\n \n Sem2_Sem3_Outer23 = SemTwo.merge(SemThree, left_on='PRSN_UNIV_ID Crypted', right_on='PRSN_UNIV_ID Crypted', how='outer')\n Sem2_Sem3_Outer = Sem2_Sem3_Outer23.ix[(Sem2_Sem3_Outer23['PRSN_UNIV_ID Crypted'].notnull())&(Sem2_Sem3_Outer23['Course_Code_x'].notnull() )&( Sem2_Sem3_Outer23['Course_Code_y'].notnull() 
)]\n \n Sem3_Sem4_Outer34 = SemThree.merge(SemFour, left_on='PRSN_UNIV_ID Crypted', right_on='PRSN_UNIV_ID Crypted', how='outer')\n Sem3_Sem4_Outer = Sem3_Sem4_Outer34.ix[(Sem3_Sem4_Outer34['PRSN_UNIV_ID Crypted'].notnull())&(Sem3_Sem4_Outer34['Course_Code_x'].notnull() )&( Sem3_Sem4_Outer34['Course_Code_y'].notnull() )]\n \n return(Sem1_Sem2_Outer,Sem2_Sem3_Outer,Sem3_Sem4_Outer)\n \n def JoinAllResultsAndSave(self,Sem1_Sem2_Outer,Sem2_Sem3_Outer,Sem3_Sem4_Outer,path,CoursesTakenInSequence,FileName1,FileName2):\n print (\"About to save the results to the File\")\n outfileName = path+FileName1\n #outfileName2 = path+FileName2\n FinalResult = panda.concat([Sem1_Sem2_Outer, Sem2_Sem3_Outer,Sem3_Sem4_Outer], ignore_index=True)\n FinalResult.to_csv(outfileName+'.csv', sep=',', encoding='utf-8') \n \n FinalResultToUse = panda.concat([Sem1_Sem2_Outer.ix[:,1:3], Sem2_Sem3_Outer.ix[:,1:3],Sem3_Sem4_Outer.ix[:,1:3]], ignore_index=True)\n FinalResultToUse.to_csv(outfileName+'_ToUse.csv', sep=',', encoding='utf-8') \n #CoursesTakenInSequence.to_csv(outfileName2+'_ToUse.csv', sep=',', encoding='utf-8') \n \n # used for chedking if that particular class number is to be taken care \n def isAddClassNumber(self,courseCatalogue,classnumber,ClassNumbersToBeTakenCareOf):\n #print courseCatalogue\n if courseCatalogue in ClassNumbersToBeTakenCareOf:\n return(\"-\"+classnumber)\n else:\n return(\"\")\n \n \n def divideDataSetToThree(self,EnrolledClassDataAll,PerStudentSemisterOrder,OnlyStudentsWith4SemestersModeSet):\n # creating a column in the EnrolledClassDataAll that specifies the new column with the studnet info on when the student started his education? like in spring, summer or Fall\n for row in EnrolledClassDataAll.itertuples():\n SemStarted = (PerStudentSemisterOrder[row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1]][0])\n if row[EnrolledClassDataAll.columns.get_loc('Program_Started_In_Semester')+1] is None: \n if('Spring'.lower() in SemStarted.lower()):\n # update the row to Spring\n EnrolledClassDataAll.loc[EnrolledClassDataAll['PRSN_UNIV_ID Crypted']==row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1],'Program_Started_In_Semester']='Spring'\n elif('Summer'.lower() in SemStarted.lower()):\n EnrolledClassDataAll.loc[EnrolledClassDataAll['PRSN_UNIV_ID Crypted']==row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1],'Program_Started_In_Semester']='Summer'\n elif('Fall'.lower() in SemStarted.lower()):\n EnrolledClassDataAll.loc[EnrolledClassDataAll['PRSN_UNIV_ID Crypted']==row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1],'Program_Started_In_Semester']='Fall'\n else:\n EnrolledClassDataAll.loc[EnrolledClassDataAll['PRSN_UNIV_ID Crypted']==row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1],'Program_Started_In_Semester']='NA'\n \n if OnlyStudentsWith4SemestersModeSet == 1:\n if len(PerStudentSemisterOrder[row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1]])>=4:\n EnrolledClassDataAll.loc[EnrolledClassDataAll['PRSN_UNIV_ID Crypted']==row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1],'FinishedAtleast4Semesters']=1\n else:\n EnrolledClassDataAll.loc[EnrolledClassDataAll['PRSN_UNIV_ID Crypted']==row[EnrolledClassDataAll.columns.get_loc('PRSN_UNIV_ID Crypted')+1],'FinishedAtleast4Semesters']=0 \n \n return(EnrolledClassDataAll)\n \n \n def createBinsForEachConsequtiveTerms(BinsToProcessNow,EnrolledClassData):\n print (\"Creating Bin for \" + str(BinsToProcessNow[0]) +\" - 
\"+str(BinsToProcessNow[1]) )\n BinOneAllData = EnrolledClassData.loc[EnrolledClassData['ACAD_TERM_CD']== BinsToProcessNow[0]]\n BinTwoAllData = EnrolledClassData.loc[EnrolledClassData['ACAD_TERM_CD']== BinsToProcessNow[1]]\n \n BinOne= BinOneAllData.ix[:,['PRSN_UNIV_ID Crypted','Course_Code']]\n BinTwo= BinTwoAllData.ix[:,['PRSN_UNIV_ID Crypted','Course_Code']]\n \n return(BinOne,BinTwo) \n\n\n def BinsOuterJoin(BinOne,BinTwo): \n Bin1_Bin2_Outer12 = BinOne.merge(BinTwo, left_on='PRSN_UNIV_ID Crypted', right_on='PRSN_UNIV_ID Crypted', how='outer')\n Bin1_Bin2_Outer12 = Bin1_Bin2_Outer12.ix[(Bin1_Bin2_Outer12['PRSN_UNIV_ID Crypted'].notnull())&(Bin1_Bin2_Outer12['Course_Code_x'].notnull() )&( Bin1_Bin2_Outer12['Course_Code_y'].notnull() )]\n return(Bin1_Bin2_Outer12)\n \n def JoinAllResultsAndSave(ResultSet,FileName1,path):\n print (\"About to save the results to the File\")\n outfileName = path+FileName1\n #outfileName2 = path+FileName2\n ResultSet.to_csv(outfileName+'.csv', sep=',', encoding='utf-8')\n \n ################################################################################################################################################################################################################################################################# \n","sub_path":"CourseFlowAnalysis/Student-Flow/AllFunctionsForDsAnalytics.py","file_name":"AllFunctionsForDsAnalytics.py","file_ext":"py","file_size_in_byte":12662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"113322126","text":"# -*- coding: utf-8 -*-\nimport cv2, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom ssd.dl16_CF_ssd_config import ssd_cfg\n\n\ndef time_calc(text):\n def decorator(func):\n def wrapper(*args, **kwargs):\n start_time = time.time()\n f = func(*args, **kwargs)\n exec_time = time.time() - start_time\n print(f'exec_time for {text}: {exec_time:.3f} s.')\n return f\n\n return wrapper\n\n return decorator\n\n\ndef print_result(epoch, epochs, batch, num_batch,\n loss_cla=0, loss_reg=0):\n print(f'\\rEpoch {epoch + 1}/{epochs} Batch {batch + 1}/{num_batch}'\n f' - Loss cla = {loss_cla:.6f} reg = {loss_reg:.6f}',\n end='')\n\n\ndef plot_boxes_img(img, multi_boxes, color_list, line_width=2, show_img=True):\n \"\"\"\n 在一张图中画多组标注框。所有boxes都是r1, c1, r2, c2\n :param img:\n :param multi_boxes: [gt_boxes, rpn_boxes, fast_boxes]\n :param color_list:\n :param line_width:\n :param show_img:\n :return:\n \"\"\"\n\n color_dict = {'k': (0, 0, 0), 'y': (255, 0, 0), 'g': (0, 255, 0), 'r': (0, 0, 255)}\n\n for i, boxes in enumerate(multi_boxes):\n color = color_list[i]\n if color not in color_dict:\n color = 'k'\n for box in boxes:\n img = cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]),\n color_dict[color], line_width)\n\n if show_img:\n plt.imshow(img[:, :, ::-1])\n plt.show()\n\n\n# 根据分类结果、回归结果,进行boxes调整\ndef get_result(img_shape, cla_prob, regs, score_thres=ssd_cfg.test_result_score_thres):\n \"\"\"\n :param img_shape:\n :param cla_prob: [num_boxes]\n :param regs: [num_boxes, 4]\n :param score_thres: 保留作为预测结果的得分阈值\n :return:\n cla_id_scores [num_res, 2]\n boxes [num_res, 4] 按照得分从大到小排序好了\n \"\"\"\n # 生成所有锚框\n img_h, img_w = img_shape\n total_anchor_boxes = get_all_anchor_boxes(img_shape)\n\n # 是前景且概率大于阈值的索引\n fg_inds = np.where(cla_prob > score_thres)[0]\n\n # 满足概率阈值的���景个数\n num_res = fg_inds.size\n # 若无结果,则直接返回\n if num_res == 0:\n return np.array([]).reshape([0, 2]), np.array([]).reshape([0, 4])\n\n # 前景概率大于阈值的分类id、概率、boxes、regs\n scores = 
cla_prob[fg_inds] # score\n boxes = total_anchor_boxes[fg_inds]\n regs = regs[fg_inds]\n\n shifts = regs\n # 根据回归结果调整boxes\n boxes = bbox_transform_inv(boxes, shifts)\n # 边界处截断\n boxes[boxes[:, 0] < 0, 0] = 0\n boxes[boxes[:, 1] < 0, 1] = 0\n boxes[boxes[:, 2] > img_h - 1, 2] = img_h - 1\n boxes[boxes[:, 3] > img_w - 1, 3] = img_w - 1\n # 保留尺寸大于阈值的预测框\n _keep_inds = np.where(((boxes[:, 2] - boxes[:, 0]) > ssd_cfg.test_min_size) &\n ((boxes[:, 3] - boxes[:, 1]) > ssd_cfg.test_min_size))[0]\n\n # 若无结果,则直接返回\n if _keep_inds.size == 0:\n return np.array([]).reshape([0, 2]), np.array([]).reshape([0, 4])\n\n scores = scores[_keep_inds] # score\n boxes = boxes[_keep_inds]\n\n # 若有结果,则对结果进行nms\n # 将boxes按照得分从高到低排布\n sorted_inds = np.argsort(scores)[::-1]\n scores = scores[sorted_inds]\n boxes = boxes[sorted_inds]\n\n keep_inds = self_nms(boxes, ssd_cfg.test_result_nms_iou)\n\n nmsed_scores = scores[keep_inds]\n nmsed_boxes = boxes[keep_inds]\n\n return nmsed_scores.astype(float), nmsed_boxes.astype(float)\n\n\n# 一组boxes从前往后进行自nms筛选,得到保留boxes的下标\ndef self_nms(boxes, thres):\n num_boxes = boxes.shape[0]\n keep_inds = np.ones(num_boxes) # 0是剔除,1是保留\n\n for i, box in enumerate(boxes):\n # 若当前box已剔除,则计算下一个\n if keep_inds[i] == 0:\n continue\n if i + 1 > boxes.shape[0] - 1:\n break\n last_boxes = boxes[i + 1:]\n ious = get_iou(last_boxes, box)\n # 剔除与当前box的iou大于阈值的boxes\n kill_inds = np.where(ious > thres)[0] + (i + 1)\n keep_inds[kill_inds] = 0\n\n return np.where(keep_inds == 1)[0]\n\n\ndef get_conv_infos(img_shape, conv_layers, first_h, first_w):\n \"\"\"\n :param img_shape:\n :param conv_layers: [(p, k, s), ...]\n :param first_h: (n_h, j_h, r_h, START_h)\n :param first_w: (n_w, j_w, r_w, START_w)\n :return: conv_infos [((n_h, j_h, r_h, START_h), (n_w, j_w, r_w, START_w)), ...]\n \"\"\"\n # 开始信息\n x_h, x_w = img_shape\n if first_h is None:\n first_h = (x_h, 1, 1, 0) # (n_h, j_h, r_h, START_h)\n if first_w is None:\n first_w = (x_w, 1, 1, 0) # (n_w, j_w, r_w, START_w)\n\n h_info, w_info = first_h, first_w\n conv_infos = []\n\n for layer in conv_layers:\n h_info = _get_conv_output_size(*h_info, *layer)\n w_info = _get_conv_output_size(*w_info, *layer)\n conv_infos.append((h_info, w_info))\n\n return conv_infos\n\n\ndef _get_conv_output_size(n, j, r, start, p, k, s):\n \"\"\"\n 输入信息, 经过pks卷积,返回输出信息\n :param n: 尺寸\n :param j: 单点间距\n :param r: 感受野\n :param start: 左上第一点对应原图坐标\n :param p: padding圈数\n :param k: 卷积核尺寸\n :param s: 步长\n :return:\n \"\"\"\n _n = int(np.floor((n + 2 * p - k) / s) + 1)\n _j = j * s\n _r = r + (k - 1) * j\n _start = start + ((k - 1) / 2 - p) * j\n return _n, _j, _r, _start\n\n\ndef get_all_anchor_boxes(img_shape, first_h=None, first_w=None,\n conv_layers=ssd_cfg.conv_layers, anchor_scales=ssd_cfg.anchor_scales,\n anchor_ratio=ssd_cfg.anchor_ratio, fm_inds=ssd_cfg.fm_inds):\n conv_infos = get_conv_infos(img_shape, conv_layers, first_h, first_w)\n # # [((n_h, j_h, r_h, START_h), (n_w, j_w, r_w, START_w)), ...]\n total_anchor_boxes = None\n for i in range(len(fm_inds)):\n # [num_anchor_box, 4] -> r1, c1, r2, c2\n point_anchor_box = gen_point_anchor_box(anchor_scales[i], anchor_scales[i + 1],\n anchor_ratio)\n # [num_shifts * num_anchor_box, 4] -> r1, c1, r2, c2\n # 按行排列 □→□→□→□→□→□→□→□→□→□↓\n # ↓← ←\n # □→□→□→□→□→...\n fm_ind = fm_inds[i]\n total_anchor_box = gen_all_anchor_boxes(img_shape, point_anchor_box, conv_infos[fm_ind])\n if total_anchor_boxes is None:\n total_anchor_boxes = total_anchor_box\n else:\n total_anchor_boxes = np.r_[total_anchor_boxes, total_anchor_box]\n return 
total_anchor_boxes\n\n\ndef gen_point_anchor_box(scale1, scale2, ratio):\n \"\"\"\n :param scale1: int\n :param scale2: int\n :param ratio: list\n :return: [num_anchor_box, 4]\n \"\"\"\n ratio = np.array(ratio) # [num_ratio]\n w = scale1 / (ratio ** 0.5) # [num_ratio]\n h = w * ratio # [num_ratio]\n w = np.append(w, (scale1 * scale2) ** 0.5)\n h = np.append(h, (scale1 * scale2) ** 0.5)\n zeros = np.zeros_like(w)\n\n r1 = zeros - 0.5 * (h - 1)\n c1 = zeros - 0.5 * (w - 1)\n r2 = zeros + 0.5 * (h - 1)\n c2 = zeros + 0.5 * (w - 1)\n\n return np.vstack((r1, c1, r2, c2)).transpose()\n\n\n# 原图中的所有锚框。按照行排列 ->->->->->->\ndef gen_all_anchor_boxes(img_shape, point_anchor_box, conv_info):\n \"\"\"\n 使用一组单点锚框,根据锚框偏移量,广播得到所有锚框。\n 注意所有锚框[num_boxes, 4]需要按照行排列,因为需要对[fm_h, fm_w, 4].reshape([-1, 4])得到对应位置\n :param img_shape:\n :param point_anchor_box: [num_point_anchor_box, 4]\n :param conv_info: ((n_h, j_h, r_h, START_h), (n_w, j_w, r_w, START_w))\n :return: [num_boxes, 4]\n \"\"\"\n img_h, img_w = img_shape\n fm_h, hj, hr, hstart = conv_info[0]\n fm_w, wj, wr, wstart = conv_info[1]\n # 使用卷积公式计算锚框中心点\n shift_r = [hstart + i * hj for i in range(fm_h)]\n shift_c = [wstart + i * wj for i in range(fm_w)]\n # # 使用特征图均分原图,各网格中点作为锚框中心点\n # _hj = img_h / fm_h\n # _hstart = _hj / 2\n # _wj = img_w / fm_w\n # _wstart = _wj / 2\n # shift_r = [_hstart + i * _hj for i in range(fm_h)]\n # shift_c = [_wstart + i * _wj for i in range(fm_w)]\n\n # c作为x,r作为y,如此偏移得到的boxes是沿行偏移的\n shift_c, shift_r = np.meshgrid(shift_c, shift_r)\n shifts = np.vstack((shift_r.ravel(), shift_c.ravel(), shift_r.ravel(), shift_c.ravel())).transpose()\n\n num_point_anchor_box = point_anchor_box.shape[0]\n num_shifts = shifts.shape[0]\n\n point_anchor_box = np.reshape(point_anchor_box, [1, num_point_anchor_box, 4])\n shifts = np.reshape(shifts, [num_shifts, 1, 4])\n\n anchor_boxes = point_anchor_box + shifts # [num_shifts, num_anchor_box, 4]\n anchor_boxes = np.reshape(anchor_boxes, [-1, 4]) # [num_shifts * num_anchor_box, 4]\n\n return anchor_boxes\n\n\n# 一组boxes和多个gt_box的ious矩阵\ndef get_ious(boxes, gt_boxes):\n ious = np.zeros((boxes.shape[0], gt_boxes.shape[0]))\n\n for i, gt_box in enumerate(gt_boxes):\n iou = get_iou(boxes, gt_box)\n ious[:, i] = iou\n\n return ious # [num_boxes, num_boxes]\n\n\n# 一组boxes和一个box的ious\ndef get_iou(boxes, gt_box):\n r1, c1, r2, c2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]\n r1_gt, c1_gt, r2_gt, c2_gt = gt_box[0], gt_box[1], gt_box[2], gt_box[3]\n over_r = get_overlap_length(r1, r2, r1_gt, r2_gt) # [num_boxes]\n over_c = get_overlap_length(c1, c2, c1_gt, c2_gt) # [num_boxes]\n inter = over_r * over_c # [num_boxes]\n box_area = np.abs((r1 - r2) * (c1 - c2)) # [num_boxes]\n gt_area = np.abs((r1_gt - r2_gt) * (c1_gt - c2_gt)) # []\n union = box_area + gt_area - inter\n iou = inter / union\n return iou\n\n\ndef relu(x):\n return np.maximum(x, 0)\n\n\ndef get_overlap_length(a1, a2, b1, b2):\n a1, a2 = np.minimum(a1, a2), np.maximum(a1, a2)\n b1, b2 = np.minimum(b1, b2), np.maximum(b1, b2)\n return relu(a2 - a1 - relu(b1 - a1) - relu(a2 - b2))\n\n\n# 根据原box和目标box,得到回归标签\ndef bbox_transform(ex_rois, gt_rois):\n # ex_rois -> r1, c1, r2, c2\n # gt_rois -> r1, c1, r2, c2\n # return -> r, z, h ,w\n\n ex_heights = ex_rois[:, 2] - ex_rois[:, 0] + 1.0\n ex_widths = ex_rois[:, 3] - ex_rois[:, 1] + 1.0\n ex_ctr_r = ex_rois[:, 0] + 0.5 * (ex_heights - 1)\n ex_ctr_c = ex_rois[:, 1] + 0.5 * (ex_widths - 1)\n\n gt_heights = gt_rois[:, 2] - gt_rois[:, 0] + 1.0\n gt_widths = gt_rois[:, 3] - gt_rois[:, 1] + 1.0\n gt_ctr_r = 
gt_rois[:, 0] + 0.5 * (gt_heights - 1)\n gt_ctr_c = gt_rois[:, 1] + 0.5 * (gt_widths - 1)\n\n targets_dr = (gt_ctr_r - ex_ctr_r) / ex_heights\n targets_dc = (gt_ctr_c - ex_ctr_c) / ex_widths\n targets_dh = np.log(gt_heights / ex_heights)\n targets_dw = np.log(gt_widths / ex_widths)\n\n targets = np.vstack((targets_dr, targets_dc, targets_dh, targets_dw)).transpose()\n\n return targets\n\n\n# 根据box和回归标签,得到调整后的box\ndef bbox_transform_inv(boxes, deltas):\n # boxes -> r1, c1, r2, c2\n # deltas -> r, z, h, w\n # return -> r1, c1, r2, c2\n\n # if boxes.shape[0] == 0:\n # return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)\n #\n # boxes = boxes.astype(deltas.dtype, copy=False)\n\n heights = boxes[:, 2] - boxes[:, 0] + 1.0\n widths = boxes[:, 3] - boxes[:, 1] + 1.0\n ctr_r = boxes[:, 0] + 0.5 * (heights - 1)\n ctr_c = boxes[:, 1] + 0.5 * (widths - 1)\n\n dr = deltas[:, 0::4] # 虽然只有1个数,但如此得到的是二维数组[num_deltas, 1]\n dc = deltas[:, 1::4]\n dh = deltas[:, 2::4]\n dw = deltas[:, 3::4]\n\n pred_ctr_r = dr * heights[:, np.newaxis] + ctr_r[:, np.newaxis]\n pred_ctr_c = dc * widths[:, np.newaxis] + ctr_c[:, np.newaxis]\n pred_h = np.exp(dh) * heights[:, np.newaxis]\n pred_w = np.exp(dw) * widths[:, np.newaxis]\n\n pred_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)\n # r1\n pred_boxes[:, 0::4] = pred_ctr_r - 0.5 * (pred_h - 1)\n # c1\n pred_boxes[:, 1::4] = pred_ctr_c - 0.5 * (pred_w - 1)\n # r2\n pred_boxes[:, 2::4] = pred_ctr_r + 0.5 * (pred_h - 1)\n # c2\n pred_boxes[:, 3::4] = pred_ctr_c + 0.5 * (pred_w - 1)\n\n return pred_boxes\n\n\ndef get_APs(recalls, precisions, method=None):\n \"\"\"\n 根据PR对来计算得到某一类别的AP值\n :param recalls: [n_iou_thres, n_score_thres]\n :param precisions: [n_iou_thres, n_score_thres]\n :param method: 根据什么方式计算。None默认的是最大precision求面积,其实就是all points方法\n 可选方法:11-points、all-points\n :return:\n \"\"\"\n if type(recalls) in (list, tuple): recalls, precisions = np.array(recalls), np.array(precisions)\n if recalls.ndim == 2: # 表明是多iou_thres\n aps = [_get_AP(recalls[i], precisions[i], method) for i in range(recalls.shape[0])]\n else:\n aps = [_get_AP(recalls, precisions, method)]\n return aps\n\n\ndef _get_AP(recalls, precisions, method):\n \"\"\"\n 根据什么方式计算。None默认的是最大precision求面积,其实就是all points方法\n 可选方法:11-points、all-points\n :param recalls: [n_score_thres]\n :param precisions: [n_iou_thres]\n :param method:\n :return:\n \"\"\"\n ap = temp_r = 0\n if method is None:\n sorted_inds = np.argsort(precisions)[::-1] # 按照precision从大到小排列\n recalls, precisions = recalls[sorted_inds], precisions[sorted_inds]\n while recalls.size > 0:\n r, p = recalls[0], precisions[0]\n ap, temp_r = ap + (r - temp_r) * p, r\n inds = np.where(recalls > r)[0]\n recalls, precisions = recalls[inds], precisions[inds]\n elif method == 'all-points':\n sorted_inds = np.argsort(recalls) # 按照recall从小到大排列\n recalls, precisions = recalls[sorted_inds], precisions[sorted_inds]\n for i, r in enumerate(recalls):\n p = np.max(precisions[recalls >= r])\n ap, temp_r = ap + (r - temp_r) * p, r\n elif method == '11-points':\n for r in np.linspace(0, 1, 11):\n _p = precisions[recalls >= r]\n if _p.size > 0: ap += np.max(_p)\n ap /= 11\n else:\n raise ValueError(f'method:{method} must be in (None, \"all_points\", \"11_points\")')\n return ap\n\n\n# 根据is_tps画出PR曲线\ndef plot_PR_from_is_tps(n_gt, scores, is_tps, score_thres_list=None, iou_thres_list=None):\n \"\"\"\n :param n_gt:\n :param scores: [num_box]\n :param is_tps: [num_iou_thres, num_box]\n :param score_thres_list: 默认None是根据tps中的每个scores进行划分求解\n :param iou_thres_list: \n 
:return:\n \"\"\"\n if score_thres_list is None:\n score_thres_list = scores\n num_score_thres = len(score_thres_list)\n num_iou_thres = is_tps.shape[1] - 1\n total_p = np.zeros(shape=[num_score_thres])\n cum_tps = np.zeros(shape=[num_iou_thres, num_score_thres])\n\n for ind_score_thres, score_thres in enumerate(score_thres_list):\n ind_p = np.where(scores >= score_thres)[0]\n total_p[ind_score_thres] = ind_p.size\n for ind_iou_thres, iou_thres in enumerate(iou_thres_list):\n cum_tps[ind_iou_thres, ind_score_thres] = np.sum(is_tps[ind_iou_thres, ind_p])\n recalls = cum_tps / n_gt\n precisions = cum_tps / total_p\n\n plot_PR(recalls, precisions, iou_thres_list)\n\n\ndef plot_PR(recalls, precisions, iou_thres_list, APs=None, title='PR-curve'):\n plt.figure(figsize=[10, 10])\n for ind_iou_thres, iou_thres in enumerate(iou_thres_list):\n label = f'iou_thres={iou_thres:.2f}'\n if APs: label += f', AP={APs[ind_iou_thres]:.2%}'\n plt.plot(recalls[ind_iou_thres], precisions[ind_iou_thres], label=label)\n plt.legend()\n plt.xlim([0, 1.1])\n plt.ylim([0, 1.1])\n plt.xlabel('recall')\n plt.ylabel('precision')\n plt.title(title)\n plt.show()\n\n\n# 得到各阈值下所有锚框的is_tps [num_iou_thres, num_box]\ndef get_is_tps(gt_infos, pred_boxes, mAP_iou_thres_list):\n \"\"\"\n\n :param gt_infos:\n :param pred_boxes: 已经按照得分从大到小排序好\n :param mAP_iou_thres_list:\n :return:\n \"\"\"\n n_gt = gt_infos.shape[0]\n n_iou_thres = len(mAP_iou_thres_list)\n n_boxes = pred_boxes.shape[0]\n is_tps = np.zeros(shape=[n_iou_thres, n_boxes])\n if n_gt == 0: return is_tps\n ious = get_ious(pred_boxes, gt_infos)\n for ind_iou, iou_thres in enumerate(mAP_iou_thres_list):\n gt_inds = list(range(n_gt))\n for ind_box in range(n_boxes):\n iou = ious[ind_box, gt_inds]\n max_iou, argmax = np.max(iou), int(np.argmax(iou))\n if max_iou >= iou_thres:\n gt_inds.pop(argmax)\n is_tps[ind_iou, ind_box] = 1\n if not gt_inds: break\n\n return is_tps\n\n\nif __name__ == '__main__':\n tps = [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]\n n_p = len(tps)\n tps = np.array(tps)\n\n n_gt = 15\n total_P = np.arange(1, n_p + 1)\n TP = np.array([np.sum(tps[:i + 1]) for i in range(n_p)])\n recalls = TP / n_gt\n precisions = TP / total_P\n\n for method in (None, '11-points', 'all-points'):\n aps = get_APs(recalls, precisions, method=method)\n print(f'{method}: {aps[0]:.2%}')\n","sub_path":"ssd/dl16_CF_ssd_utils.py","file_name":"dl16_CF_ssd_utils.py","file_ext":"py","file_size_in_byte":17484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"628390640","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\ndef norm(res):\n ret=0\n for i in range(len(res)):\n ret+=res[i]*res[i]\n ret= np.sqrt(ret)\n return ret\n\n\ndef Jacobi(A,b):\n x=[]\n minres=10**-9\n normres=1\n k=0\n xOld=[]\n start=time.perf_counter()\n for i in range(len(A)):\n xOld.append(1)\n while normres>minres:\n x=[]\n for i in range(len(A)):\n sum=0\n for j in range(i):\n sum+=A[i][j]*xOld[j]\n for j in range(i+1, len(A)):\n sum += A[i][j]*xOld[j]\n x.append((b[i]-sum)/A[i][i])\n res=np.subtract(np.matmul(A,x),b)\n xOld=x\n normres=norm(res)\n if normres>10**20:\n print(\"algorytm jacobiego sie nie zbiega\")\n return 0\n k+=1\n print(\"jacobi czas\")\n print(time.perf_counter()-start)\n print(\"jacobi liczba iteracji\")\n print(k)\n return time.perf_counter()-start\n\n\ndef GausSiedel(A,b):\n x=[]\n minres=10**-9\n normres=1\n k=0\n xOld=[]\n start=time.perf_counter()\n for i in range(len(A)):\n 
xOld.append(1)\n while normres>minres:\n x=[]\n for i in range(len(A)):\n sum=0\n for j in range(i):\n sum+=A[i][j]*x[j]\n for j in range(i+1, len(A)):\n sum += A[i][j]*xOld[j]\n x.append((b[i]-sum)/A[i][i])\n res=np.subtract(np.matmul(A,x),b)\n xOld=x\n normres=norm(res)\n if normres > 10 ** 20:\n print(\"algorytm gausa-siedla sie nie zbiega\")\n return 0\n k+=1\n print(\"gaus siedel czas\")\n print(time.perf_counter()-start)\n print(\"gaus siedel liczba iteracji\")\n print(k)\n return time.perf_counter()-start\n\n\ndef LU(A,b):\n U = [row[:] for row in A]\n L=[[0 for i in range(len(A))] for j in range(len(A))]\n for i in range(len(A)):\n L[i][i]=1\n start = time.perf_counter()\n for k in range(len(A)):\n for j in range(k+1,len(A)):\n L[j][k]=U[j][k]/U[k][k]\n for l in range(k,len(A)):\n U[j][l]=U[j][l]-L[j][k]*U[k][l]\n y=[]\n for i in range(0,len(A)):\n sum=0\n for j in range(i):\n sum+=L[i][j]*y[j]\n y.append((b[i]-sum)/L[i][i])\n x = [0 for i in range(len(A))]\n\n x[len(A)-1]=y[len(A)-1]/U[len(A)-1][len(A)-1]\n for i in range(len(A)-1,-1,-1):\n sum = 0\n for j in range(i+1,len(A)):\n sum += U[i][j] * x[j]\n x[i]=(y[i]-sum)/U[i][i]\n res = np.subtract(np.matmul(A, x), b)\n normres = norm(res)\n print(\"LU norma z residum\")\n print(normres)\n print(\"LU czas\")\n print(time.perf_counter()-start)\n return time.perf_counter()-start\n\ndef ZadAB():\n N=927\n f=9\n a1=14\n a2=-1\n A=[[0 for i in range(N)] for j in range(N)]\n for i in range(N):\n for j in range(N):\n if i == j:\n A[i][j]=a1\n if i== j+1 or i==j-1 or i== j+2 or i==j-2:\n A[i][j]=a2\n b=[]\n for i in range(N):\n b.append(np.sin(i * (f + 1)))\n Jacobi(A,b)\n GausSiedel(A,b)\n\n\ndef ZadCD():\n N = 927\n f = 9\n a1 = 3\n a2 = -1\n A = [[0 for i in range(N)] for j in range(N)]\n for i in range(N):\n for j in range(N):\n if i == j:\n A[i][j] = a1\n if i == j + 1 or i == j - 1 or i == j + 2 or i == j - 2:\n A[i][j] = a2\n b = []\n for i in range(N):\n b.append(np.sin(i * (f + 1)))\n Jacobi(A, b)\n GausSiedel(A, b)\n LU(A, b)\n\ndef ZadE():\n N=[100,500,1000,2000,3000]\n f=9\n a1=14\n a2=-1\n LUTime=[]\n JacobiTime=[]\n GausSiedelTime=[]\n for k in range(5):\n A=[[0 for i in range(N[k])] for j in range(N[k])]\n\n for i in range(N[k]):\n for j in range(N[k]):\n if i == j:\n A[i][j]=a1\n if i== j+1 or i==j-1 or i== j+2 or i==j-2:\n A[i][j]=a2\n b=[]\n for i in range(N[k]):\n b.append(np.sin(i * (f + 1)))\n JacobiTime.append(Jacobi(A,b))\n GausSiedelTime.append(GausSiedel(A,b))\n LUTime.append(LU(A,b))\n plt.plot(N,JacobiTime)\n plt.plot(N,GausSiedelTime)\n plt.plot(N,LUTime)\n plt.legend([\"Jacobi\", \"Gaus-Siedel\",\"LU\"])\n plt.xlabel(\"Size of a matrix\")\n plt.ylabel(\"Time[s]\")\n plt.show()\n\nZadAB()\nZadCD()\nZadE()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"350806913","text":"\"\"\"empty message\n\nRevision ID: 4932e35e1c1e\nRevises: 995ebda2a647\nCreate Date: 2016-06-01 14:17:55.848000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4932e35e1c1e'\ndown_revision = '995ebda2a647'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('households',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('sender_id', sa.Integer(), nullable=True),\n sa.Column('accepter_id', sa.Integer(), nullable=True),\n sa.Column('money', sa.Integer(), nullable=True),\n sa.Column('sendtime', sa.DateTime(), nullable=True),\n sa.Column('datetime', sa.DateTime(), nullable=True),\n sa.Column('completetime', sa.DateTime(), nullable=True),\n sa.Column('infomation', sa.String(length=32), nullable=True),\n sa.ForeignKeyConstraint(['accepter_id'], ['users.id'], ),\n sa.ForeignKeyConstraint(['sender_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('households')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/4932e35e1c1e_.py","file_name":"4932e35e1c1e_.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"264621094","text":"\"\"\"\n- 该文件中的代码 processImage 实现了处理一张图片的功能 中间的处理结果会被写到对应的目录中\n\"\"\"\nimport cv2\nimport numpy as np\nimport sys\nimport os\nimport tqdm\nfrom functools import reduce\nsys.path.append('/home/szh-920/workspace')\n\nfrom master_graduate.logging import ColorLogging\n\ndef pathCheck(inputFile=None, inputPath=None, outputFile=None, outputPath=None):\n \"\"\"\n 检查输入文件是否存在 输出路径是否存在\n :return:\n \"\"\"\n #输入文件要求存在而且不能是文件夹\n if inputFile is not None:\n if isinstance(inputFile, str):\n assert os.path.exists(inputFile) and os.path.isfile(inputFile), ColorLogging.colorStr(\"invalid input file or input not exists\",\"red\")\n elif isinstance(inputFile, list):\n assert all([os.path.exists(i) and os.path.isfile(i) for i in inputFile]), ColorLogging.colorStr(\"invalid input file or input not exists\",\"red\")\n elif isinstance(inputFile, dict):\n assert all([os.path.exists(i) and os.path.isfile(i) for i in inputFile.values()]), ColorLogging.colorStr(\"invalid input file or input not exists\",\"red\")\n\n #输入路径要求存在且是文件夹\n if inputPath is not None:\n if isinstance(inputPath, str):\n assert os.path.exists(inputPath) and os.path.isdir(inputPath), ColorLogging.colorStr(\"invalid input path or input not exists\", \"red\")\n elif isinstance(inputPath, list):\n assert all([os.path.exists(i) and os.path.isdir(i) for i in inputPath]), ColorLogging.colorStr(\"invalid input path or input not exists\", \"red\")\n elif isinstance(inputPath, dict):\n assert all([os.path.exists(i) and os.path.isdir(i) for i in inputPath.values()]), ColorLogging.colorStr(\"invalid path or input not exists\", \"red\")\n\n if outputFile is not None:\n if isinstance(outputFile, str):\n outputFile = os.path.dirname(outputFile)\n assert os.path.exists(outputFile) and os.path.isdir(outputFile), ColorLogging.colorStr(\"invalid output path or output not exists\", \"red\")\n elif isinstance(outputFile, list):\n outputFile = [os.path.dirname(i) for i in outputFile]\n assert all([os.path.exists(i) and os.path.isdir(i) for i in outputFile]), ColorLogging.colorStr(\"invalid output path or output not exists\", \"red\")\n elif isinstance(outputFile, dict):\n outputFile = [os.path.dirname(i) for i in outputFile.values()]\n assert all([os.path.exists(i) and os.path.isdir(i) for i in outputFile]), ColorLogging.colorStr(\"invalid path or output not exists\", \"red\")\n\n #输入路径要求存在且是文件夹\n if outputPath is not None:\n if isinstance(outputPath, str):\n assert os.path.exists(outputPath) and os.path.isdir(outputPath), 
ColorLogging.colorStr(\"invalid output path or output not exists\", \"red\")\n elif isinstance(outputPath, list):\n assert all([os.path.exists(i) and os.path.isdir(i) for i in outputPath]), ColorLogging.colorStr(\"invalid output path or output not exists\", \"red\")\n elif isinstance(outputPath, dict):\n assert all([os.path.exists(i) and os.path.isdir(i) for i in outputPath.values()]), ColorLogging.colorStr(\"invalid path or output not exists\", \"red\")\n\ndef getMinRectangel(con):\n assert all([len(i.shape) == 2 for i in con]), ColorLogging.colorStr(\"invalid countors\", \"blue\")\n\n con = np.concatenate(con, axis=0)\n\n x_max = np.max(con[:,0])\n x_min = np.min(con[:,0])\n y_max = np.max(con[:,1])\n y_min = np.min(con[:,1])\n return np.array([[x_min, y_max], [x_min, y_min], [x_max, y_min], [x_max, y_max],])\n\ndef processImage(imgFile, outputPaths):\n \"\"\"\n :param imgFile: 单个的图片文件路径\n :param outputPaths: 输出路径组 分别将图片输出到不同的路径\n :param logOpen: 是否打开日志输出 默认关闭\n :return:\n \"\"\"\n # 二值化门限设定\n BINARY_THRESOLD = 100 # 二值化门限\n AREA_THRESOLD = 100 # 面积过滤门限\n LEFT_SCALE = 0.7 # 比例过滤下限\n RIGHT_SCALE = 1.3 # 比例过滤上限\n DILATE_ITERATION = 1 # 膨胀操作迭代数\n ERODE_ITERATION = 1 # 腐蚀操作迭代数\n BLUR_KERNAL = (3, 3)\n\n # HSV阈值参数设定\n # HSV值域 H[0,180] s[0, 255] v[0, 255]\n # blue H[97.5, 117.5] S[64, 255] V[38, 255]\n # red H[0, 10]U[170, 180] S[26, 255] V[38, 255]\n # yellow H[12, 32] S[69, 255] V[38, 255]\n\n blue_lower = np.array([97, 50, 38])\n blue_upper = np.array([120, 255, 255])\n\n #fake red0\n #red_lower0 = np.array([97, 50, 38])\n #red_upper0 = np.array([120, 255, 255])\n # fake red1\n #red_lower1 = np.array([97, 50, 38])\n #red_upper1 = np.array([120, 255, 255])\n\n red_lower0 = np.array([0, 26, 38])\n red_upper0 = np.array([3, 255, 255])\n #\n red_lower1 = np.array([175, 26, 38])\n red_upper1 = np.array([179, 255, 255])\n\n yellow_lower = np.array([12, 69, 38])\n yellow_upper = np.array([32, 255, 255])\n\n # 预先检查各类路径是否合理\n imgId = os.path.basename(imgFile).split(\".\")[0]\n\n #加载原图\n img = cv2.imread(imgFile)\n #cv2.imwrite('./imgs/00_img.jpg',img)\n\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['01_hsv'].rstrip('/'), imgId), hsv)\n\n # 提取蓝色区域\n mask_blue = cv2.inRange(hsv, blue_lower, blue_upper)\n #cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['02_0_blue_mask'].rstrip('/'), imgId), mask_blue)\n # 提取红色区域\n mask_red0 = cv2.inRange(hsv, red_lower0, red_upper0)\n mask_red1 = cv2.inRange(hsv, red_lower1, red_upper1)\n func_or = np.frompyfunc(lambda x, y: 255 if x > 0 or y > 0 else 0, 2, 1)\n\n mask_red = func_or(mask_red0, mask_red1).astype(np.uint8)\n cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['02_1_red_mask'].rstrip('/'), imgId), mask_red)\n # 提取黄色区域\n mask_yellow = cv2.inRange(hsv,yellow_lower, yellow_upper)\n #cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['02_2_yellow_mask'].rstrip('/'), imgId), mask_yellow)\n # 将所有过滤出来的区域求和 然后统一处理\n # 求或函数 如果任意一值 大于0 那么最后值为255\n #mask_added = np.minimum(reduce(func_or, [mask_blue, mask_yellow, mask_red,]), 255)\n mask_added = np.minimum(reduce(func_or, [mask_red,]), 255)\n mask_added = mask_added.astype(np.uint8)\n\n #模糊\n blurred=cv2.blur(mask_added, BLUR_KERNAL)\n #cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['03_blur'].rstrip('/'), imgId), blurred)\n #二值化\n ret,binary=cv2.threshold(blurred, BINARY_THRESOLD, 255, cv2.THRESH_BINARY)\n cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['04_binary'].rstrip('/'), imgId), binary)\n\n # 使区域闭合无空隙\n # 创建一个闭合空间的算子\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, 
(21, 7))\n #kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n\n #closed = binary\n closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)\n cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['05_closed'].rstrip('/'), imgId), closed)\n\n #腐蚀和膨胀\n '''\n 腐蚀操作将会腐蚀图像中白色像素,以此来消除小斑点,\n 而膨胀操作将使剩余的白色像素扩张并重新增长回去。\n '''\n erode=cv2.erode(closed,None,iterations=ERODE_ITERATION)\n cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['06_erode'].rstrip('/'), imgId), erode)\n dilate=cv2.dilate(erode,None,iterations=DILATE_ITERATION)\n cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['07_dilated'].rstrip('/'), imgId), dilate)\n\n # 查找轮廓\n image, contours, hierarchy=cv2.findContours(dilate.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n res = img.copy()\n for conIdx, con in enumerate(contours):\n #轮廓转换为矩形 返回的对象是(中心点, 长宽, 旋转角)\n rect=cv2.minAreaRect(con)\n #矩形转换为box\n # box是裁剪出来的矩形的四个定点的坐标 使用np.int0 取整数\n #box_0 = np.uint64(cv2.boxPoints(rect))\n\n box = np.uint64(getMinRectangel(con))\n\n #计算矩形的行列的边界\n\n height = box[0][1] - box[1][1]\n width = box[3][0] - box[0][0]\n\n #if abs(h1 - h2) / abs(l1 - l2)\n #在原图画出目标区域\n #cv2.drawContours 参数 (目标图像, 轮廓点集组)\n if width > 0 and height > 0 and LEFT_SCALE < float(height) / width < RIGHT_SCALE and height * width > AREA_THRESOLD:\n cv2.drawContours(res,[box],-1,(0,0,255),1)\n #cv2.drawContours(res, con, -1, (0, 255, 0), 2)\n\n #显示画了标志的原图\n cv2.imwrite(\"{0}/{1}.jpg\".format(outputPaths['08_signed_img'].rstrip('/'), imgId) ,res)\n\ndef processImages(srcImagePath, outputBasePath, subsample=None):\n \"\"\"\n 这个函数中的 路径组\n :param subsample 如果是一个整数 N 那么表示均匀按照顺序抽样 1/N\n :param srcImagePath:\n :param outputBasePaths: 中间结果帧的输出目录\n :return:\n \"\"\"\n assert not os.path.exists(outputBasePath), ColorLogging.colorStr(\"processed output base path {0} alreay exists\".format(outputBasePath), \"red\")\n os.makedirs(outputBasePath)\n pathCheck(inputPath=srcImagePath, outputPath=outputBasePath)\n \n videoFramePaths = {\n \"01_hsv\": \"{0}/01_hsv\".format(outputBasePath),\n #\"02_0_blue_mask\": \"{0}/02_0_blue_mask\".format(outputBasePath),\n \"02_1_red_mask\": \"{0}/02_1_red_mask\".format(outputBasePath),\n #\"02_2_yellow_mask\": \"{0}/02_2_yellow_mask\".format(outputBasePath),\n #\"03_blur\": \"{0}/03_blur\".format(outputBasePath),\n \"04_binary\": \"{0}/04_binary\".format(outputBasePath),\n \"05_closed\": \"{0}/05_closed\".format(outputBasePath),\n \"06_erode\": \"{0}/06_erode\".format(outputBasePath),\n \"07_dilated\": \"{0}/07_dilated\".format(outputBasePath),\n \"08_signed_img\": \"{0}/08_signed_img\".format(outputBasePath),\n #\"09_sign\": \"{0}/09_sign\".format(outputBasePath),\n }\n\n for dir in videoFramePaths.values():\n ColorLogging.info(\"create folder {0}\".format(dir))\n os.makedirs(dir)\n\n idx = 0\n for img in tqdm.tqdm(sorted(os.listdir(srcImagePath))):\n idx += 1\n if subsample is not None and (idx - 1) % subsample != 0:\n continue\n processImage(\"{0}/{1}\".format(srcImagePath.rstrip('/'), img), videoFramePaths)\n\n\n\nif __name__ == \"__main__\":\n dataPath = \"/home/szh-920/workspace/master_graduate/data\"\n videoFrameBase = \"{0}/test_one_img\".format(dataPath)\n videoFramePaths = {\n \"01_hsv\": \"{0}/01_hsv\".format(videoFrameBase),\n \"02_0_blue_mask\": \"{0}/02_0_blue_mask\".format(videoFrameBase),\n \"02_1_red_mask\": \"{0}/02_1_red_mask\".format(videoFrameBase),\n \"02_2_yellow_mask\": \"{0}/02_2_yellow_mask\".format(videoFrameBase),\n \"03_blur\": \"{0}/03_blur\".format(videoFrameBase),\n \"04_binary\": \"{0}/04_binary\".format(videoFrameBase),\n 
\"05_closed\": \"{0}/05_closed\".format(videoFrameBase),\n \"06_erode\": \"{0}/06_erode\".format(videoFrameBase),\n \"07_dilated\": \"{0}/07_dilated\".format(videoFrameBase),\n \"08_signed_img\": \"{0}/08_signed_img\".format(videoFrameBase),\n \"09_sign\": \"{0}/09_sign\".format(videoFrameBase),\n }\n\n if os.path.exists(videoFrameBase):\n os.system(\"rm -rf {0}\".format(videoFrameBase))\n\n for dir in videoFramePaths.values():\n print(dir)\n os.makedirs(dir)\n imgFile = \"/home/szh-920/workspace/master_graduate/src/pre_proc_code/imgs/00_img.jpg\"\n processImage(imgFile, outputPaths=videoFramePaths)","sub_path":"src/pre_proc_code/preprocess_imgs2.py","file_name":"preprocess_imgs2.py","file_ext":"py","file_size_in_byte":11821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"101180474","text":"import sqlite3\n\n# create a new database if the database doesn't already exist \nwith sqlite3.connect(\"test.db\") as connection:\n\n # get a cursor object used to execute SQL commands\n c = connection.cursor()\n\n # create the table\n c.execute(\"\"\"CREATE TABLE items\n (item_id INTEGER primary key autoincrement, task TEXT, priority NUMBER, status INTEGER) \n \"\"\")\n\n\n\n","sub_path":"flask/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"155023819","text":"from __future__ import unicode_literals\n\nfrom datetime import timedelta\nimport time\n\nfrom mayan.apps.common.tests import BaseTestCase\n\nfrom ..literals import STUB_EXPIRATION_INTERVAL\nfrom ..models import (\n DeletedDocument, Document, DocumentType, DuplicatedDocument\n)\n\nfrom .base import GenericDocumentTestCase\nfrom .literals import (\n TEST_DOCUMENT_TYPE_LABEL, TEST_DOCUMENT_PATH, TEST_MULTI_PAGE_TIFF_PATH,\n TEST_PDF_INDIRECT_ROTATE_PATH, TEST_OFFICE_DOCUMENT_PATH,\n TEST_SMALL_DOCUMENT_CHECKSUM, TEST_SMALL_DOCUMENT_FILENAME,\n TEST_SMALL_DOCUMENT_MIMETYPE, TEST_SMALL_DOCUMENT_PATH,\n TEST_SMALL_DOCUMENT_SIZE\n)\nfrom .mixins import DocumentTestMixin\n\n\nclass DocumentTestCase(DocumentTestMixin, BaseTestCase):\n def test_document_creation(self):\n self.assertEqual(self.test_document_type.label, TEST_DOCUMENT_TYPE_LABEL)\n\n self.assertEqual(self.test_document.exists(), True)\n self.assertEqual(self.test_document.size, TEST_SMALL_DOCUMENT_SIZE)\n\n self.assertEqual(\n self.test_document.file_mimetype, TEST_SMALL_DOCUMENT_MIMETYPE\n )\n self.assertEqual(self.test_document.file_mime_encoding, 'binary')\n self.assertEqual(self.test_document.label, TEST_SMALL_DOCUMENT_FILENAME)\n self.assertEqual(\n self.test_document.checksum, TEST_SMALL_DOCUMENT_CHECKSUM\n )\n self.assertEqual(self.test_document.page_count, 1)\n\n def test_version_creation(self):\n with open(TEST_SMALL_DOCUMENT_PATH, mode='rb') as file_object:\n self.test_document.new_version(file_object=file_object)\n\n with open(TEST_SMALL_DOCUMENT_PATH, mode='rb') as file_object:\n self.test_document.new_version(\n file_object=file_object, comment='test comment 1'\n )\n\n self.assertEqual(self.test_document.versions.count(), 3)\n\n def test_restoring_documents(self):\n self.assertEqual(Document.objects.count(), 1)\n\n # Trash the document\n self.test_document.delete()\n self.assertEqual(DeletedDocument.objects.count(), 1)\n self.assertEqual(Document.objects.count(), 0)\n\n # Restore the document\n self.test_document.restore()\n self.assertEqual(DeletedDocument.objects.count(), 0)\n 
self.assertEqual(Document.objects.count(), 1)\n\n def test_trashing_documents(self):\n self.assertEqual(Document.objects.count(), 1)\n\n # Trash the document\n self.test_document.delete()\n self.assertEqual(DeletedDocument.objects.count(), 1)\n self.assertEqual(Document.objects.count(), 0)\n\n # Delete the document\n self.test_document.delete()\n self.assertEqual(DeletedDocument.objects.count(), 0)\n self.assertEqual(Document.objects.count(), 0)\n\n def test_auto_trashing(self):\n \"\"\"\n Test document type trashing policies. Documents are moved to the\n trash, x amount of time after being uploaded\n \"\"\"\n self.test_document_type.trash_time_period = 1\n # 'seconds' is not a choice via the model, used here for convenience\n self.test_document_type.trash_time_unit = 'seconds'\n self.test_document_type.save()\n\n # Needed by MySQL as milliseconds value is not store in timestamp\n # field\n time.sleep(1.01)\n\n self.assertEqual(Document.objects.count(), 1)\n self.assertEqual(DeletedDocument.objects.count(), 0)\n\n DocumentType.objects.check_trash_periods()\n\n self.assertEqual(Document.objects.count(), 0)\n self.assertEqual(DeletedDocument.objects.count(), 1)\n\n def test_auto_delete(self):\n \"\"\"\n Test document type deletion policies. Documents are deleted from the\n trash, x amount of time after being trashed\n \"\"\"\n self.test_document_type.delete_time_period = 1\n # 'seconds' is not a choice via the model, used here for convenience\n self.test_document_type.delete_time_unit = 'seconds'\n self.test_document_type.save()\n\n self.assertEqual(Document.objects.count(), 1)\n self.assertEqual(DeletedDocument.objects.count(), 0)\n\n self.test_document.delete()\n\n self.assertEqual(Document.objects.count(), 0)\n self.assertEqual(DeletedDocument.objects.count(), 1)\n\n # Needed by MySQL as milliseconds value is not stored in timestamp\n # field\n time.sleep(1.01)\n\n DocumentType.objects.check_delete_periods()\n\n self.assertEqual(Document.objects.count(), 0)\n self.assertEqual(DeletedDocument.objects.count(), 0)\n\n\nclass PDFCompatibilityTestCase(BaseTestCase):\n def test_indirect_rotate(self):\n self.test_document_type = DocumentType.objects.create(\n label=TEST_DOCUMENT_TYPE_LABEL\n )\n\n with open(TEST_PDF_INDIRECT_ROTATE_PATH, mode='rb') as file_object:\n self.test_document = self.test_document_type.new_document(\n file_object=file_object\n )\n\n self.assertQuerysetEqual(\n qs=Document.objects.all(), values=(repr(self.test_document),)\n )\n\n\nclass OfficeDocumentTestCase(BaseTestCase):\n def setUp(self):\n super(OfficeDocumentTestCase, self).setUp()\n\n self.test_document_type = DocumentType.objects.create(\n label=TEST_DOCUMENT_TYPE_LABEL\n )\n\n with open(TEST_OFFICE_DOCUMENT_PATH, mode='rb') as file_object:\n self.test_document = self.test_document_type.new_document(\n file_object=file_object\n )\n\n def tearDown(self):\n self.test_document_type.delete()\n super(OfficeDocumentTestCase, self).tearDown()\n\n def test_document_creation(self):\n self.assertEqual(self.test_document.file_mimetype, 'application/msword')\n self.assertEqual(\n self.test_document.file_mime_encoding, 'binary'\n )\n self.assertEqual(\n self.test_document.checksum,\n '03a7e9071d2c6ae05a6588acd7dff1d890fac2772cf61abd470c9ffa6ef71f03'\n )\n self.assertEqual(self.test_document.page_count, 2)\n\n\nclass MultiPageTiffTestCase(BaseTestCase):\n def setUp(self):\n super(MultiPageTiffTestCase, self).setUp()\n self.test_document_type = DocumentType.objects.create(\n label=TEST_DOCUMENT_TYPE_LABEL\n )\n\n with 
open(TEST_MULTI_PAGE_TIFF_PATH, mode='rb') as file_object:\n self.test_document = self.test_document_type.new_document(\n file_object=file_object\n )\n\n def tearDown(self):\n self.test_document_type.delete()\n super(MultiPageTiffTestCase, self).tearDown()\n\n def test_document_creation(self):\n self.assertEqual(self.test_document.file_mimetype, 'image/tiff')\n self.assertEqual(self.test_document.file_mime_encoding, 'binary')\n self.assertEqual(\n self.test_document.checksum,\n '40adaa9d658b65c70a7f002dfe084a8354bb77c0dfbf1993e31fb024a285fb1d'\n )\n self.assertEqual(self.test_document.page_count, 2)\n\n\nclass DocumentVersionTestCase(GenericDocumentTestCase):\n def test_add_new_version(self):\n self.assertEqual(self.test_document.versions.count(), 1)\n\n with open(TEST_DOCUMENT_PATH, mode='rb') as file_object:\n self.test_document.new_version(\n file_object=file_object\n )\n\n self.assertEqual(self.test_document.versions.count(), 2)\n\n self.assertEqual(\n self.test_document.checksum,\n 'c637ffab6b8bb026ed3784afdb07663fddc60099853fae2be93890852a69ecf3'\n )\n\n def test_revert_version(self):\n self.assertEqual(self.test_document.versions.count(), 1)\n\n # Needed by MySQL as milliseconds value is not store in timestamp\n # field\n time.sleep(1.01)\n\n with open(TEST_DOCUMENT_PATH, mode='rb') as file_object:\n self.test_document.new_version(\n file_object=file_object\n )\n\n self.assertEqual(self.test_document.versions.count(), 2)\n\n self.test_document.versions.first().revert()\n\n self.assertEqual(self.test_document.versions.count(), 1)\n\n\nclass DocumentManagerTestCase(BaseTestCase):\n def setUp(self):\n super(DocumentManagerTestCase, self).setUp()\n self.test_document_type = DocumentType.objects.create(\n label=TEST_DOCUMENT_TYPE_LABEL\n )\n\n def tearDown(self):\n self.test_document_type.delete()\n super(DocumentManagerTestCase, self).tearDown()\n\n def test_document_stubs_deletion(self):\n document_stub = Document.objects.create(\n document_type=self.test_document_type\n )\n\n Document.passthrough.delete_stubs()\n\n self.assertEqual(Document.passthrough.count(), 1)\n\n document_stub.date_added = document_stub.date_added - timedelta(\n seconds=STUB_EXPIRATION_INTERVAL + 1\n )\n document_stub.save()\n\n Document.passthrough.delete_stubs()\n\n self.assertEqual(Document.passthrough.count(), 0)\n\n\nclass DuplicatedDocumentsTestCase(GenericDocumentTestCase):\n def test_duplicates_after_delete(self):\n self.upload_document()\n self.test_documents[1].delete()\n self.test_documents[1].delete()\n\n self.assertEqual(\n DuplicatedDocument.objects.filter(\n document=self.test_documents[0]\n ).count(), 0\n )\n\n def test_duplicates_after_trash(self):\n self.upload_document()\n self.test_documents[1].delete()\n\n self.assertFalse(\n self.test_documents[1] in DuplicatedDocument.objects.get(\n document=self.test_documents[0]\n ).documents.all()\n )\n\n def test_duplicate_scan(self):\n self.upload_document()\n\n self.assertTrue(\n self.test_documents[1] in DuplicatedDocument.objects.get(\n document=self.test_documents[0]\n ).documents.all()\n )\n","sub_path":"mayan/apps/documents/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"599343967","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n 
migrations.CreateModel(\n name='Carousel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ('imagen', models.ImageField(upload_to='carousel/')),\n ],\n options={\n 'verbose_name_plural': 'Marcas',\n },\n ),\n ]\n","sub_path":"electrica/apps/carousel/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"478644321","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/site-packages/webapptitude/jsonapi.py\n# Compiled at: 2016-08-31 16:32:16\n\"\"\"JSON API contracts and utilities.\"\"\"\nfrom google.appengine.ext import ndb\nfrom google.appengine.datastore import datastore_query\nfrom google.appengine.api import datastore_errors\nfrom webapp2 import exc as exceptions\nfrom jsonkit import json_encode, json_decode\nfrom gql import query as gql_query_request\nfrom util import RE_NUMERIC_INT, RE_NUMERIC_FLOAT\nimport webapp2, re, logging\n\ndef isregex(val):\n return type(val) is type(RE_NUMERIC_INT)\n\n\nclass ParameterNotFound(exceptions.HTTPBadRequest, BaseException):\n \"\"\"Exception, for when parameters are missing\"\"\"\n detail = 'A required parameter was not present on the request. (%s)'\n\n def __init__(self, param_name):\n detail = self.detail % param_name\n super(ParameterNotFound, self).__init__(detail=detail)\n\n\nclass ParameterMismatch(ParameterNotFound):\n \"\"\"Exception, for when parameters are malformed.\"\"\"\n detail = 'A required parameter did not match its expected format. 
(%s)'\n\n\nclass JSONRequestHandler(webapp2.RequestHandler):\n \"\"\"RequestHandler abstraction to enforce API contracts.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Prepare the basic JSON interface parameters.\"\"\"\n super(JSONRequestHandler, self).__init__(*args, **kwargs)\n self.content_type = 'application/json'\n\n def dispatch(self):\n \"\"\"Some light-weight constraints to enforce JSON handling.\"\"\"\n if not self.request.content_type == 'application/json':\n if self.request.method in ('PUT', 'POST'):\n raise exceptions.HTTPUnprocessableEntity\n self.response.content_type = 'application/json'\n return super(JSONRequestHandler, self).dispatch()\n\n def fetch_param(self, name, validate=None, optional=False, default=None):\n \"\"\"Extract a value from query or request body, with validation.\"\"\"\n value = self.request.input\n for k in name.split('.'):\n value = value.get(k, None)\n if value is None:\n break\n\n if value is None and not optional:\n raise ParameterNotFound(name)\n elif value is None:\n return default\n if validate in (int, 'int'):\n if not RE_NUMERIC_INT.match(value):\n raise ParameterMismatch(name)\n if validate in (float, 'float'):\n if not RE_NUMERIC_FLOAT.match(value):\n raise ParameterMismatch(name)\n if validate in (dict, 'dict'):\n if not isinstance(value, dict):\n raise ParameterMismatch(name)\n if isinstance(validate, basestring):\n validate = re.compile(validate)\n if isregex(validate):\n if not validate.match(value):\n raise ParameterMismatch(name)\n return value\n\n\ndef assert_authorization(handler, model=None, instance=None):\n \"\"\"Simple wrapper to ensure consistent authorization handling.\"\"\"\n if model is None:\n model = handler.__modelclass__\n if not handler.is_authorized(method=handler.request.method, model=model, instance=instance):\n raise exceptions.HTTPForbidden\n return\n\n\nclass ModelAPI(JSONRequestHandler):\n __modelclass__ = None\n __apiname__ = None\n route_template_item = 'api/data/i/%s/'\n route_template_collection = 'api/data/i/%s/'\n\n @classmethod\n def routes(cls, prefix='/'):\n \"\"\"Construct a URL route (pattern) for this model class.\"\"\"\n clsname = cls.__apiname__ or cls.__modelclass__._get_kind()\n yield webapp2.Route((prefix + cls.route_template_collection) % clsname, handler=cls, name='data/%s/collection' % clsname)\n yield webapp2.Route((prefix + cls.route_template_item) % clsname, handler=cls, name='data/%s/item' % clsname)\n\n @classmethod\n def register_routes(cls, app, prefix='/'):\n \"\"\"Attach this class to a specific application.\"\"\"\n assert isinstance(app, webapp2.WSGIApplication), 'Registration requires a webapp2.WSGIApplication instance'\n for route in cls.routes(prefix=prefix):\n app.router.add(route)\n\n @classmethod\n def model_url(cls, model=None, instance=None):\n \"\"\"Retrieve a URL for a model (instance or key).\"\"\"\n if model is None:\n model = cls.__modelclass__\n if isinstance(instance, ndb.Model):\n name = 'data/%s/item' % instance._get_kind()\n kwargs = {'key': instance.key.urlsafe()}\n elif isinstance(instance, ndb.Key):\n name = 'data/%s/item' % instance.kind()\n kwargs = {'key': instance.urlsafe()}\n else:\n name = 'data/%s/collection' % model._get_kind()\n kwargs = {}\n return webapp2.uri_for(name, **kwargs)\n\n @property\n def model(self):\n return self.__modelclass__\n\n def fetch_model_instance(self, key, required=False):\n \"\"\"Retireve the given key from datastore, with exception if invalid.\"\"\"\n result = None\n if key:\n result = ndb.Key(urlsafe=key)\n result = 
result.get()\n if result is None and required:\n raise exceptions.HTTPNotFound\n return result\n\n def is_authorized(self, method=None, model=None, instance=None):\n \"\"\"\n Simple request authorization proxy.\n\n Subclasses should override this per model.\n \"\"\"\n return True\n\n def prepare_query(self):\n \"\"\"\n Construct a query from the given query.\n\n This will assemble a GQL query string from a notation similar to that\n of MongoDB.\n \"\"\"\n return gql_query_request(self.request, self.__modelclass__)\n\n def present(self, item):\n \"\"\"Coerce a datastore object to a JSON-compatible representation.\"\"\"\n assert isinstance(item, ndb.Model), 'Require an NDB model for JSON coersion'\n result = item.to_dict()\n result['$key'] = item.key\n result['$url'] = self.model_url(instance=item)\n return result\n\n def retrieve_input(self, model):\n \"\"\"Map a dictionary for properties of the model from the request.\"\"\"\n values = {}\n for k, prop in model._properties.items():\n provided = self.request.input.get(k, None)\n if provided is not None:\n values[k] = provided\n\n return values\n\n def get(self, key=None):\n \"\"\"Fetch a specific instance (or collection) representation.\"\"\"\n assert_authorization(self)\n if key:\n item = self.fetch_model_instance(key, required=True)\n assert_authorization(self, instance=item)\n return self.response.write_json(self.present(item))\n else:\n query = self.prepare_query()\n return self.response.write_json([ self.present(i) for i in query ])\n\n def put(self, key=None):\n \"\"\"Update a specific instance.\"\"\"\n assert_authorization(self)\n item = self.fetch_model_instance(key, required=True)\n props = self.retrieve_input(self.__modelclass__)\n assert_authorization(self, instance=item)\n try:\n item.populate(**props)\n item.put()\n self.response.set_status(202, 'Accepted')\n except ValueError as e:\n message = 'Coersion failure [%s]' % e.message\n raise exceptions.HTTPUnprocessableEntity(message)\n\n def post(self, key=None):\n \"\"\"Create a new instance.\"\"\"\n if key:\n raise exceptions.HTTPBadRequest\n assert_authorization(self)\n props = self.retrieve_input(self.__modelclass__)\n try:\n item = self.__modelclass__(**props)\n key = item.put()\n self.response.set_status(201, 'Created')\n self.response.headers['Location'] = self.model_url(instance=key)\n except ValueError as e:\n message = 'Coersion failure [%s]' % e.message\n raise exceptions.HTTPUnprocessableEntity(message)\n\n def head(self, key=None):\n \"\"\"Retrieve some metadata about the record.\"\"\"\n assert_authorization(self)\n if key:\n item = self.fetch_model_instance(key, required=True)\n assert_authorization(self, instance=item)\n self.response.headers['X-Record-Exists'] = 'true'\n self.response.set_status(200, 'OK')\n else:\n query = self.prepare_query()\n self.response.headers['X-Records-Match'] = query.count()\n self.response.set_status(200, 'OK')\n\n def delete(self, key=None):\n \"\"\"Remove a record from this model.\"\"\"\n assert_authorization(self)\n try:\n item = ndb.Key(urlsafe=key)\n if item is None:\n raise exceptions.HTTPNotFound\n assert_authorization(self, instance=item)\n item.delete()\n self.response.set_status(202, 'Accepted')\n except:\n raise exceptions.HTTPUnprocessableEntity\n\n return\n\n\ndef api(model, name_override=None):\n \"\"\"Construct a request handler class from an ndb.Model class.\"\"\"\n assert issubclass(model, (ndb.Model, ndb.Expando)), 'JSON API requires a model.'\n bases = (\n ModelAPI, JSONRequestHandler)\n props = {'__modelclass__': 
model, '__apiname__': name_override}\n return type('JSONAPI_' + model.__name__, bases, props)","sub_path":"pycfiles/webapptitude-0.0.10.linux-x86_64.tar/jsonapi.py","file_name":"jsonapi.py","file_ext":"py","file_size_in_byte":9503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"19044098","text":"import sys\nimport urllib.request\nimport re\nimport json\n\nfrom bs4 import BeautifulSoup\n\ncache = {}\nfor line in open(sys.argv[1]):\n fields = line.rstrip('\\n').split('\\t')\n sid = fields[0]\n uid = fields[2]\n\n #url = 'http://twitter.com/%s/status/%s' % (uid, sid)\n #print \"debug: \"+uid+\" \"+sid+\"\\n\" \n\n tweet = None\n text = \"Not Available\"\n if sid in cache.keys():\n text = cache[sid]\n #print \"debug1\"+text+\"\\n\"\n else:\n try:\n # get status page\n f = urllib.request.urlopen(\"http://twitter.com/%s/status/%s\" % (uid, sid))\n # parse with Beautiful soup\n html = f.read().replace(\"\", \"\") + \"\"\n soup = BeautifulSoup(html)\n #small elements contain the status ids\n small = soup.select(\"small > a\")\n #p elements next to small elements have the tweet content\n p = soup.find_all(\"p\", attrs={'class' : \"js-tweet-text\"})\n # search for the tweet with the correct status id.\n for i in range(len(small)):\n #print small[i]\n regex=re.escape(sid)\n if re.search(regex,str(small[i])):\n text= p[i].get_text()\n cache[sid]=text\n break\n except Exception as e:\n print(\"ERROR:\", str(e))\n continue\n text = text.replace('\\n', ' ',)\n text = re.sub(r'\\s+', ' ', text)\n #print json.dumps(tweet, indent=2)\n print( \"\\t\".join(fields + [text]).encode('utf-8'))\n\n","sub_path":"TT-classification/new_download_tweets.py","file_name":"new_download_tweets.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"644130339","text":"__all__ = [\"Pins\", \"Subsignal\", \"DiffPairs\", \"Resource\"]\n\n\nclass Pins:\n def __init__(self, names, dir=\"io\"):\n if not isinstance(names, str):\n raise TypeError(\"Names must be a whitespace-separated string, not {!r}\"\n .format(names))\n self.names = names.split()\n\n if dir not in (\"i\", \"o\", \"io\"):\n raise TypeError(\"Direction must be one of \\\"i\\\", \\\"o\\\" or \\\"io\\\", not {!r}\"\n .format(dir))\n self.dir = dir\n\n def __repr__(self):\n return \"(pins {} {})\".format(\" \".join(self.names), self.dir)\n\n\nclass DiffPairs:\n def __init__(self, p, n, dir=\"io\"):\n self.p = Pins(p, dir=dir)\n self.n = Pins(n, dir=dir)\n\n if len(self.p.names) != len(self.n.names):\n raise TypeError(\"Positive and negative pins must have the same width, but {!r} \"\n \"and {!r} do not\"\n .format(self.p, self.n))\n\n self.dir = dir\n\n def __repr__(self):\n return \"(diffpairs {} {})\".format(self.p, self.n)\n\n\nclass Subsignal:\n def __init__(self, name, *io, extras=()):\n self.name = name\n\n if not io:\n raise TypeError(\"Missing I/O constraints\")\n for c in io:\n if not isinstance(c, (Pins, DiffPairs, Subsignal)):\n raise TypeError(\"I/O constraint must be one of Pins, DiffPairs or Subsignal, \"\n \"not {!r}\"\n .format(c))\n if isinstance(io[0], (Pins, DiffPairs)) and len(io) > 1:\n raise TypeError(\"Pins and DiffPairs cannot be followed by more I/O constraints, but \"\n \"{!r} is followed by {!r}\"\n .format(io[0], io[1]))\n if isinstance(io[0], Subsignal):\n for c in io[1:]:\n if not isinstance(c, Subsignal):\n raise TypeError(\"A Subsignal can only be followed by more Subsignals, but \"\n 
\"{!r} is followed by {!r}\"\n .format(io[0], c))\n self.io = io\n\n for c in extras:\n if not isinstance(c, str):\n raise TypeError(\"Extra constraint must be a string, not {!r}\".format(c))\n self.extras = list(extras)\n\n if isinstance(self.io[0], Subsignal):\n for sub in self.io:\n sub.extras += self.extras\n\n def __repr__(self):\n return \"(subsignal {} {} {})\".format(self.name,\n \" \".join(map(repr, self.io)),\n \" \".join(self.extras))\n\n\nclass Resource(Subsignal):\n def __init__(self, name, number, *io, extras=()):\n super().__init__(name, *io, extras=extras)\n\n self.number = number\n\n def __repr__(self):\n return \"(resource {} {} {} {})\".format(self.name, self.number,\n \" \".join(map(repr, self.io)),\n \" \".join(self.extras))\n","sub_path":"nmigen/build/dsl.py","file_name":"dsl.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"54470336","text":"# get researcher name and quality as int or float\n# print our researcher name and convert quality\n\nname = input(\"Please enter your name: \")\nquality = float(input(\"Please enter your quality. \\nIt should be a number\\\n between 0 and 100: \"))\n\nif quality < 50:\n print(\"I'm afraid you have Concerning Quality, {}\".format(name))\nelif 50 <= quality < 70:\n print(\"I'm afraid your quality Needs Improvement, {}\".format(name))\nelif 70 <= quality <= 85:\n print(\"Good Quality, {}\".format(name))\nelse:\n print(\"Excellent Quality, {}!\".format(name))","sub_path":"Desktop/coding/udemy_python/helloworld/classroom_4_ex2.py","file_name":"classroom_4_ex2.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"596456564","text":"'''\n------------------------------------------------------------------------\nLast updated 1/29/2015\n\nReturns the wealth for all ages of a certain percentile.\n\nThis py-file calls the following other file(s):\n jason_savings_data/scf2007to2013_wealth_age.csv\n\nThis py-file creates the following other file(s):\n (make sure that an OUTPUT folder exists)\n OUTPUT/Nothing/wealth_data_moments.pkl\n------------------------------------------------------------------------\n'''\n\n'''\n------------------------------------------------------------------------\n Packages\n------------------------------------------------------------------------\n'''\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport pickle\n\ndata = pd.read_table(\n \"data/wealth/scf2007to2013_wealth_age.csv\", sep=',', header=0)\ndel data['num_obs']\n# rearrange columns so the median values are after the 10th percentile values\ncols = ['age', 'mean_wealth', 'sd_wealth', 'p10_wealth', 'median_wealth', 'p90_wealth', 'p95_wealth', 'p96_wealth', 'p98_wealth', 'p99_wealth']\ndata = data[cols]\n\np98 = np.array(data['p98_wealth'])\np99 = np.array(data['p99_wealth'])\n\nvar_names = ['p98', 'p99']\ndictionary = {}\nfor key in var_names:\n dictionary[key] = globals()[key]\npickle.dump(dictionary, open(\"OUTPUT/Nothing/wealth_data_moments.pkl\", \"w\"))\n","sub_path":"Python/wealth_data.py","file_name":"wealth_data.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"605394251","text":"#encoding: utf-8\n#!/bin/python2.7\n\nimport numpy as np\n\n\n# PLOTS PARAMETRIZATION\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as 
plt\n\n#Direct input \nplt.rcParams['text.latex.preamble']=[r\"\\usepackage{times} \\usepackage{txfonts} \\usepackage[french]{babel} \\RequirePackage[utf8]{inputenc}\"]\n\n#Options\nparams = {'backend': 'PS',\n 'font.family' : 'serif',\n 'text.usetex' : True,\n 'font.size' : 20,\n 'font.weight' : 'bold',\n 'axes.labelsize' : 20,\n #'axes.weight' : 20,\n 'text.fontsize' : 20,\n #'tick.serif' : 'serif',\n #'label.family' : 'serif',\n #'xtick.family': 'serif',\n 'xtick.labelsize' : 20,\n 'ytick.labelsize' : 20,\n #'xtick.weight' : 'bold',\n #'ytick.weight' : 'bold'\n }\n\nplt.rcParams.update(params)\n\n\n# LOAD NUMERICAL RESULT FOR REFERENCE MODEL\ndobsz = np.loadtxt('dobsz.ascii')\n#plt.imshow(dobsz, aspect='auto', extent=[20.,60.,1.,0.], cmap='gray')\n\n\n# REFERENCE MODEL PARAMETERS\nh1 = 5.\nvp1 = 600.\nvs1 = 300.\nvr1 = 290.\nh2 = 5.\nvp2 = 1000.\nvs2 = 500.\nvr2 = 580.\nh3 = 5.\nvp3 = 1500.\nvs3 = 750.\nvr3 = 725.\nnrec = 81\nrec = np.linspace(0,80.,nrec)\n\n# INITIALIZE ARRAYS\ntrp0 = np.zeros((nrec), dtype=np.float32)\ntrp1 = np.zeros((nrec), dtype=np.float32)\ntrp1m = np.zeros((nrec), dtype=np.float32)\ntrp1mm = np.zeros((nrec), dtype=np.float32)\ntrp2 = np.zeros((nrec-2), dtype=np.float32)\ntrp3 = np.zeros((nrec-2), dtype=np.float32)\ntrs0 = np.zeros((nrec), dtype=np.float32)\ntrs1 = np.zeros((nrec), dtype=np.float32)\ntrs1m = np.zeros((nrec), dtype=np.float32)\ntrs1mm = np.zeros((nrec), dtype=np.float32)\ntrs2 = np.zeros((nrec-2), dtype=np.float32)\ntrs3 = np.zeros((nrec-2), dtype=np.float32)\ntrrA = np.zeros((nrec), dtype=np.float32)\ntrrB = np.zeros((nrec), dtype=np.float32)\n\n# FIRST LAYER\nfor irec in range(0, nrec):\n x1 = rec[irec]/2.\n theta = np.arctan(x1/h1)\n d1 = np.sqrt(x1*x1+h1*h1)\n trp0[irec] = (x1*2.)/vp1\n trp1[irec] = (d1*2)/vp1\n trs0[irec] = (x1*2.)/vs1\n trs1[irec] = (d1*2)/vs1\n trrA[irec] = (x1*2.)/vr1\n trrB[irec] = (x1*2.)/vr3\n \n# FIRST LAYER multiple 2\nfor irec in range(0, nrec):\n x1 = rec[irec]/8.\n theta = np.arctan(x1/h1)\n d1 = np.sqrt(x1*x1+h1*h1)\n trp1mm[irec] = (d1*8)/vp1\n trs1mm[irec] = (d1*8)/vs1\n \n# FIRST LAYER multiple 4\nfor irec in range(0, nrec):\n x1 = rec[irec]/4.\n theta = np.arctan(x1/h1)\n d1 = np.sqrt(x1*x1+h1*h1)\n trp1m[irec] = (d1*4)/vp1\n trs1m[irec] = (d1*4)/vs1\n \n# Vp SECOND LAYER\nfor irec in range(0, nrec-2):\n xtot = 0.\n theta = 0.\n while (xtot) < (rec[irec+2]/2.):\n theta1 = theta*np.pi/180.\n theta2 = np.arcsin(vp2/vp1*np.sin(theta1))\n x1 = h1*np.tan(theta1)\n x2 = h2*np.tan(theta2)\n xtot = x1+x2\n theta += 0.01 \n d1 = np.sqrt(x1*x1+h1*h1)\n d2 = np.sqrt(x2*x2+h2*h2)\n trp2[irec] = d1*2./vp1+d2*2./vp2\n\n# VS SECOND LAYER\nfor irec in range(0, nrec-2):\n xtot = 0.\n theta = 0.\n while (xtot) < (rec[irec+2]/2.):\n theta1 = theta*np.pi/180.\n theta2 = np.arcsin(vs2/vs1*np.sin(theta1))\n x1 = h1*np.tan(theta1)\n x2 = h2*np.tan(theta2)\n xtot = x1+x2\n theta += 0.01 \n d1 = np.sqrt(x1*x1+h1*h1)\n d2 = np.sqrt(x2*x2+h2*h2)\n trs2[irec] = d1*2./vs1+d2*2./vs2\n \n# Vp THIRD LAYER\nfor irec in range(0, nrec-2):\n xtot = 0.\n theta = 0.\n while (xtot) < (rec[irec+2]/2.):\n theta1 = theta*np.pi/180.\n theta2 = np.arcsin(vp2/vp1*np.sin(theta1))\n theta3 = np.arcsin(vp3/vp2*np.sin(theta2))\n x1 = h1*np.tan(theta1)\n x2 = h2*np.tan(theta2)\n x3 = h3*np.tan(theta3)\n xtot = x1+x2+x3\n theta += 0.01 \n d1 = np.sqrt(x1*x1+h1*h1)\n d2 = np.sqrt(x2*x2+h2*h2)\n d3 = np.sqrt(x3*x3+h3*h3)\n trp3[irec] = d1*2./vp1+d2*2./vp2+d3*2./vp3\n\n# Vs THIRD LAYER\nfor irec in range(0, nrec-2):\n xtot = 0.\n theta = 0.\n while (xtot) < 
(rec[irec+2]/2.):\n theta1 = theta*np.pi/180.\n theta2 = np.arcsin(vs2/vs1*np.sin(theta1))\n theta3 = np.arcsin(vs3/vs2*np.sin(theta2))\n x1 = h1*np.tan(theta1)\n x2 = h2*np.tan(theta2)\n x3 = h3*np.tan(theta3)\n xtot = x1+x2+x3\n theta += 0.01 \n d1 = np.sqrt(x1*x1+h1*h1)\n d2 = np.sqrt(x2*x2+h2*h2)\n d3 = np.sqrt(x3*x3+h3*h3)\n trs3[irec] = d1*2./vs1+d2*2./vs2+d3*2./vs3\n \n\n# FIGURE 1: TRAVEL TIME FOR SIMPLE REFLECTION + DIRECT ARRIVAL\nfont1 = plt.figure(figsize=(5.0,7.5))\nfig1 = font1.add_subplot(1,1,1)\nfig1.set_xlabel(r\"distance (m)\")\nfig1.set_ylabel(r\"temps (s)\")\nfig1.tick_params(axis='x')\nfig1.tick_params(axis='y')\nfig1.xaxis.grid(True, which='major')\nfig1.yaxis.grid(True, which='major')\nfig1.set_ylim(0., 0.25)\nfig1.set_xlim(0., 60.)\nfig1.invert_yaxis()\n\nfig1.plot(rec, trrA, '--', color='black', linewidth=2)\nfig1.plot(rec, trp0, '-', color='red', linewidth=2, label='P directe')\nfig1.plot(rec, trp1, '-', color='red', linewidth=1, label='P refl. 1')\nfig1.plot(rec[2:nrec], trp2, '--', color='red', linewidth=1, label='P refl. 2') \nfig1.plot(rec[2:nrec], trp3, ':', color='red', linewidth=1, label='P refl. 3')\nfig1.plot(rec, trs0, '-', color='green', linewidth=2, label='S directe')\nfig1.plot(rec, trs1, '-', color='green', linewidth=1, label='S refl. 1')\nfig1.plot(rec[2:nrec], trs2, '--', color='green', linewidth=1, label='S refl. 2') \nfig1.plot(rec[2:nrec], trs3, ':', color='green', linewidth=1, label='S refl. 3')\n\nfig1.legend(fontsize=12, loc='lower left', ncol=2)\n\n# FIGURE 2: TRAVEL TIME FOR MULTIPLE REFLECTION + DIRECT ARRIVAL\nfont2 = plt.figure(figsize=(5.0,7.5))\nfig2 = font2.add_subplot(1,1,1)\nfig2.set_xlabel(r\"distance (m)\")\nfig2.set_ylabel(r\"temps (s)\")\nfig2.tick_params(axis='x')\nfig2.tick_params(axis='y')\nfig2.xaxis.grid(True, which='major')\nfig2.yaxis.grid(True, which='major')\nfig2.set_ylim(0., 0.25)\nfig2.set_xlim(0., 60.)\nfig2.invert_yaxis()\n\nfig2.plot(rec, trrA, '--', color='black', linewidth=2)\nfig2.plot(rec, trrB, '--', color='black', linewidth=2)\nfig2.plot(rec, trp0, '-', color='red', linewidth=2, label='P directe')\nfig2.plot(rec, trp1m, '--', color='red', linewidth=1, label='P double refl.')\nfig2.plot(rec, trp1mm, ':', color='red', linewidth=1, label='P quad. refl.')\nfig2.plot(rec, trs0, '-', color='green', linewidth=2, label='S directe')\nfig2.plot(rec, trs1m, '--', color='green', linewidth=1, label='S double refl.')\nfig2.plot(rec, trs1mm, ':', color='green', linewidth=1, label='S quad. refl.')\n\nfig2.legend(fontsize=12, loc='lower left', ncol=2)\n\n\n# FIGURE 3: TRAVEL TIME FOR ALL S + NUMERICAL RESULT\nfont3 = plt.figure(figsize=(5.0,7.5))\nfig3 = font3.add_subplot(1,1,1)\nfig3.set_xlabel(r\"distance (m)\")\nfig3.set_ylabel(r\"temps (s)\")\nfig3.tick_params(axis='x')\nfig3.tick_params(axis='y')\nfig3.xaxis.grid(True, which='major')\nfig3.yaxis.grid(True, which='major')\nfig3.set_ylim(0., 0.5)\nfig3.set_xlim(20., 60.)\nfig3.invert_yaxis()\n\nfig3.imshow(dobsz, aspect='auto', extent=[20.,60.,1.,0.], cmap='gray')\nfig3.plot(rec, trrA+0.1, '--', color='blue', linewidth=2)\nfig3.plot(rec, trs0+0.1, '-', color='green', linewidth=2, label='S directe')\nfig3.plot(rec[2:nrec], trs2+0.1, '-', color='green', linewidth=2, label='S refl. 2') \nfig3.plot(rec[2:nrec], trs3+0.1, '-', color='green', linewidth=2, label='S refl. 3')\nfig3.plot(rec, trs1m+0.1, '-', color='green', linewidth=2, label='S double refl.')\nfig3.plot(rec, trs1mm+0.1, '-', color='green', linewidth=2, label='S quad. 
refl.')\n\n#fig2.legend(fontsize=12, loc='lower left', ncol=2)\n\nfont1.savefig('simple_reflection.ps')\nfont2.savefig('multiple_reflections.ps')\nfont3.savefig('reflections_numerique.ps')\n#plt.plot(rec, trp0+0.1, '-', color='red', linewidth=2)\n#plt.plot(rec, trp1+0.1, '-', color='red', linewidth=1)\n#plt.plot(rec[2:nrec], trp2+0.1, '--', color='red', linewidth=1)\n#plt.plot(rec[2:nrec], trp3+0.1, ':', color='red', linewidth=1)\n#plt.plot(rec, trs0+0.1, '-', color='green', linewidth=2)\n#plt.plot(rec, trs1+0.1, '-', color='green', linewidth=1)\n#plt.plot(rec[2:nrec], trs2+0.1, '--', color='green', linewidth=1)\n#plt.plot(rec[2:nrec], trs3+0.1, ':', color='green', linewidth=1)\n#plt.plot(rec, trrA+0.1, '-', color='black', linewidth=2)\n\n#plt.plot(rec, trp1m+0.1, '-', color='gray', linewidth=1)\n#plt.plot(rec, trs1m+0.1, '-', color='gray', linewidth=1)\n#plt.plot(rec, trp1mm+0.1, '-', color='gray', linewidth=1)\n#plt.plot(rec, trs1mm+0.1, '-', color='gray', linewidth=1)\n\nplt.show()\n","sub_path":"scripts/ray/ray.py","file_name":"ray.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"186552398","text":"\n# Loader name: BOYR_ADJUSTED_INFLOWS\n# Loader author: KEVIN FOLEY\n# Loader created: 2019-01-08\n \n# WRITE LOADER BELOW ---------------------\n\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom datetime import datetime\n\ndef dataLoaderInfo():\n\toptionsDict={\n\t\t\"Param1\":\"\",\n\t\t\"Param1Operator\":\"+\",\n\t\t\"Param2\":\"\",\n\t\t\"Param2Operator\":\"+\",\n\t\t\"Param3\":\"\",\n\t\t\"Param3Operator\":\"+\"}\n\tdescription = \"Downloads GP hydromet data as (operator1)param1 + (operator2)param2 + (operator3)param3, where operator is either + or -\"\n\treturn optionsDict, description\n\ndef dataLoader(stationDict, startDate, endDate):\n\tsyear = datetime.strftime(startDate, '%Y')\n\tsmonth = datetime.strftime(startDate, '%m')\n\tsday = datetime.strftime(startDate, '%d')\n\teyear = datetime.strftime(endDate, '%Y')\n\temonth = datetime.strftime(endDate, '%m')\n\teday = datetime.strftime(endDate, '%d')\n\n\tparams = []\n\tops = []\n\tfor key in stationDict.keys():\n\t\tif 'Parameter' in key:\n\t\t\tcontinue\n\t\tif 'Param' in key:\n\t\t\tif 'Operator' in key:\n\t\t\t\tcontinue\n\t\t\tif stationDict[key] != '':\n\t\t\t\tparams.append(stationDict[key])\n\t\t\t\tops.append(stationDict[key+'Operator'])\n\n\tdf = pd.DataFrame(index = pd.date_range(startDate, endDate))\n\n\tfor i, param in enumerate(params):\n\t\tstationID = param.split(' ')[0]\n\t\tpcode = param.split(' ')[1]\n\t\turl = (\"https://www.usbr.gov/gp-bin/arcread.pl?st={0}&by={1}&bm={2}&bd={3}&ey={4}&em={5}&ed={6}&pa={7}&json=1\")\n\t\turl = url.format(stationID, syear, smonth, sday, eyear, emonth, eday, pcode)\n\t\tresponse = requests.get(url)\n\t\tdata = response.json()\n\t\tdataValues = data['SITE']['DATA']\n\t\tdf1 = pd.DataFrame(dataValues, index = pd.date_range(startDate, endDate))\n\t\tdel df1['DATE']\n\t\tdf1[pcode.upper()] = pd.to_numeric(df1[pcode.upper()])\n\t\tdf1.replace(to_replace=998877, value=np.nan, inplace=True)\n\t\tdf1.replace(to_replace=998877.0, value=np.nan, inplace=True)\n\t\tdf1 = df1[~df1.index.duplicated(keep='first')]\n\t\tdf1 = df1[~df1.index.isnull()]\n\t\tdf1.columns = [param]\n\t\tif ops[i] == '-':\n\t\t\tdf1[param] = -1*df1[param]\n\t\tdf = pd.concat([df1, df], axis=1)\n\t\tdf = df.round(3)\n\n\tdfOut = df.sum(axis=1)\n\treturn 
pd.DataFrame(dfOut)","sub_path":"Resources/DataLoaders/Custom/BOYR_ADJUSTED_INFLOWS.py","file_name":"BOYR_ADJUSTED_INFLOWS.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"625237993","text":"import sys\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom auth_window import Ui_auth_window\r\nfrom cabinet_window import Ui_cabinet_window\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy import Table, Column\r\nfrom sqlalchemy import MetaData\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy import BigInteger, Date, String\r\n\r\ndbs ='postgresql+psycopg2://postgres:dukenukemistotallycool78@localhost:5433/mydatabase' \r\n\r\ndb = create_engine(dbs)\r\n\r\nmetadata = MetaData()\r\n\r\nbase = declarative_base()\r\n\r\nclass User(base):\r\n\t__tablename__ = 'mytable'\r\n\r\n\tid_user = Column(BigInteger, primary_key=True)\r\n\tlogin = Column(String)\r\n\tpassword = Column(String)\r\n\tdateofbirth = Column(Date)\r\n\tgrp = Column(String)\r\n\t\r\n\r\nSession = sessionmaker(bind=db)\r\nsession = Session()\r\n\r\nbase.metadata.create_all(db)\r\n\r\nclass mainWindow(QtWidgets.QMainWindow):\r\n\tdef __init__(self,parent=None):\r\n\t\tsuper(mainWindow, self).__init__()\r\n\t\tself.ui = Ui_auth_window()\r\n\t\tself.ui.setupUi(self) #инициализация дизайна\r\n\t\tself.ui.enterBut.clicked.connect(self.createEngine)\r\n\t\t\r\n\r\n\tdef createEngine(self):\r\n\t\tglobal l\r\n\t\tglobal p \r\n\t\tglobal engine\r\n\t\tglobal query\r\n\t\tl = self.ui.lLine.text()\r\n\t\tp = self.ui.pLine.text()\r\n\r\n\t\tquery = session.query(User).filter_by(login=l)\r\n\r\n\t\tif query.first()==None:\r\n\t\t\tusr = User(login=l, password=p)\r\n\t\t\tsession.add(usr)\r\n\t\t\tsession.commit()\r\n\r\n\t\tself.hide()\r\n\t\tself.ui=cabinet()\r\n\t\tself.ui.show()\r\n\r\nclass cabinet(QtWidgets.QMainWindow):\r\n\tdef __init__(self,parent=None):\r\n\t\t#обеспечение доступа к переменным и методам из дизайна\r\n\t\tsuper(cabinet, self).__init__()\r\n\t\tself.ui = Ui_cabinet_window()\r\n\t\tself.ui.setupUi(self) #инициализация дизайна\r\n\t\tself.ui.addBut.clicked.connect(self.fillCabinet) #кнопка Добавить\r\n\r\n\t#получение даты рождения и номера группы \r\n\tdef fillCabinet(self):\r\n\t\tglobal dob\r\n\t\tglobal grp\r\n\t\tdob = self.ui.dobLine.text()\r\n\t\tgrp = self.ui.grpLine.text()\r\n\t\tquery.update({User.dateofbirth:dob, User.grp:grp}, synchronize_session = False)\r\n\t\tsession.commit()\r\n\t\tself.ui.exitBut.clicked.connect(self.returnToMain)\r\n\r\n\tdef returnToMain(self):\r\n\t\tself.hide()\r\n\t\tself.ui=mainWindow()\r\n\t\tself.ui.show()\r\n\r\n\t\t\t\t\r\nif __name__ == \"__main__\":\r\n\tapp = QtWidgets.QApplication(sys.argv)\r\n\twin = mainWindow()\r\n\twin.show()\r\n\r\n\tsys.exit(app.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"110328751","text":"#!/usr/bin/env python\nimport os\nimport os.path as P\nimport requests\nimport lxml.html\nimport lxml.etree\nimport urllib.parse\nimport codecs\nimport datetime\nimport unidecode\nimport bs4\nimport urllib.request\nimport urllib.parse\nimport urllib.error\n\nDEBUG = 0\n\n\"\"\"The headers to write for each post.\"\"\"\nHEADERS = [\"categories: blog\", \"layout: post\"]\n\n\ndef encode_title(title):\n \"\"\"Jekyll posts are stored as 
individual files.\n It makes sense to name each file with the title of the post.\n However, Jekyll doesn't seem to handle file names with spaces or\n non-latin characters. It picks them up when building the site, but the\n actual links will be broken. This function encodes the title in such a way\n that it can be used as a filename for Jekyll posts.\"\"\"\n #\n # You probably don't need the line below if your posts are in English\n #\n latin_title = unidecode.unidecode(title)\n encoded_title = urllib.parse.quote_plus(latin_title.replace(\" \", \"-\"))\n return encoded_title\n\n\ndef parse_previous_link(root):\n \"\"\"Parse the link to the chronologically previous blog entry.\"\"\"\n prev_entry_url = None\n links = root.cssselect(\"a.b-controls-prev\")\n if links:\n prev_entry_url = links[0].get(\"href\")\n if DEBUG:\n print(prev_entry_url)\n return prev_entry_url\n\n\ndef parse_title(root):\n \"\"\"Parse the title of a LiveJournal entry.\"\"\"\n title = None\n h1 = root.cssselect('h1.entry-title')\n if h1:\n title = h1[0].text\n if DEBUG:\n print(title)\n assert title\n return title\n\n\ndef parse_timestamp(root):\n \"\"\"Parse the timestamp of a LiveJournal entry.\n Returns a datetime.datetime instance.\"\"\"\n timestamp = None\n published = root.cssselect(\"time.dt-published\")\n if published:\n # 2013-12-13 20:59:00\n timestamp = datetime.datetime.strptime(\n published[0].text_content(), \"%Y-%m-%d %H:%M:%S\")\n if DEBUG:\n print(timestamp)\n assert timestamp\n return timestamp\n\n\ndef parse_entry_text(root):\n \"\"\"Parse the actual entry text of a LiveJournal entry.\n Returns a UTF-8 encoded byte string.\"\"\"\n #\n # Here we only grab the HTML fragment that corresponds to the entry\n # context.\n # Throw everything else away.\n #\n entry_text = None\n article = root.cssselect(\"article.entry-content\")\n if article:\n entry_text = lxml.etree.tostring(\n article[0], pretty_print=True, encoding=\"utf-8\")\n if DEBUG:\n print(entry_text)\n assert entry_text\n return entry_text\n\n\ndef parse_and_remove_tags(root):\n \"\"\"Returns the tags for a LiveJournalEntry.\n As a side effect, destroy the tags element of the entry.\"\"\"\n tags = []\n a = root.cssselect(\"div.ljtags a\")\n if a:\n tags = [aa.text for aa in a]\n ljtags = root.cssselect(\"div.ljtags\")\n if ljtags:\n ljtags[0].getparent().remove(ljtags[0])\n return tags\n\n\nclass Entry:\n \"\"\"Represents a single LiveJournal entry.\n Includes functions for downloading an entry from a known URL.\"\"\"\n\n def __init__(self, title, text, updated, prev_entry_url, tags):\n self.title = title\n self.text = text\n self.updated = updated\n self.prev_entry_url = prev_entry_url\n self.tags = tags\n\n def save_to(self, destination_dir, overwrite=False):\n \"\"\"Save the entry to the specified directory.\n The filename of the entry will be determined from its title and update\n time.\n The entry will contain a Jekyll header with a HTML fragment\n representing the content.\"\"\"\n title = encode_title(self.title)\n opath = P.join(\n destination_dir, \"%s-%s.html\" %\n (self.updated.strftime(\"%Y-%m-%d\"), title))\n #\n # self.text is currently a UTF-8 encoded string, but prettify turns it\n # into a Unicode string.\n #\n pretty_text = bs4.BeautifulSoup(self.text, \"lxml\").prettify()\n lines = [\"---\", \"title: %s\" % self.title] + HEADERS + \\\n [\"tags: \" + \" \".join(self.tags), \"---\", pretty_text]\n #\n # TODO:\n # If the filenames aren't unique enough (e.g. 
same date, same title),\n # the entries may end up overwriting each other.\n #\n if not overwrite:\n assert not P.isfile(opath)\n with codecs.open(opath, \"w\", \"utf-8\") as fout:\n fout.write(\"\\n\".join(lines))\n\n @staticmethod\n def download(url):\n \"\"\"Download an entry from a URL and parse it.\"\"\"\n if 'format=light' not in url:\n url = '{}{}format=light'.format(url, '&' if '?' in url else '?')\n r = requests.get(url)\n assert r.status_code == 200\n\n root = lxml.html.document_fromstring(r.text)\n title = parse_title(root)\n tags = parse_and_remove_tags(root)\n entry_text = parse_entry_text(root)\n timestamp = parse_timestamp(root)\n prev_entry_url = parse_previous_link(root)\n\n return Entry(title, entry_text, timestamp, prev_entry_url, tags)\n\n\ndef create_parser():\n from optparse import OptionParser\n p = OptionParser(\n \"usage: %prog http://yourusername.livejournal.com/most-recent-entry.html\") # noqa\n p.add_option(\n \"-d\",\n \"--debug\",\n dest=\"debug\",\n type=\"int\",\n default=\"0\",\n help=\"Set debugging level\")\n p.add_option(\n \"\",\n \"--destination\",\n dest=\"destination\",\n type=\"string\",\n default=\"html\",\n help=\"Set destination directory\")\n p.add_option(\n \"-f\",\n \"--force-overwrite\",\n dest=\"overwrite\",\n action=\"store_true\",\n default=False,\n help=\"Overwrite existing files\")\n return p\n\n\ndef main():\n global DEBUG\n p = create_parser()\n options, args = p.parse_args()\n DEBUG = options.debug\n\n if len(args) != 1:\n p.error(\"invalid number of arguments\")\n\n if not P.isdir(options.destination):\n os.mkdir(options.destination)\n\n next_url = args[0]\n\n while next_url is not None:\n print(next_url)\n entry = Entry.download(next_url)\n entry.save_to(options.destination, overwrite=options.overwrite)\n next_url = entry.prev_entry_url\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"365976156","text":"import torch\n\ndef sequence_mask(lengths, max_len=None):\n batch_size = lengths.numel()\n max_len = max_len or lengths.max()\n mask = torch.arange(0, max_len, device=lengths.device)\n mask = mask.type_as(lengths).repeat(batch_size, 1).lt(lengths.unsqueeze(1))\n return mask\n\ndef tile(x, count, dim=0):\n perm = list(range(len(x.size())))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.size())\n out_size[0] *= count\n batch = x.size(0)\n x = x.view(batch, -1) \\\n .transpose(0, 1) \\\n .repeat(count, 1) \\\n .transpose(0, 1) \\\n .contiguous() \\\n .view(*out_size)\n if dim != 0:\n x = x.permute(perm).contiguous()\n return x\n","sub_path":"nslt/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"646357263","text":"# hangman\n\nimport random\n\nname=input(\"Please enter your name: \")\nprint(\"Welcome to hangman\", name)\n\ndef game():\n from GUI import hangman\n restart=input(\"Do you want to start the game y/n? 
\")\n count=0\n total=0\n\n while restart==\"y\":\n list1=open(\"Dictionary.txt\",\"r\")\n word1=list1.readlines() \n word=random.choice(word1)\n word=word.strip(\"\\n\")\n total=total+1\n wordlen=len(word)\n print(\"You will have 8 guesses for each word\")\n rem_guess=8\n again=[]\n\n print(\"The word has\",wordlen,\"letter\")\n blank= \"-\"*len(word)\n blank=list(blank)\n while rem_guess>0:\n if not \"-\" in blank:\n print(\"Congrats!!! you have guessed correctly\")\n print()\n count=count+1\n break\n guess=input(\"Enter any alphabet from a to z: \")\n print()\n\n if guess in again:\n print(\"You have guessed this letter before: \")\n else:\n again.append(guess)\n if guess in word:\n print(\"Right guess\")\n else:\n print(\"Wrong guess\")\n print(blank)\n rem_guess=rem_guess-1\n print(\"you have only\"+\" \" + str(rem_guess)+\" \" + \"guesses remaining\")\n hangman(rem_guess)\n if rem_guess==0:\n print(\"The word was \", word)\n print()\n for i in range(len(word)):\n \n if word[i]==guess:\n blank[i]=guess\n print(blank)\n print()\n print (\"you have\"+\" \" +str(rem_guess)+\" \"+ \"guesses remaining\")\n print()\n\n\n restart=input(\"Do you want to play the game again? y/n? \")\n if restart==\"n\":\n print(\"Your score is \", count, \"out of\" ,total)\n\n\ngame()\n","sub_path":"Hangmanwithfilehandling2.py","file_name":"Hangmanwithfilehandling2.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"358034296","text":"from sys import maxsize\nimport re\n\n\nclass User:\n def __init__(self, fname=None, lname=None, id=None, address=None,\n home=None, mobile=None, work=None,\n all_phones_from_home_page=None,\n email=None, email2=None, email3=None, all_emails_from_home_page =None):\n\n self.fname = fname\n self.lname = lname\n self.id = id\n self.address = address\n self.home = home\n self.mobile = mobile\n self.work = work\n self.all_phones_from_home_page = all_phones_from_home_page\n self.email = email\n self.email2 = email2\n self.email3= email3\n self.all_emails_from_home_page = all_emails_from_home_page\n\n def __repr__(self):\n return \"%s:%s:%s:%s:%s:%s\" % (self.id, self.lname, self.fname, self.address, self.all_phones_from_home_page, self.all_emails_from_home_page)\n\n def __eq__(self, other):\n return (self.id is None or other.id is None or self.id == other.id) \\\n and self.clear(self.lname) == self.clear(other.lname) \\\n and self.clear(self.fname) == self.clear(other.fname)\\\n and self.clear(self.address) == self.clear(other.address)\\\n and self.clear(self.all_phones_from_home_page) == self.clear(other.all_phones_from_home_page)\\\n and self.clear(self.all_emails_from_home_page) == self.clear(other.all_emails_from_home_page)\n\n def id_or_max(self):\n if self.id:\n return int(self.id)\n else:\n return maxsize\n\n def clear(self, s):\n return re.sub(\"[()\\n ]\",\"\",s)\n","sub_path":"model/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"253923282","text":"from __future__ import absolute_import\nimport os\nfrom collections import namedtuple\nimport time\nfrom torch.nn import functional as F\nfrom model.utils.RPN_tools import AnchorTargetCreator, ProposalTargetCreator\n\nfrom torch import nn\nimport torch \n\nLosses = namedtuple('Losses',\n ['rpn_reg_loss',\n 'rpn_classifier_loss',\n 'head_reg_loss',\n 'head_classifier_loss',\n 'total_loss'\n ])\n\ndef 
l1loss(x, t, in_weight, sigma):\n sigma2 = sigma ** 2\n diff = in_weight * (x - t)\n abs_diff = diff.abs()\n flag = (abs_diff.data < (1. / sigma2)).float()\n y = (flag * (sigma2 / 2.) * (diff ** 2) +\n (1 - flag) * (abs_diff - 0.5 / sigma2))\n return y.sum()\n\ndef regressor_loss(pred_loc, gt_loc, gt_label, sigma):\n in_weight = torch.zeros(gt_loc.shape).cuda()\n in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1\n loc_loss = l1loss(pred_loc, gt_loc, in_weight.detach(), sigma)\n loc_loss /= ((gt_label >= 0).sum().float()) \n return loc_loss\n\n\nclass TrainStep(nn.Module):\n \"\"\" wrapper for conveniently training. return losses \"\"\"\n\n def __init__(self, faster_rcnn):\n super(TrainStep, self).__init__()\n\n self.faster_rcnn = faster_rcnn\n self.rpn_sigma = 3.\n self.roi_sigma = 1.\n\n self.anchor_target_creator = AnchorTargetCreator()\n self.proposal_target_creator = ProposalTargetCreator()\n\n self.loc_normalize_mean = faster_rcnn.loc_normalize_mean\n self.loc_normalize_std = faster_rcnn.loc_normalize_std\n\n lr = 1e-3\n params = []\n for key, value in dict(faster_rcnn.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n params += [{'params': [value], 'lr': lr * 2, 'weight_decay': 0}]\n else:\n params += [{'params': [value], 'lr': lr, 'weight_decay': 0.0005}]\n \n self.optimizer = torch.optim.SGD(params, momentum=0.9)\n\n\n def forward(self, inp_img, bboxes, labels, scale):\n\n self.optimizer.zero_grad()\n\n _, _, H, W = inp_img.shape\n img_size = (H, W)\n\n features = self.faster_rcnn.extractor(inp_img)\n\n rpn_locs, rpn_scores, rois, roi_indices, anchor = self.faster_rcnn.rpn(features, img_size, scale)\n\n # Since batch size is one, convert variables to singular form\n bbox = bboxes[0]\n label = labels[0]\n rpn_score = rpn_scores[0]\n rpn_loc = rpn_locs[0]\n roi = rois\n\n sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(\n roi,\n bbox.detach().cpu().numpy(),\n label.detach().cpu().numpy())\n\n sample_roi_index = torch.zeros(len(sample_roi))\n roi_cls_loc, roi_score = self.faster_rcnn.head(\n features,\n sample_roi,\n sample_roi_index)\n\n # RPN losses \n gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(\n bbox.detach().cpu().numpy(),\n anchor,\n img_size)\n gt_rpn_label = torch.from_numpy(gt_rpn_label).cuda().long()\n gt_rpn_loc = torch.from_numpy(gt_rpn_loc).cuda()\n rpn_reg_loss = regressor_loss(\n rpn_loc,\n gt_rpn_loc,\n gt_rpn_label.data,\n self.rpn_sigma)\n\n\n rpn_classifier_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=-1)\n _gt_rpn_label = gt_rpn_label[gt_rpn_label > -1]\n _rpn_score = rpn_score.detach().cpu().numpy()[gt_rpn_label.detach().cpu().numpy() > -1]\n\n #Head Losses\n n_sample = roi_cls_loc.shape[0]\n roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)\n roi_loc = roi_cls_loc[torch.arange(0, n_sample).long().cuda(), \\\n torch.from_numpy(gt_roi_label).cuda().long()]\n gt_roi_label = torch.from_numpy(gt_roi_label).cuda().long()\n gt_roi_loc = torch.from_numpy(gt_roi_loc).cuda()\n\n head_reg_loss = regressor_loss(\n roi_loc.contiguous(),\n gt_roi_loc,\n gt_roi_label.data,\n self.roi_sigma)\n\n head_classifier_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())\n\n losses = [rpn_reg_loss, rpn_classifier_loss, head_reg_loss, head_classifier_loss]\n losses = losses + [sum(losses)]\n all_losses = Losses(*losses)\n all_losses.total_loss.backward()\n self.optimizer.step()\n all_losses = {k: v.item() for k, v in all_losses._asdict().items()}\n return 
all_losses\n\n\n\n\n\n","sub_path":"trainstep.py","file_name":"trainstep.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"543379855","text":"#Introducao a programacao de computadores\n#Professor: Jucimar Junior\n#Ana Jessye Almeida Antunes- 1615310046\n#Kylciane Cristiny Lopes Freitas - 1615310052\n#Franklin Yuri Gonçalves dos Santos - 1615310033\n\n#20) Um fatorial exponencial é um inteiro positivo N elevado à potência de N-1, que por sua vez é elevado à potência de N-2 e assim em diante. Ou seja, Faça uma função recursiva que receba um número inteiro positivo N e retorne o fatorial exponencial desse número.\n\n\ndef potencia(base,expoente):\n if base==0 and expoente==0:\n return 1\n if expoente==0:\n return 1\n if base==0:\n return 0\n else:\n calc=base*potencia(base,expoente-1)\n return calc\n\ndef fatorial(n):\n if n==1 or n==0:\n return 1\n else:\n return n*fatorial(n-1)\n \ndef fatexponencial(n):\n x=n\n if x==1:\n return potencia(n,x-1)\n else:\n return potencia(n,fatorial(x-1))\n \n \nn=int(input(\"Informe a base: \"))\nfat=fatexponencial(n)\nprint(fat)\n\n","sub_path":"lista6/Equipe5/ipc_lista6.20.py","file_name":"ipc_lista6.20.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"197343584","text":"# Copyright 2020, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.research.analytics.heavy_hitters import heavy_hitters_utils as hh_utils\n\n\nclass HeavyHittersUtilsTest(tf.test.TestCase):\n\n def test_top_k(self):\n signal = ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'e']\n self.assertEqual(hh_utils.top_k(signal, 1), {'a': 3})\n self.assertEqual(hh_utils.top_k(signal, 2), {'a': 3, 'b': 2, 'c': 2})\n\n def test_precision(self):\n signal = {'a': 3, 'b': 2, 'c': 1, 'd': 0}\n\n ground_truth = {'a': 3, 'b': 2, 'c': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.precision(ground_truth, signal, 2), 1.0)\n\n ground_truth = {'a': 3, 'c': 2, 'b': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.precision(ground_truth, signal, 2), 0.5)\n\n ground_truth = {'a': 3, 'c': 2, 'b': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.precision(ground_truth, signal, 3), 1.0)\n\n ground_truth = {'a': 3, 'd': 2, 'b': 2, 'c': 2}\n self.assertAlmostEqual(hh_utils.precision(ground_truth, signal, 3), 1.0)\n\n def test_recall(self):\n signal = {'a': 3, 'b': 2, 'c': 1, 'd': 0}\n\n ground_truth = {'a': 3, 'b': 2, 'c': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.recall(ground_truth, signal, 2), 1.0)\n\n ground_truth = {'a': 3, 'c': 2, 'b': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.recall(ground_truth, signal, 2), 0.5)\n\n ground_truth = {'a': 3, 'c': 2, 'b': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.recall(ground_truth, signal, 3), 1.0)\n\n ground_truth = {'a': 3, 'd': 2, 'b': 2, 'c': 2}\n 
self.assertAlmostEqual(hh_utils.recall(ground_truth, signal, 3), 0.75)\n\n def test_f1_score(self):\n signal = {'a': 3, 'b': 2, 'c': 1, 'd': 0}\n\n ground_truth = {'a': 3, 'b': 2, 'c': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.f1_score(ground_truth, signal, 2), 1.0)\n\n ground_truth = {'a': 3, 'c': 2, 'b': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.f1_score(ground_truth, signal, 2), 0.5)\n\n ground_truth = {'a': 3, 'c': 2, 'b': 1, 'd': 0}\n self.assertAlmostEqual(hh_utils.f1_score(ground_truth, signal, 3), 1.0)\n\n ground_truth = {'a': 3, 'd': 2, 'b': 2, 'c': 2}\n self.assertAlmostEqual(\n hh_utils.f1_score(ground_truth, signal, 3), 0.85714285)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"tensorflow_federated/python/research/analytics/heavy_hitters/heavy_hitters_utils_test.py","file_name":"heavy_hitters_utils_test.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"85786478","text":"# Author : Bowen Shaner\n# For : Personal Project\n# Begun : 6-18-17\n# Last Edited : \n# Desc : Sorting driver for robot to alphabatize cards with fewest number of stack operations.\n# Two main priorities are minimizing card handling and using few stacks.\n# One way operation (one to many, restack) is preferable for mechanical simplicity.\n\n# Approach: Use the central (input) stack and four bin (reciever) stacks.\n# Since stack contents should be known (else loop through stack), find\n# value quartiles and bin the cards. Sort sufficiently small stacks\n# using O(n*log(n)) stack sorting adapted for number of helper stacks.\n# Every pop() and every append() would be accompanied by machine action.\n\n# This isn't ideal. There should be a way to find the sorting order, by only\n# removing items from stack[a,0] and adding to stack[b,-1] ... 
that minimizes\n# the number of card moves.\n# Finding this order is not super easy but since the starting stack is known\n# there must be a way to find it\n\nimport numpy as np\nimport json\n\ndef binCards(cardStack, numQ):\n sortCdNames = sorted(cardStack)\n sizeQ = len(sortCdNames)/numQ # Quartile/octile size\n return [sortCdNames[sizeQ*l] for l in range(1,numQ)]\n \ndef splitCards(cardStack, numQ, stkN):\n helpers = [[]] * stkN\n cutoffs = binCards(cardStack, numQ)\n for i in range(0,len(cardStack)-1): #cards come off the bottom and onto the top vvv\n helpN = sorted(cutoffs+[cardStack[i]]).index(cardStack[i]) #Use cutoffs to choose bin\n helpers[helpN].append(cardStack[i]) #place card into bin\n return helpers\n\ndef stackSort(cardStack, stkN):\n if stkN < 2:\n raise ValueError(\"Must have at least two stacks\")\n helpers = [[]] * stkN\n cutoffs = binCards(cardStack, stkN)\n current = cardStack.pop(0)\n while len(cardStack) > 0:\n helpN = sorted(cutoffs+[current]).index(current) #Destination stack\n storeN = (helpN + 1) % stkN #Temporary storage stack\n countShifted = 0\n \n while helpers[helpN][-1] < current: # As on top of stack, Zs on bottom (should be None-tolerant)\n countShifted += 1\n helpers[storeN].append(helpers[helpN].pop(-1)) #Lift off of destination stack to storage stack until current card can be placed\n \n while countShifted != 0:\n while helpers[helpN][-1] >= current and helpers[storeN][-1] < current and helpN == sorted(cutoffs+[current]).index(current):\n helpers[helpN].append(current) # Slip in cards from source stack that fit along the way\n current = cardStack.pop(0)\n \n while helpers[helpN][-1] < current and helpN == sorted(cutoffs+[current]).index(current): # If we need to dig deeper, don't bother putting the rest back on\n countShifted += 1\n helpers[storeN].append(helpers[helpN].pop(-1)) #Lift off of destination stack to storage stack until current card can be placed\n \n helpers[helpN].append(helpers[storeN].pop(-1)) #Put back borrowed cards\n countShifted -= 1\n \n # Sorting would be done in len(cardStack)^2 / (2 * stkN) actions by insertion sort-like worst case\n # It would be closer to len(cardStack)^2 / (4 * stkN) assuming on average it moves half a stack for each insertion\n # However, since the stacks are not completely reassembled until another stack has to be accessed: len(cardStack)^2 / (8 * stkN)\n # This isn't particularly good speedup, but since the cards can be binned from 1000s down to hundreds, allowing cards to only be\n # sorted with near neighbors, we could multiplex (with 4x hardware (no camera recognition needed on slaves) or in time) to get \n # a runtime more like: len(cardStack/muxBins)^2 / (8 * stkN) --- More multiplexing is needed for more cards bc bad scaling\n \n # These card moves would be disproportionately distributed to the first cards added to a stack and to the most A (early alphabet\n # cards in each stack. To minimize number of card moves, wear on cards, and time spent sorting multiplexing in space and time is key.\n # Therefore a re-loading system would be extremely useful to automate multiplexing in time. 
Binning would linearly reduce time by\n # (Number of Bins) * (Number of Times Binned)\n \n \ndef randCardList(nCards):\n fjson = open('AllData.json')\n refData = json.load(fjson)\n cdNames = refData.keys()\n return np.random.choice(cdNames, nCards)\n ","sub_path":"Sort_Stack.py","file_name":"Sort_Stack.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"71868715","text":"import MeCab\nimport pandas as pd\nfrom torchtext.vocab import build_vocab_from_iterator\nimport torchtext.transforms as T\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nimport torchmetrics\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\npath = 'data/novel/train.csv'\nmecab = MeCab.Tagger('-Owakati')\n\n\ndef tokenize(x):\n return mecab.parse(x).split(' ')[:-1]\n\n\ndf = pd.read_csv(path, header=None, names=['text', 'label'])\n\n# 分かち書きを実行\n# df['text']は、Seriesになる\ndf['text'] = df['text'].apply(tokenize)\n# labelは文字列に変換\ndf['label'] = df['label'].astype(str)\n\n# 辞書作成 も辞書に含める\n# 文字列のみ\n# textの辞書(text_vocab)\ntext_vocab = build_vocab_from_iterator(df['text'], specials=['', ''])\ntext_vocab.set_default_index(text_vocab[''])\n# print(text_vocab.get_stoi())\n# labelの辞書(label_vocab)\nlabel_vocab = build_vocab_from_iterator(df['label'])\n# print(label_vocab.get_stoi())\n\n\n# transform生成\n# テキストは、辞書による変換(数値化)とパディング、Tensor型への変換を行います。パディングは、ミニバッチごとに系列長を統一するため不足部分がパディングされます。\n# ラベルは、辞書による変換(数値化)とTensor型への変換を行います。\ntext_transform = T.Sequential(\n T.VocabTransform(text_vocab),\n T.ToTensor(padding_value=text_vocab[''])\n)\nlabel_transform = T.Sequential(\n T.VocabTransform(label_vocab),\n T.ToTensor()\n)\n\n# ミニバッチ時のデータ変換関数\n# リスト内包表記\n# x = [リストの要素を計算する式 for 計算で使用する変数 in 反復可能オブジェクト]\ndef collate_batch(batch):\n # 2次元の場合、LSTMに入れるため、shape(Batch_size[行], vocabrary[列])を転置する\n texts = text_transform([text for (text, label) in batch]).T\n labels = label_transform([label for (text, label) in batch])\n return texts, labels\n\n\nclass Net(pl.LightningModule):\n\n def __init__(self, n_input, n_embed, n_hidden, n_layers, n_output):\n super().__init__()\n self.embed = nn.Embedding(n_input, n_embed, padding_idx=1)\n # 双方向LSTM : bidirectional=True\n self.lstm = nn.LSTM(n_embed, n_hidden, n_layers, bidirectional=True)\n # 前方向と後ろ方向の最後の隠れ層ベクトルを結合したものを受け取るので、n_hiddenは2倍にしている\n self.fc = nn.Linear(n_hidden * 2 , n_output)\n\n self.train_acc = torchmetrics.Accuracy()\n self.val_acc = torchmetrics.Accuracy()\n self.test_acc = torchmetrics.Accuracy()\n\n def forward(self, x):\n x = self.embed(x)\n # (h, c)はタブルのそれぞれの要素を分けて取得\n x, (h, c) = self.lstm(x)\n # 双方向かつlayersが1の場合、\n # h[0]がforward(前から後ろへ)\n # h[1]がbackward(後ろから前へ)\n h_forward = h[::2, :, :]\n h_backward = h[1::2, :, :]\n bih = torch.cat([h_forward[-1], h_backward[-1]], dim=1)\n x = self.fc(bih)\n return x\n\n def training_step(self, batch, batch_idx):\n x, t = batch\n y = self(x)\n loss = F.cross_entropy(y, t)\n self.log('train_loss', loss, on_step=True, on_epoch=True)\n self.log('train_acc', self.train_acc(y, t), on_step=True, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, t = batch\n y = self(x)\n loss = F.cross_entropy(y, t)\n self.log('val_loss', loss, on_step=False, on_epoch=True)\n self.log('val_acc', self.val_acc(y, t), on_step=False, on_epoch=True)\n return loss\n\n def test_step(self, batch, batch_idx):\n x, t = batch\n y = self(x)\n loss = 
F.cross_entropy(y, t)\n self.log('test_loss', loss, on_step=False, on_epoch=True)\n self.log('test_acc', self.test_acc(y, t), on_step=False, on_epoch=True)\n return loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=0.01)\n\n\ndef delete_df(df, df_dalete):\n df_temp = pd.merge(df, df_dalete, how='left', left_index=True, right_index=True)\n df_temp = df_temp[df_temp[\"text_y\"].isna()][['text_x', 'label_x']]\n df_temp.rename(columns={'text_x': 'text', 'label_x': 'label'}, inplace=True)\n return df_temp\n\n\n# df.valuesをtrainとvalidとtestに分ける\n# train : val : text = 60% : 20% : 20%\ndf_train = df.sample(frac=0.6, random_state=0)\n# print(pd.merge(df, df_train, how='left', left_index=True, right_index=True).head(1))\ndf = delete_df(df, df_train)\ndf_val = df.sample(frac=0.5, random_state=0)\ndf_test = delete_df(df, df_val)\n\nprint(df_train.shape)\nprint(df_val.shape)\nprint(df_test.shape)\n\n\n# バッチサイズ\nbatch_size = 20\n\n# Data Loadkerを用意\ntrain_loader = DataLoader(df_train.values, batch_size, shuffle=True, collate_fn=collate_batch)\nval_loader = DataLoader(df_val.values, batch_size, collate_fn=collate_batch)\ntest_loader = DataLoader(df_test.values, batch_size, collate_fn=collate_batch)\n\n# 詳細設定\nn_input = len(text_vocab)\nn_embed = 100\nn_hidden = 100\nn_layers = 3\n# n_outputは、labelの種類の数を指定\nn_output = 4\n\n# 学習の実行\npl.seed_everything(0)\nnet = Net(n_input, n_embed, n_hidden, n_layers, n_output)\ntrainer = pl.Trainer(max_epochs=3)\ntrainer.fit(net, train_loader, val_loader)\n\n# テストデータに対する検証\nresults = trainer.test(dataloaders=test_loader)\nprint(results)\n","sub_path":"ai/pyTorch/nlp/mecab2_lstm_bidirectional.py","file_name":"mecab2_lstm_bidirectional.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"627827993","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom glob import glob\n\nHM2012 = np.loadtxt('/Users/krogager/coding/popratio_uvb/UVB_HM2012.txt')\n\nz_HM12 = HM2012[0, 1:]\ndata = HM2012[1:, :]\nwl_HM12 = data[:,0]\nspec_HM12 = data[:, 1:].T\n\n# -- plot spectra for different redshifts:\nz_to_plot = [0., 3.]\ncolors = ['k-', 'b--']\n\npath = '/Users/krogager/Downloads/KS_2018_EBL/Fiducial_Q18/'\nall_files = np.sort(glob(path+'*.txt'))\nall_z = [fname.split('_')[-1].strip('.txt').strip() for fname in all_files]\n\nplt.close('all')\nfig = plt.figure()\nax = fig.add_subplot(111)\nfor z, col in zip(z_to_plot, colors):\n idx = np.argmin(np.abs(z_HM12 - z))\n ax.loglog(wl_HM12, spec_HM12[idx], col, label='Haardt & Madau (2012), z=%.1f'%z)\n nu_HM12 = 2.99e18 / wl_HM12\n Jnu_HM12 = spec_HM12[idx]\n UV = (wl_HM12 > 911.76) & (wl_HM12 < 2066.67)\n Iuv_HM12 = np.abs(np.trapz(Jnu_HM12[UV], nu_HM12[UV])) * 4*np.pi\n\n # -- load Khaire & Srianand 2019 for comparison\n fid = all_z.index('%.1f'%z)\n fname = all_files[fid]\n KS19 = np.loadtxt(fname)\n wl_KS19 = KS19[:, 0]\n Jnu = KS19[:, 1]\n ax.loglog(wl_KS19, Jnu, col[0]+':', label='Khaire & Srianand (2019), z=%.1f'%z)\n nu = 2.99e18 / wl_KS19\n UV = (wl_KS19 > 911.76) & (wl_KS19 < 2066.67)\n Iuv = np.abs(np.trapz(Jnu[UV], nu[UV])) * 4*np.pi\n\nax.set_xlabel(u\"Wavelength [Å]\", fontsize=14)\nax.set_ylabel(r\"$J_{\\nu}$ [erg s$^{-1}$ cm$^{-2}$ Sr$^{-1}$ Hz$^{-1}$]\", 
fontsize=14)\nax.set_xlim(xmin=1.e-7)\nax.set_ylim(ymin=1.e-35)\nax.legend()\nfig.tight_layout()\nplt.savefig(\"UVB_comparison.pdf\")\n\n","sub_path":"plot_spectra.py","file_name":"plot_spectra.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"563651115","text":"import dataset.match_dataset as match_dataset\nimport featureset.match_featureset as match_featureset\nimport util.vocab_utils as vocab_utils\nimport util.classifier_utils as classifier_utils\nimport util.dataset_utils as dataset_utils\nfrom util.config_utils import get_dir_cfg\nfrom util.config_utils import get_learning_cfg\nfrom util.model_utils import tidy_up\nfrom util.model_utils import predict\n\n\nimport logging\nimport json\nimport time\n\n\nlogger = logging.getLogger(__name__)\nlocal_dir = get_dir_cfg()['local']\n\ndef create(train, label, label_values, model_dir, train_filename, test_filename, init):\n\n aws_model_dir = 'models/'+model_dir\n tf_models_dir = local_dir+'/'+aws_model_dir\n\n learning_cfg = get_learning_cfg(model_dir)\n\n logger.info(learning_cfg)\n\n logger.info('team vocab started...')\n team_file = vocab_utils.create_vocab(\n url=vocab_utils.ALL_TEAMS_URL,\n filename=vocab_utils.TEAMS_FILE,\n player='default')\n logger.info('team vocab completed')\n\n\n logger.info('player vocab started...')\n player_file = vocab_utils.create_vocab(\n url=vocab_utils.PLAYERS_URL,\n filename=vocab_utils.PLAYERS_FILE,\n player='default')\n logger.info('[player vocab completed')\n\n # and the other numerics. they will be read from a CSV / or direct from mongo more likely. yes. from mongo.\n # and review checkpoints, to only train with the newest data? or build from scratch. lets see.\n #need to add the label field too.\n\n feature_columns = match_featureset.create_feature_columns(team_vocab=team_file, player_vocab=player_file)\n\n\n # Build 2 hidden layer DNN with 10, 10 units respectively. (from example will enrich at some point).\n classifier = classifier_utils.create(\n feature_columns=feature_columns,\n classes=len(label_values),\n model_dir=aws_model_dir,\n learning_cfg=learning_cfg,\n init=init)\n\n if train:\n\n logger.info(label_values)\n\n if learning_cfg['evaluate'] and test_filename is not None:\n (train_x, train_y), (test_x, test_y) = match_dataset.load_data(\n train_path=local_dir+train_filename,\n test_path=local_dir+test_filename,\n y_name=label,\n convert=label_values)\n\n else:\n (train_x, train_y) = match_dataset.load_train_data(\n train_path=local_dir+train_filename,\n y_name=label,\n convert=label_values)\n\n # Train the Model.\n classifier.train(\n input_fn=lambda:dataset_utils.train_input_fn(train_x, train_y,learning_cfg['batch_size']),steps=learning_cfg['steps'])\n\n if learning_cfg['evaluate'] and test_filename is not None:\n # Evaluate the model. not much use anymore. but could use the first test file. 
makes sense\n eval_result = classifier.evaluate(\n input_fn=lambda:dataset_utils.eval_input_fn(test_x, test_y,learning_cfg['batch_size']))\n\n logger.info('\\nTest set accuracy: {accuracy:0.3f}\\n'.format(**eval_result))\n\n if learning_cfg['aws_debug']:\n with open(local_dir+'sample.json') as f:\n sample = json.load(f)\n\n\n predict(\n classifier=classifier,\n predict_x=sample,\n label_values=label_values)\n\n if init:\n logger.info('tidying up')\n tidy_up(\n tf_models_dir=tf_models_dir,\n aws_model_dir=aws_model_dir,\n team_file=team_file,\n train_filename=train_filename)\n\n time.sleep(30)\n\n\n return classifier\n\n\n\n\n\n\n","sub_path":"src/model/match_model.py","file_name":"match_model.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"597567991","text":"\"\"\"\nDictionary app with command line interface.\nInput an English word and get a dictionary description.\n\"\"\"\n\nfrom json import load as loadJSON\nfrom difflib import get_close_matches\n\ndata = loadJSON(open(\"App1_Dictionary/data.json\"))\n\n\ndef translate(word) -> str:\n word = word.lower()\n matches = get_close_matches(word, data.keys(), n=5, cutoff=0.7)\n \n if word in data:\n return data[word]\n\n elif word.title() in data: # convert 1st letter to capital\n return data[word.title()]\n \n elif word.upper() in data:\n return data[word.upper()]\n\n elif len(get_close_matches(word, matches)) > 0:\n confirmation = input('Did you mean word \"{}\" instead (Y/N)? '.format(matches[0]))\n confirmation = confirmation.lower()\n\n if confirmation == 'y':\n return(data[matches[0]])\n\n elif confirmation == 'n':\n return \"Sorry, the word doesn't exist.\"\n\n else:\n return \"I don't understand this input.\"\n\n else:\n return \"Sorry, the word doesn't exist.\"\n\nif __name__ == \"__main__\":\n word = input(\"Enter word: \")\n \n output = translate(word)\n if type(output) == list:\n for item in output:\n print(item)\n else:\n print(output)\n","sub_path":"App1_Dictionary_JSON/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"95428056","text":"from django.shortcuts import render, HttpResponse\nfrom django.views.generic import TemplateView, RedirectView, FormView\nfrom Questions.forms import SoruyaGitForm\nfrom Questions.models import Question, Testler, Statistics\nfrom Posts.models import KONU, User\n\n\nclass soruCoz(FormView):\n template_name = \"lounge/SORU_COZ_IN_USER.html\"\n form_class = SoruyaGitForm\n success_url = \"/akis_yonlendir/\"\n\n def get_context_data(self, **kwargs):\n context=super(soruCoz, self).get_context_data(**kwargs)\n context['konular'] = KONU.objects.all()\n return context\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n errormsg = \"Bu sayfaya erişebilmek için öncelikle lütfen giriş yapınız.\"\n return render(self.request, \"entrance/LOGIN_REQUIRED_PAGE.html\", {\n 'error': errormsg,\n })\n else:\n return super(soruCoz, self).dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n konu = form.cleaned_data['konu']\n testler = Testler.objects.filter(test_konu=konu)\n soru_adeti = Question.objects.filter(question_test=testler[0]).count()\n return render(self.request, 'corridor/AKIS_GORUNTULE.html', {\n 'konu': konu,\n 'testler': testler,\n 'soru_adeti': soru_adeti\n })\n\n\nfrom datetime import datetime, timedelta\n\n\ndef 
TesteGit(request, id):\n test = Testler.objects.filter(id=id)\n now = datetime.now()\n new_time = timedelta(minutes=15)+now\n dt_string = new_time.strftime(\"%B %d, %Y %H:%M:%S\")\n sorular = Question.objects.filter(question_test=test[0]).order_by(\"question_number\")\n soru_adeti = sorular.count()\n return render(request, 'lounge/TEST_FIELD_IN_USER.html', {\n 'sorular': sorular,\n 'test': test[0],\n 'sure': dt_string,\n 'soru_adeti': soru_adeti,\n })\n\n\ndef testiTamamla(request):\n if request.method == 'POST':\n ogrenci_id = request.POST.get('ogrenci_id')\n print(ogrenci_id)\n ogrenci=User.objects.filter(id=ogrenci_id)\n print(ogrenci)\n test_id = request.POST.get('test_id')\n print(test_id)\n test = Testler.objects.filter(id=test_id)\n print(test)\n# -----------------------------------------\n soru_adet = request.POST.get('soru_adet', False)\n dogru_adet = request.POST.get('dogru_adet', False)\n yanlis_adet = request.POST.get('yanlis_adet', False)\n Statistics.objects.create(\n ogrenci = request.user.user,\n test = test,\n soru_adet=soru_adet,\n dogru_adet=dogru_adet,\n yanlis_adet=yanlis_adet\n )\n Statistics.save()\n return HttpResponse('')\n# bu şekilde olmadığı için bu def yerine test_field_in_user.html dosyasını açan view ı form view olarak çalıştırıp, formu en sonda görünür hale getirip formview ile sonuca ulaşmayı dene. Bugünlük bukadar yeter\n\n","sub_path":"Hayyam/Questions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"459485388","text":"#!/usr/bin/env python\n\nimport psutil\n\ndef bytes2human(n):\n symbols=(\"K\",\"M\",\"G\",\"T\",\"P\",\"E\",\"Z\",\"Y\")\n prefix={}\n for i,s in enumerate(symbols):\n prefix[s]=1<<(i+1)*10\n for s in reversed(symbols):\n if n>=prefix[s]:\n value=float(n)/prefix[s]\n print(value)\n return '%.1f%s' %(value,s)\n return \"%sB\" %n\nprint(bytes2human(psutil.virtual_memory().total))\n","sub_path":"monitor_test/bytes2human.py","file_name":"bytes2human.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"268176320","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom multimedia_test_case import TestMultimedia\n\n\nclass TestFMStress(TestMultimedia):\n def __init__(self, doc, level, owner):\n super(TestFMStress, self).__init__(doc, level, owner)\n self.package_name = \"cn.whaley.cases.Helios.media.video.local.LocalTestSets\"\n self.test_content = \"testVideoInSpecProfile -e profileName \" + \"标准模式\"\n\n\nif __name__ == \"__main__\":\n TestFMStress(\n \"Set profile in standard mode then play 4K\", 'p1', \"wangdd\").run()\n","sub_path":"Python_Java_UIautomator/case/platform/multiMedia/test_local_profile_standard.py","file_name":"test_local_profile_standard.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"361481418","text":"from __future__ import unicode_literals\nimport h5py\nimport numpy as np\nimport utils\n\nprefix = '/home/lllcho/Documents/season_1/'\ntrain_prefix = prefix + 'training_data/'\ntest_prefix = prefix + 'test_set_1/'\ncluster2id, id2cluster = utils.get_cluster_map(test_prefix + 'cluster_map/cluster_map')\n\ndates,time_idx,dests=21,288,66\ntrain_orders_dest = np.zeros((dates, time_idx, dests, 67), dtype=np.float32) # 去向地\ntrain_orders_price = np.zeros((dates, time_idx, dests, 67), dtype=np.float32) # 
价格\ntrain_orders_gap = np.zeros((dates, time_idx, dests), dtype=np.float32) # gap\ntrain_weathers = np.zeros((dates, time_idx, 3), dtype=np.float32)\ntrain_traffics = np.zeros((dates, time_idx, dests, 4), dtype=np.float32)\n\nfor idx, date in enumerate(range(1, 22)):\n print ('date {0:0>2}'.format(date))\n order = utils.get_orders(train_prefix + 'order_data/order_data_2016-01-{0:0>2}'.format(date))\n order = order.loc[:, ['driver_id', 'start_id', 'dest_id', 'price', 'time_idx']]\n order['start_id'] = order['start_id'].map(lambda x: cluster2id[x])\n order['dest_id'] = order['dest_id'].map(lambda x: cluster2id[x] if x in cluster2id.keys() else 0)\n\n dest = np.zeros((time_idx, dests, 67), dtype=np.float32)\n price = np.zeros((time_idx, dests, 67), dtype=np.float32)\n gap = np.price = np.zeros((time_idx, dests), dtype=np.float32)\n for i in range(order.shape[0]):\n t = order.iloc[i]\n if t['driver_id'] == 'nan':\n gap[t['time_idx'], t['start_id'] - 1] += 1\n else:\n dest[t['time_idx'], t['start_id'] - 1, t['dest_id']] += 1\n price[t['time_idx'], t['start_id'] - 1, t['dest_id']] += t['price']\n\n train_orders_dest[idx] = dest\n train_orders_price[idx] = price\n train_orders_gap[idx] = gap\n\n weather = utils.get_weather(train_prefix + 'weather_data/weather_data_2016-01-{0:0>2}'.format(date))\n weather = weather.iloc[:, 1:].groupby('time_idx').apply(np.mean)\n weather = weather.reindex(range(time_idx), method='nearest')\n weather = weather.as_matrix(['weather', 'temp', 'pm25'])\n train_weathers[date - 1] = weather.astype(np.float32)\n\n traffic = utils.get_traffic(train_prefix + 'traffic_data/traffic_data_2016-01-{0:0>2}'.format(date))\n traffic['district'] = traffic['district'].map(lambda x: cluster2id[x])\n group = traffic.groupby('district')\n dists = traffic.district.unique().tolist()\n traffs = np.zeros((time_idx, dests, 4), dtype=np.float32)\n for dist in dists:\n traff = group.get_group(dist)\n traff = traff.loc[:, ['tj1', 'tj2', 'tj3', 'tj4', 'time_idx']]\n traff = traff.groupby('time_idx').apply(np.mean).reindex(range(time_idx), method='nearest')\n traff = traff.as_matrix(columns=['tj1', 'tj2', 'tj3', 'tj4'])\n traffs[:, dist - 1, :] = traff.astype(np.float32)\n train_traffics[idx] = traffs\n\nwith h5py.File('train_data.h5', 'w') as f:\n f.create_dataset('dest', data=train_orders_dest)\n f.create_dataset('price', data=train_orders_price)\n f.create_dataset('gap', data=train_orders_gap)\n f.create_dataset('weathers', data=train_weathers)\n f.create_dataset('traffics', data=train_traffics)\n","sub_path":"DiDi Competition/code/train_feature.py","file_name":"train_feature.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"519745163","text":"'''\nUnused imports\n'''\nimport sys #used to make pauses to code\nimport time #not sure (could use to time the program)\nimport math #not sure\nfrom matplotlib.dates import DateFormatter\n'''\nImports\n'''\nimport os #used for setting that lets tensor flow\nimport json #used in the config files:\n #confinWithoutTrends.json and configWithTrends.json\n\nimport matplotlib.pyplot as plt #used for plotting\nimport pandas as pd #used for databases\nimport numpy as np #used for calculations\n\nimport matplotlib.dates as mdates #dates for plotting/ estimating\n\nimport datetime #datetime for configuring which dates to extract from getStocks\n\nimport time\n\nfrom core.data_processor_HP import DataLoader #no error, from the core folder\nfrom core.model import 
Model\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True' #setting that lets tensorflow run\n\n#below not really used, seems to be for single prediction (not multiple)\ndef plot_results(predicted_data, true_data):\n fig = plt.figure(facecolor='white')\n ax = fig.add_subplot(111)\n ax.plot(true_data, label='True Data')\n plt.plot(predicted_data, label='Prediction')\n plt.legend()\n plt.show() #\n\ndef plot_results_multiple(predicted_data, true_data, prediction_len, ticker, isTrends, filename, split):\n '''\n Plots results from multiple predictions\n '''\n\n fig = plt.figure(facecolor='white')\n ax = fig.add_subplot(111)\n\n\n dataframe = pd.read_csv(filename)\n i_split = int(len(dataframe) * split) + prediction_len\n\n dates = mdates.date2num(pd.to_datetime(dataframe.iloc[i_split:len(dataframe), 0]))\n\n\t# Pad the list of predictions to shift it in the graph to it's correct start\n for i, data in enumerate(predicted_data):\n padding = [None for p in range(i * prediction_len)]\n plt.plot_date(dates[0:(i+1) * prediction_len], np.transpose(np.array(padding + data)), label='Prediction', fmt=\"-\")\n\n\n months = mdates.MonthLocator() # every month\n months_fmt = mdates.DateFormatter('%Y-%m')\n\n plt.plot_date(dates, true_data, fmt=\"-\", color=\"cornflowerblue\", linewidth=\"0.5\")\n\n ax.xaxis.set_major_locator(months)\n ax.xaxis.set_major_formatter(months_fmt)\n\n # ax.plot((np.array(dataframe.iloc[i_split:len(dataframe), 0])),true_data)\n\n # format the coords message box\n ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n ax.grid(True)\n\n fig.autofmt_xdate()\n\n\n if isTrends:\n plt.title(str(ticker)+\" with Trends\")\n plt.savefig(str(ticker)+\"_with_Trends.png\")\n print(\"saved\")\n else:\n plt.title(str(ticker)+\" without Trends\")\n plt.savefig(str(ticker)+\"_without_Trends.png\")\n print(\"saved\")\n\n\n plt.show()\n\n\ndef plot_training(predicted_data, true_data, prediction_len, ticker, isTrends, filename, split):\n '''\n Plots results from training\n '''\n\n fig = plt.figure(facecolor='white', figsize=(6.4*7, 5))\n\n ax = fig.add_subplot(111)\n\n\n dataframe = pd.read_csv(filename)\n i_split = int(len(dataframe) * split)\n\n dates = mdates.date2num(pd.to_datetime(dataframe.iloc[0:i_split-prediction_len, 0]))\n\n plt.plot_date(dates, predicted_data, color= 'red', label='Prediction', fmt=\"-\")\n\n\n months = mdates.MonthLocator() # every month\n months_fmt = mdates.DateFormatter('%Y-%m')\n\n plt.plot_date(dates, true_data, fmt=\"-\", color=\"cornflowerblue\", linewidth=\"0.5\")\n\n ax.xaxis.set_major_locator(months)\n ax.xaxis.set_major_formatter(months_fmt)\n\n # ax.plot((np.array(dataframe.iloc[i_split:len(dataframe), 0])),true_data)\n\n # format the coords message box\n ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n ax.grid(True)\n\n fig.autofmt_xdate()\n\n\n if isTrends:\n plt.title(str(ticker)+\" with Trends\")\n plt.savefig(str(ticker)+\"_with_Trends.png\")\n print(\"saved\")\n else:\n plt.title(str(ticker)+\" without Trends\")\n plt.savefig(str(ticker)+\"_without_Trends.png\")\n print(\"saved\")\n\n\n plt.show()\n\ndef plot_future(predicted_data, true_data, prediction_len, ticker, isTrends, filename, split):\n '''\n Plots results from training\n '''\n\n fig = plt.figure(facecolor='white')\n\n ax = fig.add_subplot(111)\n\n\n dataframe = pd.read_csv(filename)\n i_split = int(len(dataframe) * split)\n\n dates = mdates.date2num(pd.to_datetime(dataframe.iloc[0:i_split-prediction_len, 0]))\n\n plt.plot_date(dates, predicted_data, color= 'red', label='Prediction', 
fmt=\"-\")\n\n\n months = mdates.MonthLocator() # every month\n months_fmt = mdates.DateFormatter('%Y-%m')\n\n plt.plot_date(dates, true_data, fmt=\"-\", color=\"cornflowerblue\", linewidth=\"0.5\")\n\n ax.xaxis.set_major_locator(months)\n ax.xaxis.set_major_formatter(months_fmt)\n\n # ax.plot((np.array(dataframe.iloc[i_split:len(dataframe), 0])),true_data)\n\n # format the coords message box\n ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n ax.grid(True)\n\n fig.autofmt_xdate()\n\n\n if isTrends:\n plt.title(str(ticker)+\" with Trends\")\n plt.savefig(str(ticker)+\"_with_Trends.png\")\n print(\"saved\")\n else:\n plt.title(str(ticker)+\" without Trends\")\n plt.savefig(str(ticker)+\"_without_Trends.png\")\n print(\"saved\")\n\n\n plt.show()\n\n\n\ndef main():\n\n #MAKE SURE BOTH DATASETS (yahoo stock and google trends) EXACT SAME LENGTH AND FILLED\n '''\n Runs main code. TODO: Make it into a function that inputs: \"Ticker\", \"Dates of Interest\", \"Trendword 1\", \"Trendword 2\", etc...\n\n Inputs: None\n Outputs: Plot with stock fluctuations as a percent change from the start of window\n '''\n \n configs = json.load(open('configWithTrends.json', 'r'))\n \n #Creates a new DataLoader object, see core/data_processor to see what it does.\n data = DataLoader(\n configs['data']['filename'],\n configs['data']['train_test_split'],\n configs['data']['columns']\n )\n\n #Creates model save directory\n if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])\n\n #Creates a new Model object, see core/model to see what it does.\n model = Model()\n model.build_model(configs)\n x, y = data.get_train_data(\n seq_len=configs['data']['sequence_length'],\n normalise=configs['data']['normalise']\n )\n\n # # out-of memory generative training\n # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])\n # model.train_generator(\n # data_gen=data.generate_train_batch(\n # seq_len=configs['data']['sequence_length'],\n # batch_size=configs['training']['batch_size'],\n # normalise=configs['data']['normalise']\n # ),\n # epochs=configs['training']['epochs'],\n # batch_size=configs['training']['batch_size'],\n # steps_per_epoch=steps_per_epoch,\n # save_dir=configs['model']['save_dir']\n # )\n\n # in-memory training\n model.train(x, y, epochs=configs['training']['epochs'], batch_size=configs['training']['batch_size'],\n save_dir=configs['model']['save_dir'])\n\n x_test, y_test = data.get_test_data(\n seq_len=configs['data']['sequence_length'],\n normalise=configs['data']['normalise']\n )\n\n print(\"x_test\")\n print(x_test)\n print(\"-----\")\n print(\"y_test\")\n print(y_test)\n\n predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])\n\n\n # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])\n # predictions = model.predict_point_by_point(x_test)\n \n stockTicker = \"VNQ\"\n plot_results_multiple(predictions, y_test, configs['data']['sequence_length'], stockTicker, True, configs['data']['filename'], configs['data']['train_test_split'])\n\n x_train, y_train = data.get_train_data(seq_len=configs['data']['sequence_length'], normalise=configs['data']['normalise'])\n\n train_predictions = model.predict_point_by_point(x_train)\n\n plot_training(train_predictions, y_train, configs['data']['sequence_length'], stockTicker, True, configs['data']['filename'], configs['data']['train_test_split'])\n\n\nif __name__ == 
'__main__':\n main()","sub_path":"Xterp/run_main_tPlot_HP.py","file_name":"run_main_tPlot_HP.py","file_ext":"py","file_size_in_byte":8176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"125673973","text":"import argparse\nimport gym\nimport os\nimport sys\nimport pickle\nimport time\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom utils import *\nfrom models.mlp_policy import Policy\nfrom models.mlp_critic import Value\nfrom models.mlp_policy_disc import DiscretePolicy\nfrom core.ppo import ppo_step\nfrom core.common import estimate_advantages\nfrom core.agent import Agent\n\nfrom larocs_sim.envs.drone_env import DroneEnv\nimport csv\n\n\n \ndef check_dir(file_name):\n directory = os.path.dirname(file_name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\nparser = argparse.ArgumentParser(description='PyTorch PPO example')\nparser.add_argument('--env-name', default=\"Hopper-v2\", metavar='G',\n help='name of the environment to run')\nparser.add_argument('--env_reset_mode', default=\"Discretized_Uniform\",\n help='name of the environment to run')\n\nparser.add_argument('--model-path', metavar='G',\n help='path of pre-trained model')\nparser.add_argument('--render', action='store_true', default=False,\n help='render the environment')\nparser.add_argument('--log-std', type=float, default=-0.0, metavar='G',\n help='log std for the policy (default: -0.0)')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor (default: 0.99)')\nparser.add_argument('--tau', type=float, default=0.95, metavar='G',\n help='gae (default: 0.95)')\nparser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G',\n help='l2 regularization regression (default: 1e-3)')\nparser.add_argument('--learning-rate', type=float, default=3e-4, metavar='G',\n help='learning rate (default: 3e-4)')\nparser.add_argument('--clip-epsilon', type=float, default=0.2, metavar='N',\n help='clipping epsilon for PPO')\nparser.add_argument('--optim-epochs', type=int, default=10,\n help='epochs for the internal optimization')\nparser.add_argument('--optim-batch-size', type=int, default=64,\n help='min_batch for the internal optimzation part')\nparser.add_argument('--num-threads', type=int, default=1, metavar='N',\n help='number of threads for agent (default: 1)')\nparser.add_argument('--seed', type=int, default=1, metavar='N',\n help='random seed (default: 1)')\nparser.add_argument('--min-batch-size', type=int, default=2048, metavar='N',\n help='minimal batch size per PPO update (default: 2048)')\nparser.add_argument('--max-iter-num', type=int, default=500, metavar='N',\n help='maximal number of main iterations (default: 500)')\nparser.add_argument('--log-interval', type=int, default=1, metavar='N',\n help='interval between training status logs (default: 10)')\nparser.add_argument('--save-model-interval', type=int, default=0, metavar='N',\n help=\"interval between saving model (default: 0, means don't save)\")\nparser.add_argument('--save_path', type=str, \\\n help=\"path to save model pickle and log file\", default='DEFAULT_DIR')\nparser.add_argument('--obs-running-state', type=int, default=1, choices = [0,1],\n help=\"If the observation will be normalized (default: 1, it will be)\")\nparser.add_argument('--reward-running-state', type=int, default=0, choices = [0,1],\n help=\"If the reward will be normalized (default: 0, it won't be)\")\nparser.add_argument('--gpu-index', type=int, 
default=0, metavar='N')\nargs = parser.parse_args()\n\ndtype = torch.float64\ntorch.set_default_dtype(dtype)\ndevice = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')\nif torch.cuda.is_available():\n torch.cuda.set_device(args.gpu_index)\n\n\"\"\"environment\"\"\"\nenv = DroneEnv(random=args.env_reset_mode,seed=args.seed)\n\n# action_dim = env.action_space.shape[0]\nstate_dim = env.observation_space[0]\nis_disc_action = len(env.action_space.shape) == 0\n\nif args.obs_running_state == 1:\n running_state = ZFilter((state_dim,), clip=5)\n\nelse:\n running_state = None\n\n# if args.reward_running_state == 1:\n# running_reward = ZFilter((1,), demean=False, clip=10)\n# else:\n# running_reward = None\n\n\n\"\"\"seeding\"\"\"\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\n\n\"\"\"define actor and critic\"\"\"\n\nif args.model_path is None:\n if is_disc_action:\n policy_net = DiscretePolicy(state_dim, env.action_space.n)\n else:\n policy_net = Policy(state_dim, env.action_space.shape[0], log_std=args.log_std)\n value_net = Value(state_dim)\nelse:\n policy_net, value_net, running_state, = pickle.load(open(args.model_path, \"rb\"))\n\n\npolicy_net.to(device)\nvalue_net.to(device)\n\n\noptimizer_policy = torch.optim.Adam(policy_net.parameters(), lr=args.learning_rate)\noptimizer_value = torch.optim.Adam(value_net.parameters(), lr=args.learning_rate)\n\n# optimization epoch number and batch size for PPO\noptim_epochs = args.optim_epochs\noptim_batch_size = args.optim_batch_size\n\n\n\"\"\"create agent\"\"\"\nagent = Agent(env, policy_net, device, running_state=running_state, render=args.render, num_threads=args.num_threads)\n\n\n\n\n# Set save/restore paths\nsave_path = os.path.join('../checkpoint/', args.save_path) +'/'\ncheck_dir(save_path)\n\n\n\n\n\ndef update_params(batch, i_iter, scheduler_policy, scheduler_value):\n states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)\n actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)\n rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)\n masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)\n with torch.no_grad():\n values = value_net(states)\n # fixed_log_probs = policy_net.get_log_prob(states, actions)\n fixed_log_probs , act_mean, act_std = policy_net.get_log_prob(states, actions)\n\n \"\"\"get advantage estimation from the trajectories\"\"\"\n advantages, returns = estimate_advantages(rewards, masks, values, args.gamma, args.tau, device)\n\n \"\"\"perform mini-batch PPO update\"\"\"\n optim_iter_num = int(math.ceil(states.shape[0] / optim_batch_size))\n for _ in range(optim_epochs):\n perm = np.arange(states.shape[0])\n np.random.shuffle(perm)\n perm = LongTensor(perm).to(device)\n\n states, actions, returns, advantages, fixed_log_probs = \\\n states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), fixed_log_probs[perm].clone()\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batch_size, min((i + 1) * optim_batch_size, states.shape[0]))\n states_b, actions_b, advantages_b, returns_b, fixed_log_probs_b = \\\n states[ind], actions[ind], advantages[ind], returns[ind], fixed_log_probs[ind]\n\n \n policy_surr, value_loss, ev, clip_frac, entropy, approxkl = ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, 1, states_b, actions_b, returns_b,\n advantages_b, fixed_log_probs_b, args.clip_epsilon, args.l2_reg, scheduler_policy, scheduler_value)\n\n return policy_surr, 
value_loss, ev, clip_frac, entropy, approxkl\n\n\n\ndef main_loop():\n\n\n # list_cols = ['num_steps', 'num_episodes', 'total_reward', 'avg_reward', 'max_reward', \\\n # 'min_reward', 'lenght_mean', 'lenght_min','lenght_max','lenght_std', 'sample_time']\n # # 'min_reward', 'sample_time', 'action_mean', 'action_min', 'action_max']\n\n list_cols = ['num_steps','num_episodes','total_reward','avg_reward','max_reward','min_reward',\\\n 'lenght_mean','lenght_min','lenght_max','lenght_std','total_c_reward',\\\n 'avg_c_reward','avg_c_reward_per_episode','max_c_reward','min_c_reward']\n \n \n algo_cols = ['discrim_loss','policy_loss','value_loss','explained_variance', \\\n 'clipfrac','entropy','aproxkl']\n\n\n with open(os.path.join(save_path,'progress.csv'), 'w') as outcsv:\t\n writer = csv.writer(outcsv, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)\t\n writer.writerow(list_cols + algo_cols)\t\n \n\n\n begin=time.time()\n \n\n\n\n # lr = args.learning_rate\n lr=lambda f: args.learning_rate * f\n\n # cliprange = args.clip_epsilon\n # def constfn(val):\n # def f(_):\n # return val\n # return f\n # if isinstance(lr, float): \n # lr = constfn(lr)\n # else:\n # assert callable(lr)\n # if isinstance(cliprange, float): \n # cliprange = constfn(cliprange)\n # else: \n # assert callable(cliprange)\n\n\n policy_lr = args.learning_rate\n\n nupdates = args.max_iter_num # nupdates = total_timesteps//nbatch\n lr_lambda=lambda f:(1.0 - (f - 1.0) / nupdates)\n \n scheduler_policy = torch.optim.lr_scheduler.LambdaLR(optimizer_policy, lr_lambda)\n scheduler_value = torch.optim.lr_scheduler.LambdaLR(optimizer_value, lr_lambda)\n\n\n\n\n for i_iter in range(args.max_iter_num):\n \n # # Fraction to multiply learning_rate and clip range\n # frac = 1.0 - (i_iter - 1.0) / nupdates\n # # Calculate the learning rate\n # lrnow = lr(frac)\n # # Calculate the cliprange\n # cliprangenow = cliprange(frac)\n # print('frac = ', frac)\n env.restart=True ## Hacky because Pyrep breaks the Drone!\n env.reset()\n env.restart=False\n\n print('Iter = ', i_iter)\n \"\"\"generate multiple trajectories that reach the minimum batch_size\"\"\"\n batch, log = agent.collect_samples(args.min_batch_size)\n print('Done batching')\n t0 = time.time()\n loss_policy, loss_value, ev, clipfrac, entropy, approxkl = update_params(batch, i_iter, scheduler_policy, scheduler_value)\n t1 = time.time()\n\n print('Loss_policy = {0:.4f}'.format(loss_policy))\n print('Loss_value = {0:.4f}'.format(loss_value))\n print('Explained variance = {0:.3f}'.format(ev))\n print('clipfrac = {0:.5f}'.format(clipfrac))\n print('entropy = {0:.5f}'.format(entropy))\n print('approxkl = {0:.5f}'.format(approxkl))\n algo_cols_values = [loss_policy, loss_value, ev, clipfrac, entropy, approxkl]\n\n print(\"Done updating\")\n\n \t\n if i_iter % args.log_interval == 0:\n # print(\"LOG KEYS = \", list_cols)\t\n print()\n print('{}\\tT_sample {:.4f}\\tT_update {:.4f}\\tR_min {:.2f}\\tR_max {:.2f}\\tR_avg {:.2f}'.format(\n i_iter, log['sample_time'], t1-t0, log['min_reward'], log['max_reward'], log['avg_reward']))\n print()\n new_list = [log[k] for k in log.keys() if k in list_cols]\n with open(os.path.join(save_path,'progress.csv'), 'a') as csvfile:\t\n rew_writer = csv.writer(csvfile, delimiter=';',\t\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\t\n rew_writer.writerow(new_list + algo_cols_values)\t\n\n\n \n if args.save_model_interval > 0 and (i_iter+1) % args.save_model_interval == 0:\n to_device(torch.device('cpu'), policy_net, value_net)\n pickle.dump((policy_net, 
value_net, running_state), \\\n open(os.path.join(save_path,'itr_{0}.p'.format(i_iter)), 'wb'))\n to_device(device, policy_net, value_net, discrim_net)\n\n\n \"\"\"clean up gpu memory\"\"\"\n torch.cuda.empty_cache()\n print('Time so far = {0:.2f} on iter = {1}'.format(time.time()-begin, i_iter))\n\n\nmain_loop()\n\n\n\nprint(\"Done\")\nenv.shutdown()","sub_path":"examples/drone_sem_optimizations.py","file_name":"drone_sem_optimizations.py","file_ext":"py","file_size_in_byte":11593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"164995600","text":"import json\nimport sys\nimport DSGRN\nimport ast\nfrom dsgrn_utilities.parameter_building import construct_parameter\nimport pickle\nfrom parameter_constructer import param_constructer\n\n\nwith open('FC_query.json') as f:\n FC_query_result = json.load(f)\nwith open('XC_query.json') as f:\n XC_query = json.load(f)\nwith open('bistable_query.json') as f:\n bistable_query = json.load(f)\n\nwith open('XC_FP_list.json') as f:\n XC_FP_results = json.load(f)\nwith open('FC_FP_list.json') as f:\n FC_FP_results = json.load(f)\n\ndef bistableFC_bistableXC_check(net):\n FC_FP_keys = [int(i) for i in FC_FP_results.keys()]\n clb2_OFF = []\n clb2_ON = []\n network = DSGRN.Network(net)\n pg = DSGRN.ParameterGraph(network)\n for i in FC_FP_keys:\n param = pg.parameter(i)\n new_param_OFF, new_param_ON = param_constructer(param, network)\n clb2_OFF.append(pg.index(new_param_OFF))\n clb2_ON.append(pg.index(new_param_ON))\n OFF_biXC_results = {}\n for k in XC_FP_results.keys():\n if int(k) in clb2_OFF:\n OFF_biXC_results[k] = XC_FP_results[k]\n ON_biXC_results = {}\n for k in XC_FP_results.keys():\n if int(k) in clb2_ON:\n ON_biXC_results[k] = XC_FP_results[k]\n print(\"number of parameters that exhibit a bistable FC to bistable XC with CLB2 ON:\" + str(len(ON_biXC_results)))\n print(\"number of parameters that exhibit a bistable FC to bistable XC with CLB2 OFF:\" + str(len(OFF_biXC_results)))\n\nif __name__ == '__main__':\n net = sys.argv[1]\n bistableFC_bistableXC_check(net)","sub_path":"bistableFC_bistableXC_checker.py","file_name":"bistableFC_bistableXC_checker.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"270774493","text":"def dpMakeChange(coinvaluelist,change,mincoins,coinused):\n #coinvaluelist--币种,change--零钱数目,mincoins--记录最少硬币个数的列表,coinused--用于追踪用了哪些硬币凑成最小方案\n #从1分钱开始计算最少银币个数\n for cents in range(1,change+1):\n coinCount=cents #设一个最大值\n newcoin=1 #记录使用硬币的路径,默认使用最小的1\n #减去每个币种,向后查最少硬币数\n for j in [c for c in coinvaluelist if c<=cents]:\n if mincoins[cents-j]+10:\n thiscoin=coinused[coin]\n print(thiscoin)\n coin=coin-thiscoin\namnt=63\nclist=[1,5,10,21,25]\ncoinused=[0]*(amnt+1)\n\ncoinCount=[0]*(amnt+1)\nprint(dpMakeChange(clist,amnt,coinCount,coinused))\n \n","sub_path":"找零兑换问题(动态规划).py","file_name":"找零兑换问题(动态规划).py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"501904773","text":"import requests, time\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport os, time, glob, fnmatch\r\nimport csv\r\n\r\n\r\ndir = os.getcwd() + '/documents'\r\nif not os.path.exists(dir):\r\n\t\t\tos.makedirs(dir)\r\n\r\nBASE_URL = 'http://www.kijiji.it/case/vendita/annunci-roma/'\r\n \r\n\r\ndef addAds(url, count, maxArt):\r\n\tr = requests.get(url)\r\n\tcontent = r.content\r\n\tsoup = 
BeautifulSoup(content)\r\n\t\r\n\ttitles= soup.find_all(class_=\"title\")\r\n\tlinks=soup.find_all(class_=\"cta\")\r\n\tlocale= soup.find_all(class_=\"locale\")\r\n\tprice= soup.find_all(class_=\"price\")\r\n\tdescription= soup.find_all(class_=\"description\")\r\n\t\r\n\tfor i in range(0,len(titles)):\r\n\t\tstart= '-' + str('%.6i'%(500 * round(((i+count)/500),2) ) )\r\n\t\tend= '-' + str('%.6i'%((500 * round(((i+count)/500),2) ) + (500-1)) )\r\n\t\tdirectory= dir + '\\documents'+ start + end\r\n\t\t\r\n\t\tif (i + count) > maxArt:\r\n\t\t\treturn maxArt\r\n\t\t\t\t\r\n\t\tif not os.path.exists(directory):\r\n\t\t\tos.makedirs(directory)\r\n\t\t\t\r\n\t\tfilename = directory + '\\document-' +str('%.6i'%(i+count)) + '.txt'\r\n\t\t\r\n\t\twith open( filename, 'w') as text:\r\n\t\t\tspamwriter = csv.writer(text, delimiter='\\t')\r\n\t\t\tspamwriter.writerow([titles[i].get_text().encode('utf-8'),\r\n\t\t\t\t\t\t\t\tlocale[i].get_text().encode('utf-8'),\r\n\t\t\t\t\t\t\t\tprice[i].get_text().encode('utf-8'),\r\n\t\t\t\t\t\t\t\tlinks[i].get(\"href\").encode('utf-8'),\r\n\t\t\t\t\t\t\t\tdescription[i].get_text().encode('utf-8')])\r\n\t\t\t\t\t\t\t\t\r\n\tnumdocs = count + len(titles)\r\n\treturn numdocs\r\n\t \r\n\t\t\t\t\t\t\t\r\ndef processAllPages(baseURL, minPage=1, maxArt=1, delay=2):\r\n\tpag = minPage\r\n\tcount = 0\r\n\tn = 0\r\n\tv=0\r\n\twhile(count < maxArt):\r\n\t\t#print \"Processing page: \" + str(pag)\r\n\t\turl=baseURL + \"?p=\" + str(pag)\r\n\t\tn = addAds(url, count, maxArt)\r\n\t\tcount = n\r\n\t\ttime.sleep(delay)\r\n\t\tpag += 1\t\r\n\r\nprocessAllPages(BASE_URL, 1, 3000, 0.2)\r\n\t\r\n\r\n\r\n\r\n","sub_path":"collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"355983379","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('controller_setup', '0008_auto_20160106_1154'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Device',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('label', models.CharField(max_length=200)),\n ('slave_id', models.IntegerField(default=1)),\n ],\n ),\n migrations.CreateModel(\n name='Plant',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('label', models.CharField(unique=True, max_length=50)),\n ],\n ),\n migrations.AlterField(\n model_name='client',\n name='timeout_in_sec',\n field=models.FloatField(default=5.0),\n ),\n migrations.AddField(\n model_name='device',\n name='client',\n field=models.ForeignKey(to='controller_setup.Client'),\n ),\n migrations.AddField(\n model_name='device',\n name='plant',\n field=models.ForeignKey(to='controller_setup.Plant'),\n ),\n migrations.AddField(\n model_name='device',\n name='template',\n field=models.ForeignKey(to='controller_setup.Template'),\n ),\n ]\n","sub_path":"controller_setup/migrations/0009_auto_20160106_1208.py","file_name":"0009_auto_20160106_1208.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"238630752","text":"#Importing libraries and dependancies required for building the model\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nfrom datetime import date\nimport time\nfrom datetime 
import datetime\nfrom google.cloud import storage, exceptions\nfrom googleapiclient import errors\nfrom googleapiclient import discovery\nfrom google.oauth2 import service_account\nfrom oauth2client.client import GoogleCredentials\nimport pandas as pd\nimport pickle\nimport json\nfrom training.training_utils.get_dataset import *\nfrom training.training_utils.plot_model import *\n# storage_client = storage.Client.from_service_account_json(\"service-account.json\")\n\n#initialise bucket name and GCP storage client\nglobal BUCKET_NAME\nBUCKET_NAME = \"keras-python-models-2\"\nstorage_client = storage.Client()\nbucket = storage_client.get_bucket(BUCKET_NAME)\n#credentials = GoogleCredentials.get_application_default()\n\n#save and upload model history to bucket\ndef upload_history(history, score, model_blob_path):\n\n # storage_client = storage.Client()\n # # storage_client = storage.Client.from_service_account_json(\"psp-keras-training.json\")\n # bucket = storage_client.get_bucket(\"keras-python-models\")\n # buckets = list(storage_client.list_buckets())\n # print(buckets)\n #Saving pickle of history so that it can later be used for visualisation of the model\n history_filepath = 'history_' + str(datetime.date(datetime.now())) + \\\n '_' + str((datetime.now().strftime('%H:%M'))) +'.pckl'\n\n try:\n # f = open(BUCKET_NAME + '/history/history'+ str(datetime.date(datetime.now())) +'.pckl', 'wb')\n f = open(history_filepath, 'wb')\n pickle.dump(history.history, f)\n f.close()\n except pickle.UnpicklingError as e:\n print('Error', e)\n except (AttributeError, EOFError, ImportError, IndexError) as e:\n print(traceback.format_exc(e))\n except Exception as e:\n print(traceback.format_exc(e))\n print('Error creating history pickle')\n\n # blob_path = 'history/history_'+ str(datetime.date(datetime.now())) + \\\n # '_' + str((datetime.now().strftime('%H:%M'))) +'.pckl'\n blob_path = str(model_blob_path) + 'history/history_'+ str(datetime.date(datetime.now())) + \\\n '_' + str((datetime.now().strftime('%H:%M'))) +'.pckl'\n\n blob = bucket.blob(blob_path)\n upload_file(blob_path,history_filepath)\n time.sleep(2)\n\n ## Set MetaData of history blob to store results from history ##\n\n# history_meta = {}\n# for key, value in (history.history.items()):\n# if 'val_false' in key or 'false' in key:\n# # history_meta[key] = ([float(i) for i in([ '%.1f' % elem for elem in history.history[key]])])\n# history_meta[key] = ([ '%.1f' % elem for elem in history.history[key]])\n#\n# else:\n# # history_meta[key] = ([float(i) for i in([ '%.4f' % elem for elem in history.history[key]])])\n# history_meta[key] = ([ '%.4f' % elem for elem in history.history[key]])\n#\n# time.sleep(2)\n#\n#\n# metadata = history_meta\n# metadata['best_accuracy'] = max(history_meta['accuracy'])\n# metadata['best_val_accuracy'] = max(history_meta['val_accuracy'])\n# metadata['best_loss'] = min(history_meta['loss'])\n# metadata['best_val_loss'] = min(history_meta['val_loss'])\n# metadata['best_mean_squared_error'] = min(history_meta['mean_squared_error'])\n# metadata['best_val_mean_squared_error'] = min(history_meta['val_mean_squared_error'])\n# metadata['best_false_negatives'] = min(history_meta['false_negatives'])\n# metadata['best_false_positives'] = min(history_meta['false_positives'])\n# metadata['best_val_false_negatives'] = min(history_meta['val_false_negatives'])\n# metadata['best_val_false_positives'] = min(history_meta['val_false_positives'])\n# metadata['best_mean_absolute_error'] = min(history_meta['mean_absolute_error'])\n# 
metadata['best_val_mean_absolute_error'] = min(history_meta['val_mean_absolute_error'])\n# metadata['Evaluation_Loss'] = str(score[0])\n# metadata['Evaluation_Accuracy'] = str(score[1])\n# metadata['Model_Name'] = model_blob_path\n#\n# #do statistical analysis/summary stats on above variables e.g std dev, variance,\n# #create json\n# blob.metadata = metadata\n# try:\n# blob.patch()\n# # except exceptions.NotFound:, except google.api_core.exceptions.Forbidden:\n# except exceptions.Forbidden:\n# raise ValueError(\"Error: Access to GCP Storage bucket forbidden, check IAM policy, 403 Error\")\n# except exceptions.NotFound:\n# raise ValueError(\"Error: Access to GCP Storage bucket forbidden, check IAM policy, 404 Error\")\n# except exceptions.PermissionDenied:\n# raise ValueError(\"Error: Access to GCP Storage bucket forbidden, check IAM policy\")\n# except exceptions.TooManyRequests:\n# raise ValueError(\"Error: Access to GCP Storage bucket forbidden, check IAM policy\")\n# #https://googleapis.dev/python/google-api-core/latest/exceptions.html\n# # call get_iam_policy and change_iam_policy func to view and change IAM policy to get rid of error\n# #cloudstorage.Error, cloudstorage.AuthorizationError, cloudstorage.ForbiddenError, cloudstorage.NotFoundError, cloudstorage.TimeoutError\n\n#save and upload model to bucket\ndef upload_model(model, model_blob_path,model_save_path):\n\n    #model.get_layer dense_1\n    #get file name from args\n    print('Saving model')\n\n    model.save(model_save_path)\n    upload_file(model_blob_path, model_save_path)\n\n#upload blob to bucket\ndef upload_file(blob_path, filepath):\n\n    print('Uploading blob to GCP Storage')\n    blob = bucket.blob(blob_path)\n    blob.upload_from_filename(filepath)\n\n    #blob_path is GCP Storage filepath\n    #filepath is local path to file\n\n#download blob from bucket to local dir\ndef download_file(blob_path, filepath):\n\n    print('Downloading file...')\n    blob = bucket.blob(blob_path)\n    blob.download_to_filename(filepath)\n\ndef get_best_model(project_id, job_name):\n\n    # Define the credentials for the service account\n    credentials = service_account.Credentials.from_service_account_file(\"service-account.json\")\n    #credentials = GoogleCredentials.get_application_default()\n\n    project_id = 'projects/{}'.format(project_id)\n    job_id = '{}/jobs/{}'.format(project_id, job_name)\n\n    ml = discovery.build('ml', 'v1', credentials=credentials)\n\n    try:\n        request = ml.projects().jobs().get(name=job_id).execute()\n    except errors.HttpError as err:\n        print('Error getting job details')\n        print(err._get_reason())\n\n    #get first best model\n    best_model = request['trainingOutput']['trials'][0]\n\n    print('Best Hyperparameters:')\n    print(json.dumps(best_model, indent=4))\n\n# Create a list for each field\n\n    trial_id, eval_score, conv1_filters, conv2_filters, conv3_filters, window_size, conv2d_dropout, \\\n    kernel_regularizer, pool_size, recurrent_layer1, recurrent_layer1, recurrent_dropout, \\\n    recurrent_recurrent_dropout, after_recurrent_dropout, bidirection, recurrent_layer, \\\n    dense_1, dense_2, dense_3, dense_4, dense_dropout, optimizer, learning_rate, epochs, \\\n    batch_size, elapsed_time = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], \\\n    [], [], [], [], [], [], [], [], [], [], []\n\n    # Loop through the json and append the values of each field to the lists\n    for each in request['trainingOutput']['trials']:\n        trial_id.append(each['trialId'])\n        eval_score.append(each['finalMetric']['eval_score'])\n        
conv1_filters.append(each['hyperparameters']['conv1_filters'])\n conv2_filters.append(each['hyperparameters']['conv2_filters'])\n conv3_filters.append(each['hyperparameters']['conv3_filters'])\n window_size.append(each['hyperparameters']['window_size'])\n conv2d_dropout.append(each['hyperparameters']['conv2d_dropout'])\n\n # Put the lsits into a df, transpose and name the columns\n df = pd.DataFrame([trial_id, eval_score, conv1_filters, conv2_filters, conv3_filters, window_size, conv2d_dropout]).T\n df.columns = ['trial_id', 'eval_score', 'conv1_filters', 'conv2_filters', 'conv3_filters', 'window_size', 'conv2d_dropout']\n\n # Display the df\n df.head()\n return df\n\n#List all objects within bucket\ndef list_bucket_objects():\n\n blobs = storage_client.list_blobs(BUCKET_NAME)\n\n for blob in blobs:\n print(blob.name)\n\n#Delete specified blob from bucket\ndef delete_blob(blob_name):\n\n\n bucket = storage_client.bucket(BUCKET_NAME)\n blob = bucket.blob(blob_name)\n blob.delete()\n\n print(\"Blob {} deleted.\".format(blob_name))\n\n\n# \"\"\"View IAM Policy for a bucket\"\"\"\ndef view_bucket_iam_members():\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(BUCKET_NAME)\n\n policy = bucket.get_iam_policy(requested_policy_version=3)\n\n for binding in policy.bindings:\n print(\"Role: {}, Members: {}\".format(binding[\"role\"], binding[\"members\"]))\n\n#update iam policy of bucket so above functions can work\ndef update_bucket_policy(bucket_name):\n pass\n","sub_path":"psp_gcp/training/training_utils/gcp_utils.py","file_name":"gcp_utils.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"14267162","text":"#!/usr/bin/env python3\n\"\"\"\nPerforms the Baum-Welch algorithm for a hidden markov model\nhttps://www.adeveloperdiary.com/data-science/machine-learning/\nderivation-and-implementation-of-baum-welch-algorithm-for-hidden-markov-model/\n\"\"\"\n\n\nimport numpy as np\n\n\ndef forward(Observation, Emission, Transition, Initial):\n \"\"\"\n Returns: P, F, or None, None on failure\n \"\"\"\n T = Observation.shape[0]\n\n N, _ = Emission.shape\n\n F = np.zeros((N, T))\n\n # Initialization step\n F[:, 0] = Initial.transpose() * Emission[:, Observation[0]]\n\n # Recursion\n for i in range(1, T):\n F[:, i] = \\\n np.matmul(F[:, i - 1], Transition) * Emission[:, Observation[i]]\n\n # Likelihood of the observations given the model\n P = np.sum(F[:, T - 1])\n\n return P, F\n\n\ndef backward(Observation, Emission, Transition, Initial):\n \"\"\"\n Returns: P, B, or None, None on failure\n \"\"\"\n T = Observation.shape[0]\n\n N, _ = Emission.shape\n\n B = np.zeros((N, T))\n\n # Initialization step\n B[:, T - 1] = np.ones(N)\n\n # Recursion\n for t in range(T - 2, -1, -1):\n prob = \\\n np.sum(B[:, t + 1] *\n Emission[:, Observation[t + 1]] * Transition, axis=1)\n B[:, t] = prob\n\n # Likelihood of the observations given the model\n P = np.sum(Initial[:, 0] * Emission[:, Observation[0]] * B[:, 0])\n\n return P, B\n\n\ndef baum_welch(Observations, Transition, Emission, Initial, iterations=1000):\n \"\"\"\n Returns: the converged Transition, Emission, or None, None on failure\n \"\"\"\n if type(Observations) is not np.ndarray or len(Observations.shape) != 1:\n return None, None\n\n if type(Transition) is not np.ndarray or len(Transition.shape) != 2:\n return None, None\n\n if type(Emission) is not np.ndarray or len(Emission.shape) != 2:\n return None, None\n\n if type(Initial) is not 
np.ndarray or len(Initial.shape) != 2:\n return None, None\n\n T = Observations.shape[0]\n N, M = Emission.shape\n\n if Transition.shape[0] != N or Transition.shape[1] != N:\n return None, None\n\n if Initial.shape[0] != N or Initial.shape[1] != 1:\n return None, None\n\n if not np.sum(Transition, axis=1).all():\n return None, None\n\n if not np.sum(Emission, axis=1).all():\n return None, None\n\n if not np.sum(Initial) == 1:\n return None, None\n\n for _ in range(iterations):\n _, alpha = forward(Observations, Emission, Transition, Initial)\n _, beta = backward(Observations, Emission, Transition, Initial)\n\n xi = np.zeros((N, N, T - 1))\n for t in range(T - 1):\n a = np.matmul(alpha[:, t].transpose(), Transition)\n b = Emission[:, Observations[t + 1]].transpose()\n c = beta[:, t + 1]\n denominator = np.matmul(a * b, c)\n\n for i in range(N):\n a = alpha[i, t]\n b = Transition[i]\n c = Emission[:, Observations[t + 1]].transpose()\n d = beta[:, t + 1].transpose()\n numerator = a * b * c * d\n xi[i, :, t] = numerator / denominator\n\n gamma = np.sum(xi, axis=1)\n\n # TRANSITION CALCULATION\n num = np.sum(xi, 2)\n den = np.sum(gamma, axis=1).reshape((-1, 1))\n Transition = num / den\n\n # EMISSION CALCULATION\n # Add additional T'th element in gamma\n xi_sum = np.sum(xi[:, :, T - 2], axis=0)\n xi_sum = xi_sum.reshape((-1, 1))\n gamma = np.hstack((gamma, xi_sum))\n\n denominator = np.sum(gamma, axis=1)\n denominator = denominator.reshape((-1, 1))\n\n for i in range(M):\n gamma_i = gamma[:, Observations == i]\n Emission[:, i] = np.sum(gamma_i, axis=1)\n\n Emission = Emission / denominator\n\n return Transition, Emission\n","sub_path":"unsupervised_learning/0x02-hmm/6-baum_welch.py","file_name":"6-baum_welch.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"71016047","text":"from numpy import linalg as LA\nimport numpy as np\nimport math\n\nm = 15\nv_range = [-1,1]\ng = 0.7\n\nv = np.linspace(v_range[0], v_range[1], num=m).tolist()\n\nb = []\nfor _v in v:\n # _b = (math.pi / 2)* _v*(1-g) / (g - 2 * g * abs(_v) + 1)\n _b = _v*(1-g) / (g - 2 * g * LA.norm(v, ord=2) + 1)\n b.append(_b)\n\nx = []\nt = 0\nfor i, _b in enumerate(b):\n if i<(m-1)/2:\n # print(_b)\n t+=_b\n\nt2=0\n\nresult = []\nfor i, _b in enumerate(b):\n b[i] = (_b * 1 / t)\n _r = 0\n if i<(m-1)/2:\n for j in range(i,int((m-1)/2)):\n _r += b[j] \n elif i>(m-1)/2:\n for j in range(i,(m)):\n _r += b[j] \n result.append(_r)\n\nprint(result)\n# print(t)\n# print(b)\n# print(x)","sub_path":"old/st_vec.py","file_name":"st_vec.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"607023934","text":"#!/usr/bin/python3\nimport sys, praw, os\n\nreddit = praw.Reddit(\n user_agent='CMV Chapter Creation (by u/goromal)',\n client_id='e_Lf801LK7AtTA',\n client_secret='8tsv5GGTFJYI_-Ef50GWyJrdA5HXBw'\n)\n\nclass RedditPost(object):\n def __init__(self, url, branching_factor=4, max_depth=4):\n self.submission = reddit.submission(url=url)\n title = self.submission.title\n author = self.submission.author.name\n description = self.submission.selftext\n\n self.tree = {'title': title, 'author': author, 'content': description} \n self.populate_tree(self.tree, self.submission, self.expand_sub, 1, branching_factor, max_depth)\n \n def expand_sub(self, root):\n return sorted(root.comments, key=lambda x:x.score,reverse=True)\n\n def expand_com(self, root):\n return 
sorted(root.replies, key=lambda x:x.score,reverse=True)\n\n def populate_tree(self, tree_root, comment_root, expansion_fn, d, b, d_max):\n self.submission.comments.replace_more(limit=None)\n if d <= d_max:\n tree_root['responses'] = list()\n children = expansion_fn(comment_root)\n for i in range(min(b, len(children))):\n child = children[i]\n author = (child.author.name if not child.author is None else '')\n content = child.body\n tree_root['responses'].append({'title': '', 'author': author, 'content': content})\n self.populate_tree(tree_root['responses'][i], child, self.expand_com, d+1, b, d_max)\n \n def getTree(self):\n return self.tree\n","sub_path":"common_res/reddit_tools.py","file_name":"reddit_tools.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"244955494","text":"import numpy as np\nimport pandas as pd\nimport cv2\nimport pylab as pyl\nimport os\nimport itertools\nfrom matplotlib.pyplot import ion\n\n\ndef threshold_value(img):\n \"\"\"\n Returns a threshold value (0.9 or 0.98) based on whether any slice\n of the image within a central box is enterely white (white is a bitch!)\n 0.9 or 0.98 come simply from a lot of experimentation.\n \"\"\"\n\n is_color = len(img.shape) == 3\n is_grey = len(img.shape) == 2\n\n if is_color:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n elif is_grey:\n gray = img.copy()\n\n slices = gray.mean(axis = 1)[20:gray.shape[0]-30]\n is_white = any(x > 0.9*255 for x in slices)\n if is_white:\n return 0.98\n else:\n return 0.9\n\ndef threshold_img(img):\n \"\"\"\n Simple wrap-up function for cv2.threshold()\n \"\"\"\n\n is_color = len(img.shape) == 3\n is_grey = len(img.shape) == 2\n\n t = threshold_value(img)\n\n if is_color:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n elif is_grey:\n gray = img.copy()\n\n blurred = cv2.GaussianBlur(gray, (3, 3), 0)\n (_, thresh) = cv2.threshold(blurred, t*255, 1, cv2.THRESH_BINARY_INV)\n\n return thresh\n\n\ndef get_edges(arr, thresh):\n \"\"\"\n Given an array returns the min/max where that array is less that 255*thresh\n i.e. is not white. If all the slice array is white, returns the middle point.\n \"\"\"\n\n if np.any(np.where(arr < thresh*255)[0]):\n e1 = min(np.where(arr < thresh*255)[0])\n e2 = max(np.where(arr < thresh*255)[0])\n\n else:\n e1 = len(arr)/2\n e2 = len(arr)/2\n\n return e1,e2\n\n\ndef bounding_box(img):\n \"\"\"\n Returns right, left, lower and upper limits for the limiting box enclosing\n the item (shoe, dress). 
Note that given the shapes and colors of some items,\n finding the contours and compute the bounding box is not a viable solution.\n \"\"\"\n\n is_color = len(img.shape) == 3\n is_grey = len(img.shape) == 2\n\n if is_color:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n elif is_grey:\n gray = img.copy()\n\n slices = gray.mean(axis = 1)[20:gray.shape[0]-30]\n is_white = any(x > 0.9*255 for x in slices)\n\n if (is_white):\n h1 = min(np.apply_along_axis(get_edges, axis=0, arr=gray , thresh = 0.98)[0,:])\n h2 = max(np.apply_along_axis(get_edges, axis=0, arr=gray , thresh = 0.98)[1,:])\n w1 = min(np.apply_along_axis(get_edges, axis=1, arr=gray , thresh = 0.98)[:,0])\n w2 = max(np.apply_along_axis(get_edges, axis=1, arr=gray , thresh = 0.98)[:,1])\n else :\n h1 = min(np.apply_along_axis(get_edges, axis=0, arr=gray , thresh = 0.9)[0,:])\n h2 = max(np.apply_along_axis(get_edges, axis=0, arr=gray , thresh = 0.9)[1,:])\n w1 = min(np.apply_along_axis(get_edges, axis=1, arr=gray , thresh = 0.9)[:,0])\n w2 = max(np.apply_along_axis(get_edges, axis=1, arr=gray , thresh = 0.9)[:,1])\n\n return w1, w2, h1, h2\n\n\ndef shape_df(img, axis, nsteps):\n \"\"\"\n Returns a data frame with the initial and end points enclosing the product\n in the image, across the x/y axis. Why a dataframe and not tuples? just for\n convenience.\n \"\"\"\n\n is_color = len(img.shape) == 3\n is_grey = len(img.shape) == 2\n\n if is_color:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n elif is_grey:\n gray = img.copy()\n\n edges = bounding_box(gray)\n gray_c = gray[edges[2]:edges[3]+1, edges[0]:edges[1]+1]\n thr = threshold_value(gray_c)\n\n if axis == 'x' :\n cuts = np.rint(np.linspace(5, gray_c.shape[1]-1, nsteps, endpoint=True)).astype(int)\n\n init = np.apply_along_axis(get_edges, 0, arr = gray_c, thresh = thr)[0,:][cuts]\n end = np.apply_along_axis(get_edges, 0, arr = gray_c, thresh = thr)[1,:][cuts]\n\n df = pd.DataFrame(data = {'coord' : cuts, 'init' : init, 'end' : end},\n columns=['coord', 'init', 'end'])\n\n elif axis == 'y':\n cuts = np.round(np.linspace(4, gray_c.shape[0]-1, nsteps, endpoint=True)).astype(int)\n\n init = np.apply_along_axis(get_edges, 1, arr = gray_c, thresh = thr)[:,0][cuts]\n end = np.apply_along_axis(get_edges, 1, arr = gray_c, thresh = thr)[:,1][cuts]\n\n df = pd.DataFrame(data = {'coord' : cuts, 'init' : init, 'end' : end},\n columns=['coord', 'init', 'end'])\n\n return df\n\n\ndef shape_points(img, nsteps, mirrow=False, only_upper=False):\n \"\"\"\n Simple formatting the shape_df output to be passed to the ShapeContext class\n \"\"\"\n\n if mirrow:\n im = cv2.flip(img, 2)\n else:\n im = img.copy()\n\n df_y = shape_df(im, 'y', nsteps)\n df_x = shape_df(im, 'x', nsteps)\n\n if (not df_y.empty) and (not df_x.empty):\n y_init = [(df_y.init[i], df_y.coord[i]) for i in range(df_y.shape[0])]\n y_end = [(df_y.end[i], df_y.coord[i]) for i in range(df_y.shape[0])]\n x_init = [(df_x.coord[i], df_x.init[i]) for i in range(df_x.shape[0])]\n x_end = [(df_x.coord[i], df_x.end[i]) for i in range(df_x.shape[0])]\n\n if only_upper: return x_init\n\n return y_init+y_end+x_init+x_end\n else:\n return []\n\n\ndef plot_shape(img, axis, df=None, nsteps=None):\n \"\"\"\n function to overplot the shape points onto the image img\n \"\"\"\n\n if df is not None and nsteps:\n print('Error: provide data frame or nsteps, not both')\n return None\n\n if df is not None:\n edges = bounding_box(img)\n img_c = img[edges[2]:edges[3]+1, edges[0]:edges[1]+1]\n pyl.figure()\n pyl.gray()\n pyl.imshow(img_c)\n if axis == 'y':\n 
pyl.plot(df.init,df.coord, 'r*')\n pyl.plot(df.end, df.coord, 'r*')\n pyl.show()\n if axis == 'x':\n pyl.plot(df.coord,df.init, 'r*')\n pyl.plot(df.coord,df.end, 'r*')\n pyl.show()\n\n elif nsteps:\n pyl.figure()\n pyl.gray()\n pyl.imshow(img)\n if axis == 'y':\n df = shape_df(img, 'y', nsteps)\n pyl.plot(df.init,df.coord, 'r*')\n pyl.plot(df.end, df.coord, 'r*')\n pyl.show()\n if axis == 'x':\n df = shape_df(img, 'x', nsteps)\n pyl.plot(df.coord,df.init, 'r*')\n pyl.plot(df.coord,df.end, 'r*')\n pyl.show()","sub_path":"codes/PhaseDiaPredict/shapeContextLib/morphology_utils.py","file_name":"morphology_utils.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"72474864","text":"import pytest\n\n\n@pytest.fixture()\ndef greeting_form():\n from form_designer.models import FormDefinition, FormDefinitionField\n fd = FormDefinition.objects.create(\n mail_to='test@example.com',\n mail_subject='Someone sent you a greeting: {{ greeting }}',\n mail_reply_to='Greeting Bot ',\n )\n FormDefinitionField.objects.create(\n form_definition=fd,\n name='greeting',\n label='Greeting',\n field_class='django.forms.CharField',\n required=True,\n )\n FormDefinitionField.objects.create(\n form_definition=fd,\n name='upload',\n field_class='django.forms.FileField',\n required=False,\n )\n return fd\n","sub_path":"form_designer/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"191646451","text":"import sys\nif sys.platform[:5] == 'linux':\n import matplotlib\n matplotlib.use('Agg')\n\nimport re\nimport os, psutil\nfrom os import walk\nfrom os import listdir\nfrom os.path import isfile, join, isdir, exists\nimport time\nimport numpy as np\nimport pandas as pd\nfrom copy import deepcopy\nimport lenstronomy\nimport astropy\nimport scipy\nimport pickle\nfrom Lens_Modeling_Auto.auto_modeling_functions import openFITS\nfrom Lens_Modeling_Auto.auto_modeling_functions import calcBackgroundRMS\nfrom Lens_Modeling_Auto.auto_modeling_functions import prepareData\nfrom Lens_Modeling_Auto.auto_modeling_functions import get_kwarg_names\nfrom Lens_Modeling_Auto.auto_modeling_functions import printMemory\nfrom Lens_Modeling_Auto.auto_modeling_functions import mask_for_sat\nfrom Lens_Modeling_Auto.auto_modeling_functions import estimate_radius\nfrom Lens_Modeling_Auto.auto_modeling_functions import find_lens_gal\nfrom Lens_Modeling_Auto.fit_sequence_functions import initial_model_params\nfrom Lens_Modeling_Auto.fit_sequence_functions import initial_modeling_fit\nfrom Lens_Modeling_Auto.fit_sequence_functions import initial_fits_arcs_masked\nfrom Lens_Modeling_Auto.fit_sequence_functions import initial_fits_arcs_masked_alt\nfrom Lens_Modeling_Auto.fit_sequence_functions import model_deblended\nfrom Lens_Modeling_Auto.fit_sequence_functions import model_deblended_long\nfrom Lens_Modeling_Auto.fit_sequence_functions import full_sampling\nfrom Lens_Modeling_Auto.plot_functions import make_modelPlots\nfrom Lens_Modeling_Auto.plot_functions import make_chainPlots\nfrom Lens_Modeling_Auto.plot_functions import make_cornerPlots\nfrom Lens_Modeling_Auto.plot_functions import save_chain_list\nfrom lenstronomy.Util.mask_util import 
mask_center_2d\n\n\n\n\n#####################################################################################################################\n\n#################################################### User Inputs ####################################################\n\n#####################################################################################################################\n\n# nohup python -u ./Lens_Modeling_Auto/DES_deblended.py > /results_test/output_logs/output1.log &\n \n\n# file paths to image data and results destination [TO DO BY USER]\ndata_path = 'DES_lenses' #path to image data\nresults_path = 'DES_lenses/results_test' #path to designated results folder\n\nif not exists(results_path):\n os.mkdir(results_path)\n\n#Folder names for data, psf, noise map, original image [TO DO BY USER]\nim_path = data_path + '/data' #add name of folder with image data\ndeblended_path = '/DES_lenses/MuSCADeT_models_v4.pkl' #add name of folder with MuSCADeT deblended data\ndeblended_path_alt = '/DES_lenses/MuSCADeT_models_v4_2.pkl' #add name of folder with additional MuSCADeT deblended data\n# LRG_path = data_path + 'output_of_the_network_rescaled/LRG' \n# source_path = data_path + 'output_of_the_network_rescaled/sources'\n# im_path = data_path + '/simulations'\npsf_path = data_path + '/psf' #add name of folder with psf data\nnoise_path = data_path + '/psf' #add name of folder with rms data, OR folder with FITS files that contain exposure times in header files (if using 'EXPTIME' for noise_type)\nnoise_type = 'EXPTIME' # 'NOISE_MAP' or 'EXPTIME'\nband_list = ['g','r','i'] #list of bands\nobj_name_location = 0 # index corresponding to which string of numbers in filenames are the ID \n\n#Modeling Options [TO DO BY USER]\nuse_shapelets = False #If True,then at the end of the modeling it tries shapelets instead of Sersic for the source profile if chi^2 is too large\nfix_seed = False #bool. If True, uses saved seed values for each image from a previous modeling run\nsource_seed_path = '/random_seed_init/' #path to seed values to be used\nuse_mask = True #bool; whether or not masks should be used in the modeling\nmask_pickle_path = '/masks/'#path to masks created previously. If None, new masks will be created by the script\nMask_rad_file = None #path to csv file or 'None'\n\n#model lists\nlens_model_list = ['SIE','SHEAR'] \nsource_model_list = ['SERSIC_ELLIPSE']\nlens_light_model_list = ['SERSIC_ELLIPSE']\npoint_source_model_list = None\nthis_is_a_test = False #If true, changes PSO and MCMC settings to make modeling very fast (for debugging/troubleshooting)\nnumCores = 1 # number of CPUs to use \n\n#path to Reff and n_s source distributions that lenstronomy uses for kde prior method. \n#Warning: Method is very slow. Better to set to None\nkde_prior_path = None #'/Users/markmaus/Desktop/Physics_EPFL/Specialization_Project/kde_priors/'\nif kde_prior_path != None:\n with open(kde_prior_path + 'R_source.pickle', 'rb') as handle:\n kde_Rsource = pickle.load(handle)\n \n with open(kde_prior_path + 'n_source.pickle', 'rb') as handle:\n kde_nsource = pickle.load(handle)\nelse:\n kde_Rsource = None\n kde_nsource = None\n\n#specify IDs of specific images to model. 
Otherwise model all images in data folder \nselect_objects = None #['03310601','06653211','06788344','14083401',\n# '14327423','15977522','16033319','17103670',\n# '19990514'] #List of strings with object IDs, or None\n\n\n# Additional info for images [TO DO BY USER]\ndeltaPix = 0.27\nzeroPt = 30\npsf_upsample_factor = 1\nra_dec = 'csv' # 'csv', 'header', or 'None'\nra_dec_loc = '.csv'#path to csv file or header file, or 'None'\nMask_rad_file = None #'.csv' #path to csv file with mask radii or 'None'\n\nid_col_name = 'id_1'\n\nprintMemory('Beginning')\n\n#####################################################################################################################\n\n########################################### Find Data and sort filenames ############################################\n\n#####################################################################################################################\n\n#unpack deblended pkl file:\nwith open(deblended_path, 'rb') as handle:\n data_structure = pickle.load(handle)\n \nwith open(deblended_path_alt, 'rb') as handle:\n data_structure_alt = pickle.load(handle)\n \nLRG_all_data = deepcopy(data_structure[2])\nsource_all_data = deepcopy(data_structure[3])\n\nLRG_all_data_alt = deepcopy(data_structure_alt[2])\nsource_all_data_alt = deepcopy(data_structure_alt[3])\n\nim_files = [f for f in listdir(im_path) if isfile('/'.join([im_path,f]))]\n# LRG_files = [f for f in listdir(LRG_path) if isfile('/'.join([LRG_path,f]))]\n# source_files = [f for f in listdir(source_path) if isfile('/'.join([source_path,f]))]\n\n# im_files = deepcopy(data_structure[0])\n\npsf_files,noise_files = [],[]\npsf_files_dict, noise_files_dict = {},{}\n\nfor b in band_list: \n psf_files.append([f for f in listdir(psf_path + '/' + b) if isfile('/'.join([psf_path + '/' + b,f]))])\n noise_files.append([f for f in listdir(noise_path + '/' + b) if isfile('/'.join([noise_path + '/' + b,f]))])\n psf_files_dict[b] = [f for f in listdir(psf_path + '/' + b) if isfile('/'.join([psf_path + '/' + b,f]))]\n noise_files_dict[b] = [f for f in listdir(noise_path + '/' + b) if isfile('/'.join([noise_path + '/' + b,f]))]\n\n\n# print(im_files[:10])\n \nobj_names = []\nif not select_objects:\n for x in im_files:\n obj_names.append(re.findall('\\d+', x)[obj_name_location])\nelse: obj_names = deepcopy(select_objects)\n\n# obj_names = obj_names[:10]\n\ndata_pairs_dicts = []\nfor i,obj in enumerate(obj_names):\n for x in im_files:\n if int(obj) == int(re.findall('\\d+', x)[obj_name_location]): im = x\n \n# for y in LRG_files:\n# if obj == re.findall('\\d+', y)[obj_name_location]: LRG = y\n \n# for z in source_files:\n# if obj == re.findall('\\d+', z)[obj_name_location]: source = z\n\n psf = {}\n for b in band_list:\n for file in psf_files_dict[b]:\n if int(obj) == int(re.findall('\\d+', file)[obj_name_location]): psf[b] = '/'.join([b,file])\n\n noise = {}\n for b in band_list:\n for file in noise_files_dict[b]:\n if int(obj) == int(re.findall('\\d+', file)[obj_name_location]): noise[b]= '/'.join([b,file])\n\n if ra_dec == 'csv':\n df_info = pd.read_csv(ra_dec_loc)\n for j in range(len(df_info.loc[:,:])):\n if int(df_info.loc[j,'id']) == int(obj): RA,DEC = df_info.loc[j,'ra'],df_info.loc[j,'dec']\n else: RA, DEC = 'N/A','N/A'\n \n data_pairs_dicts.append({'image_data': im , \n# 'LRG_data': LRG,'source_data': source,\n 'psf': psf , 'noise_map': noise, \n 'noise_type': noise_type,'object_ID': str(int(obj)),'RA': RA, 'DEC': DEC})\n\ndata_pairs_dicts = sorted(data_pairs_dicts, key=lambda k: 
float(k['object_ID']))\ndata_pairs_cut = []\nprint('\\n')\nprint('############## Files Organized #################')\nprint('files to model:')\nprint('\\n')\ncount = 0\nfor i,x in enumerate(data_pairs_dicts): \n if (not x['psf']) or (not x['noise_map']):\n# if (not x['psf']) or (not x['noise_map']) or (not x['LRG_data']) or (not x['source_data']):\n continue\n count += 1\n print('image {}'.format(count))\n print('ID: {}'.format(x['object_ID']))\n print('RA: {}, DEC: {}'.format(x['RA'],x['DEC']))\n print('Full Image data: ',x['image_data'])\n# print('LRG data: ',x['LRG_data'])\n# print('Lensed source data: ',x['source_data'])\n print('PSF: ',x['psf'])\n print('Noise: ',x['noise_map'])\n print('\\n')\n data_pairs_cut.append(x)\n \ndata_pairs_dicts = deepcopy(data_pairs_cut)\nprint('\\n')\nprint('I will now begin modeling the images')\nprint('\\n') \n \n##################################################################################################################### \n\n################################################### Begin Modeling ##################################################\n\n#####################################################################################################################\n\nf = open(results_path + \"/initial_params.txt\",\"w\")#append mode\nf.write('\\n' + '###############################################################################################' + ' \\n')\nf.write('\\n')\nf.write('\\n' + '################################### Modeling Initial Params ###################################' + ' \\n')\nf.write('\\n')\nf.write('\\n' + '###############################################################################################' + ' \\n')\nf.write('\\n')\nf.write('lenstronomy version: {}'.format(lenstronomy.__version__))\nf.write('\\n')\nf.write('numpy version: {}'.format(np.__version__))\nf.write('\\n')\nf.write('astropy version: {}'.format(astropy.__version__))\nf.write('\\n')\nf.write('scipy version: {}'.format(scipy.__version__))\nf.write('\\n')\nf.close()\n\nprintMemory('Before loop')\n\ntic0 = time.perf_counter()\n\nf = open(results_path + \"/Modeling_times.txt\",\"w\")\nf.write('\\n' + '###############################################################################################' + ' \\n')\nf.write('\\n')\nf.write('\\n' + '######################################## Modeling Times #######################################' + ' \\n')\nf.write('\\n')\nf.write('\\n' + '###############################################################################################' + ' \\n')\nf.close()\n\nfor it in range(len(data_pairs_dicts[48:])): \n it += 48\n \n# if (not data_pairs_dicts[it]['psf']) or (not data_pairs_dicts[it]['noise_map']):\n# continue\n# elif (good_images_indices != None) and (it not in good_images_indices):\n# continue\n \n \n print('\\n')\n print('modeling image {}'.format(it + 1))\n print('\\n')\n print(data_pairs_dicts[it])\n print('\\n')\n tic = time.perf_counter()\n \n f = open(results_path + \"/initial_params.txt\",\"a\")#append mode\n f.write('\\n')\n f.write('\\n' + '################################### image {} ###################################'.format(it + 1) + ' \\n')\n f.write('\\n')\n print(data_pairs_dicts[it],file = f)\n f.write('\\n')\n f.close()\n \n file = data_pairs_dicts[it]['image_data']\n \n #band_index = np.where(np.array(band_list) == band)[0][0]\n data,hdr = openFITS(im_path + '/' + file)\n# LRG_data,_ = openFITS(LRG_path + '/' + data_pairs_dicts[it]['LRG_data'])\n# source_data,_ = openFITS(source_path + '/' + 
data_pairs_dicts[it]['source_data'])\n\n \n \n if file in data_structure[0]:\n index = data_structure[0].index(file)\n LRG_data = deepcopy(LRG_all_data[index])\n source_data = deepcopy(source_all_data[index])\n else:\n index = data_structure_alt[0].index(file)\n LRG_data = deepcopy(LRG_all_data_alt[index])\n source_data = deepcopy(source_all_data_alt[index])\n \n psf, psf_hdr = [],[]\n noise_map,noise_hdr = [],[]\n for b in band_list:\n d,h = openFITS(psf_path + '/' + data_pairs_dicts[it]['psf'][b])\n if np.ndim(d)== 3:\n psf.extend(d)\n elif np.ndim(d)== 2:\n psf.append(d)\n psf_hdr.append(h)\n \n # psf.extend(d)\n # psf_hdr.extend(h)\n# psf.append(d)\n# psf_hdr.append(h)\n\n\n d2,h2 = openFITS(noise_path + '/' + data_pairs_dicts[it]['noise_map'][b])\n if np.ndim(d2)== 3:\n noise_map.extend(d2)\n elif np.ndim(d2)== 2:\n noise_map.append(d2)\n noise_hdr.append(h2)\n \n # noise_map.extend(d2)\n # noise_hdr.extend(h2)\n# noise_map.append(d2)\n# noise_hdr.append(h2)\n\n\n data_dict = {'image_data': [], 'LRG_data': [], 'source_data': [],'image_hdr': [], \n 'psf': psf, 'psf_hdr': psf_hdr, \n 'noise_map': noise_map, 'noise_hdr': noise_hdr}\n \n printMemory('After openFITS')\n\n for i,b in enumerate(band_list):\n# for j,h in enumerate(hdr):\n# if h['BAND'] == b:\n# data_dict['image_data'].append(data[i])\n# data_dict['image_hdr'].append(hdr[0])\n if np.ndim(data) == 4:\n data_dict['image_data'].append(data[0][i])\n elif np.ndim(data) == 3:\n data_dict['image_data'].append(data[i])\n data_dict['image_hdr'].append(hdr[0])\n \n if np.ndim(LRG_data) == 4:\n data_dict['LRG_data'].append(LRG_data[0][i])\n elif np.ndim(data) == 3:\n data_dict['LRG_data'].append(LRG_data[i])\n \n if np.ndim(source_data) == 4:\n data_dict['source_data'].append(source_data[0][i])\n elif np.ndim(data) == 3:\n data_dict['source_data'].append(source_data[i])\n# data_dict['image_hdr'].append(hdr[0])\n \n print('calculating background values')\n print('\\n')\n background_rms = calcBackgroundRMS(data_dict['image_data']) #calculate rms background\n background_rms_LRG = calcBackgroundRMS(data_dict['LRG_data'])\n background_rms_source = calcBackgroundRMS(data_dict['source_data'])\n print('\\n')\n\n lens_info = []\n LRG_info = []\n source_info = []\n\n for i,x in enumerate(data_dict['image_data']):\n\n lens_info.append({'deltaPix': deltaPix ,\n 'numPix': len(x),\n 'background_rms': background_rms[i],\n 'psf_type': 'PIXEL',\n 'psf_upsample_factor': psf_upsample_factor})\n\n if noise_type == 'EXPTIME': \n lens_info[i]['exposure_time'] = data_dict['noise_hdr'][i][0]['EXPTIME']\n# lens_info[i]['exposure_time'] = 800.\n lens_info[i]['noise_map'] = None\n else:\n lens_info[i]['exposure_time'] = None\n lens_info[i]['noise_map'] = data_dict['noise_map'][i] \n \n for i,x in enumerate(data_dict['LRG_data']):\n\n LRG_info.append({'deltaPix': deltaPix ,\n 'numPix': len(x),\n 'background_rms': background_rms_LRG[i],\n 'psf_type': 'PIXEL',\n 'psf_upsample_factor': psf_upsample_factor})\n\n if noise_type == 'EXPTIME': \n LRG_info[i]['exposure_time'] = data_dict['noise_hdr'][i][0]['EXPTIME']\n# LRG_info[i]['exposure_time'] = 800.\n LRG_info[i]['noise_map'] = None\n else:\n LRG_info[i]['exposure_time'] = None\n LRG_info[i]['noise_map'] = data_dict['noise_map'][i] \n \n for i,x in enumerate(data_dict['source_data']):\n\n source_info.append({'deltaPix': deltaPix ,\n 'numPix': len(x),\n 'background_rms': background_rms_source[i],\n 'psf_type': 'PIXEL',\n 'psf_upsample_factor': psf_upsample_factor})\n\n if noise_type == 'EXPTIME': \n 
source_info[i]['exposure_time'] = data_dict['noise_hdr'][i][0]['EXPTIME']\n# source_info[i]['exposure_time'] = 800.\n source_info[i]['noise_map'] = None\n else:\n source_info[i]['exposure_time'] = None\n source_info[i]['noise_map'] = data_dict['noise_map'][i] \n\n kwargs_data, kwargs_psf = prepareData(lens_info,data_dict['image_data'],\n data_dict['psf']) \n kwargs_data_LRG, kwargs_psf = prepareData(LRG_info,data_dict['LRG_data'],\n data_dict['psf']) \n kwargs_data_source, kwargs_psf = prepareData(source_info,data_dict['source_data'],\n data_dict['psf']) \n \n \n printMemory('After prepareData')\n \n \n ############################## Prepare Mask ############################\n \n c_x,c_y = find_lens_gal(kwargs_data[-1]['image_data'],deltaPix,show_plot=False,title=data_pairs_dicts[it]['object_ID'])\n \n if Mask_rad_file == None:\n mask_size_ratio = None\n mask_size_px,mask_size_as = estimate_radius(kwargs_data[0]['image_data'],\n deltaPix,center_x=c_x,center_y=c_y,show_plot=False, name = None)\n else:\n df_mask = pd.read_csv(Mask_rad_file)\n mask_size_ratio = None\n mask_size_as = float(df_mask.loc[df_mask[id_col_name] == int(data_pairs_dicts[it]['object_ID']),'dst_arcsec']) #+8.*deltaPix \n \n \n gal_mask_list = []\n gal_rad_as = 6 * deltaPix \n mask_list = []\n mask_dict_list = []\n source_mask_list = []\n# sizes_As = []\n# sizes_px = []\n\n if use_mask:\n if mask_pickle_path != None:\n print('Using saved mask instead of creating one')\n# mask_list = []\n for k,data in enumerate(kwargs_data): \n with open(mask_pickle_path + '{}/{}.pickle'.format(band_list[k],data_pairs_dicts[it]['object_ID']), 'rb') as handle:\n mask_dict = pickle.load(handle)\n mask_list.append(mask_dict['mask'])\n mask_dict_list.append(mask_dict)\n \n mask_gal = mask_for_sat(data['image_data'],deltaPix,\n lens_rad_arcsec = gal_rad_as,\n center_x=c_x,center_y=c_y,\n lens_rad_ratio = None, \n show_plot = False)\n gal_mask_list.append(mask_gal)\n \n mask_path = results_path + '/masks'\n \n \n \n if mask_pickle_path != mask_path:\n if not exists(mask_path):\n os.mkdir(mask_path)\n band_path = mask_path + '/' + band_list[k]\n \n if not exists(band_path):\n os.mkdir(band_path)\n \n with open(band_path + '/{}.pickle'.format(data_pairs_dicts[it]['object_ID']), 'wb') as handle:\n pickle.dump(mask_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n \n \n else:\n for k,data in enumerate(kwargs_data): \n if not exists(results_path + '/masks'):\n os.mkdir(results_path + '/masks')\n mask_path = results_path + '/masks'\n \n band_path = mask_path + '/' + band_list[k]\n if not exists(band_path):\n os.mkdir(band_path)\n\n mask = mask_for_sat(data['image_data'],deltaPix,\n lens_rad_arcsec = mask_size_as,\n center_x=c_x,center_y=c_y,\n lens_rad_ratio = mask_size_ratio, \n show_plot = False)\n mask_list.append(mask)\n\n\n mask_dict = {}\n mask_dict['c_x'] = c_x\n mask_dict['c_y'] = c_y\n mask_dict['size arcsec'] = mask_size_as\n mask_dict['size pixels'] = mask_size_px\n mask_dict['mask'] = mask\n mask_dict_list.append(mask_dict)\n with open(band_path + '/{}.pickle'.format(data_pairs_dicts[it]['object_ID']), 'wb') as handle:\n pickle.dump(mask_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n # sizes_As.append(size_Arcsec)\n # sizes_px.append(size_pix)\n \n \n if mask_center:\n for i,m in enumerate(mask_dict_list):\n source_mask = deepcopy(m['mask'])\n numPix = len(source_mask)\n center_mask = np.zeros([numPix,numPix])\n\n for j in range(numPix):\n center_mask[j] = mask_center_2d(c_x,c_y, 3, np.linspace(0,numPix - 1,numPix), j)\n 
source_mask[center_mask == 0] = 0\n source_mask_list.append(source_mask)\n else:\n source_mask_list = deepcopy(mask_list)\n \n else: mask_list = None\n \n \n \n file = open(results_path+\"/initial_params.txt\",\"a\")#append mode \n file.write(\"Mask Size: \\n\")\n file.write(\"{} pixels,{} arcsec \\n\".format(mask_dict_list[0]['size pixels'],mask_dict_list[0]['size arcsec']))\n file.write(\"Mask Center: \\n\")\n file.write(\"({},{}) \\n\".format(mask_dict_list[0]['c_x'],mask_dict_list[0]['c_y']))\n if mask_pickle_path != None:\n file.write(mask_pickle_path)\n file.close() \n \n ################################################################################################################# \n \n ################################################## Initial PSOs #################################################\n \n ################################################################################################################# \n print('\\n')\n print('I will start with initial fits of the lens, source and lens light profiles')\n print('\\n')\n \n \n if this_is_a_test:\n fitting_kwargs_list = [['PSO', {'sigma_scale': 1, 'n_particles': 50, 'n_iterations': 50,'threadCount': numCores}]\n# ,['MCMC', {'n_burn': 0, 'n_run': 50, 'walkerRatio': 10, 'sigma_scale': .1,'threadCount':numCores}]\n ]\n else:\n fitting_kwargs_list = [['PSO', {'sigma_scale': 1, 'n_particles': 100, 'n_iterations': 2000,'threadCount': numCores}]\n #,['MCMC', {'n_burn': 0, 'n_run': 100, 'walkerRatio': 10, 'sigma_scale': .1,'threadCount':numCores}]\n ]\n\n \n if fix_seed:\n with open(source_seed_path + '{}.pickle'.format(data_pairs_dicts[it]['object_ID']), 'rb') as handle:\n seed_val = pickle.load(handle)\n print('Using seed from: {}'.format(source_seed_path))\n print(seed_val)\n else: seed_val = None\n \n name = '{}.pickle'.format(data_pairs_dicts[it]['object_ID'])\n save_seed_path = results_path + '/random_seed_init/'\n save_seed_file = save_seed_path + name\n \n init_chainList_path = results_path + '/chain_lists_init/'\n init_chainList_file = init_chainList_path + name\n if not exists(save_seed_path):\n os.mkdir(save_seed_path)\n if not exists(init_chainList_path):\n os.mkdir(init_chainList_path)\n \n \n (lens_initial_params,source_initial_params,\n lens_light_initial_params,ps_initial_params) = initial_model_params(lens_model_list)\n \n \n# (kwargs_params,kwargs_fixed, kwargs_result,\n# chain_list,kwargs_likelihood, kwargs_model, kwargs_data_joint, \n# multi_band_list, kwargs_constraints) = model_deblended_long(fitting_kwargs_list,lens_model_list,\n# source_model_list,lens_light_model_list,\n# lens_initial_params,source_initial_params,\n# lens_light_initial_params,kwargs_data,\n# kwargs_data_LRG,kwargs_data_source,kwargs_psf,\n# num = it+1,object_ID = data_pairs_dicts[it]['object_ID'],\n# mask_list = mask_list,\n# source_mask_list = source_mask_list,\n# gal_mask_list = gal_mask_list,fix_seed = fix_seed,\n# fix_seed_val = seed_val,save_seed_file = save_seed_file, \n# chainList_file = init_chainList_file,\n# kde_nsource=kde_nsource,kde_Rsource=kde_Rsource,\n# results_path = results_path, band_list = band_list\n# )\n (kwargs_params,kwargs_fixed, kwargs_result,\n chain_list,kwargs_likelihood, kwargs_model, kwargs_data_joint, \n multi_band_list, kwargs_constraints) = model_deblended(fitting_kwargs_list,lens_model_list,\n source_model_list,lens_light_model_list,\n lens_initial_params,source_initial_params,\n lens_light_initial_params,kwargs_data,\n kwargs_data_LRG,kwargs_data_source,kwargs_psf,\n num = it+1,object_ID = 
data_pairs_dicts[it]['object_ID'],\n mask_list = mask_list,\n source_mask_list = source_mask_list,\n gal_mask_list = gal_mask_list,fix_seed = fix_seed,\n fix_seed_val = seed_val,save_seed_file = save_seed_file, \n chainList_file = init_chainList_file,\n results_path = results_path, band_list = band_list\n )\n \n# kwargs_params,kwargs_fixed, kwargs_result,\n# chain_list,kwargs_likelihood, kwargs_model, \n# kwargs_data_joint, multi_band_list, kwargs_constraints = initial_modeling_fit(fitting_kwargs_list,lens_model_list,source_model_list,\n# lens_light_model_list,lens_initial_params,\n# source_initial_params,lens_light_initial_params,\n# kwargs_data,kwargs_psf,mask_list)\n \n# kwargs_params,kwargs_fixed, kwargs_result,chain_list,kwargs_likelihood, kwargs_model, kwargs_data_joint, multi_band_list, kwargs_constraints= initial_fits_arcs_masked(fitting_kwargs_list,lens_model_list,\n# source_model_list,lens_light_model_list,\n# lens_initial_params,source_initial_params,\n# lens_light_initial_params,kwargs_data,\n# kwargs_psf,mask_list = mask_list,\n# gal_mask_list = gal_mask_list)\n \n# kwargs_params,kwargs_fixed, kwargs_result,chain_list,kwargs_likelihood, kwargs_model, kwargs_data_joint, multi_band_list, kwargs_constraints= initial_fits_arcs_masked_alt(fitting_kwargs_list,lens_model_list,\n# source_model_list,lens_light_model_list,\n# lens_initial_params,source_initial_params,\n# lens_light_initial_params,kwargs_data,\n# kwargs_psf,mask_list = mask_list,\n# gal_mask_list = gal_mask_list)\n\n# exec(open('Lens_Modeling_Auto/initial_modeling_fit.py').read())\n \n printMemory('After initial fit')\n \n \n toc1 = time.perf_counter() \n \n print('\\n')\n print('First sampling took: {:.2f} minutes'.format((toc1 - tic)/60.0))\n \n f = open(results_path + \"/Modeling_times.txt\",\"a\")\n f.write('\\n')\n f.write('Image: {}'.format(it+1))\n f.write('\\n')\n f.write('Pre-sampling optimization time: {:.4f} minutes'.format((toc1 - tic)/60.0))\n f.close()\n \n multi_source_model_list = []\n multi_lens_light_model_list = []\n \n for i in range(len(kwargs_data)):\n multi_source_model_list.extend(deepcopy(source_model_list))\n multi_lens_light_model_list.extend(deepcopy(lens_light_model_list))\n \n model_kwarg_names = get_kwarg_names(lens_model_list,multi_source_model_list,\n multi_lens_light_model_list,kwargs_fixed)\n \n \n ################################################################################################################# \n \n ################################################# Full Sampling #################################################\n \n ################################################################################################################# \n \n print('\\n')\n print('I will now run the full sampling')\n print('\\n')\n \n if this_is_a_test:\n fitting_kwargs_list = [['PSO', {'sigma_scale': 0.1, 'n_particles': 50, 'n_iterations': 50,'threadCount': numCores}]\n# ,['MCMC', {'n_burn': 0, 'n_run': 50, 'walkerRatio': 10, 'sigma_scale': .05,'threadCount':numCores}]\n ]\n else:\n fitting_kwargs_list = [['PSO', {'sigma_scale': 1, 'n_particles': 150, 'n_iterations': 2000,'threadCount': numCores}]\n ,['MCMC', {'n_burn': 200, 'n_run': 1000, 'walkerRatio': 10, 'sigma_scale': .05,'threadCount':numCores}]\n ]\n \n \n \n (chain_list,kwargs_result,kwargs_params,\n kwargs_likelihood, kwargs_model, \n kwargs_data_joint, multi_band_list, \n kwargs_constraints) = full_sampling(fitting_kwargs_list,kwargs_params,kwargs_data, \n kwargs_psf,lens_model_list,source_model_list,\n lens_light_model_list,\n 
kde_nsource=kde_nsource,\n kde_Rsource=kde_Rsource,\n mask_list=mask_list)\n \n# if not this_is_a_test:\n# exec(open('Lens_Modeling_Auto/Full_Sampling.py').read())\n \n printMemory('After Full Sampling')\n\n toc2 = time.perf_counter()\n print('\\n')\n print('Full sampling took: {:.2f} minutes'.format((toc2 - toc1)/60.0), '\\n',\n 'Total time for this image: {:.2f} minutes'.format((toc2 - tic)/60.0))\n \n f = open(results_path + \"/Modeling_times.txt\",\"a\")\n f.write('\\n')\n f.write('Main Sampling time: {:.4f} minutes'.format((toc2 - toc1)/60.0))\n f.close()\n \n print('\\n')\n \n ################################################################################################################# \n \n ######################################### Create Plots and Save Results #########################################\n \n ################################################################################################################# \n \n# if it == 0:\n if not exists(results_path + '/modelPlot_results'):\n os.mkdir(results_path + '/modelPlot_results')\n if not exists(results_path + '/chainPlot_results'):\n os.mkdir(results_path + '/chainPlot_results')\n if not exists(results_path + '/cornerPlot_results'):\n os.mkdir(results_path + '/cornerPlot_results')\n if not exists(results_path + '/chain_lists'):\n os.mkdir(results_path + '/chain_lists')\n \n print('creating plots of results')\n \n modelPlot_path = results_path + '/modelPlot_results'\n chainPlot_path = results_path + '/chainPlot_results'\n cornerPlot_path = results_path + '/cornerPlot_results'\n chainList_path = results_path + '/chain_lists'\n \n red_X_squared = make_modelPlots(multi_band_list,kwargs_model,kwargs_result,\n kwargs_data,kwargs_psf, lens_info,\n lens_model_list,source_model_list,lens_light_model_list,\n mask_list,band_list,modelPlot_path,it+1,data_pairs_dicts[it]['object_ID'])\n \n printMemory('After modelPlot')\n \n save_chain_list(chain_list,chainList_path,it+1,data_pairs_dicts[it]['object_ID'])\n \n printMemory('After saving chain_list')\n \n del chain_list\n \n printMemory('After clearing chain_list')\n \n \n# make_chainPlots(chain_list, chainPlot_path, it+1, data_pairs_dicts[it]['object_ID'])\n# printMemory('After chainPlot')\n \n# make_cornerPlots(chain_list,cornerPlot_path,it+1, data_pairs_dicts[it]['object_ID'])\n \n# printMemory('After cornerPlot')\n \n# exec(open('Lens_Modeling_Auto/plot_results.py').read())\n \n# printMemory('After plot_results')\n \n csv_path = results_path\n \n \n #Create csv files\n# if it == 0:\n if not exists(csv_path + '/lens_results.csv'):\n exec(open('Lens_Modeling_Auto/create_csv.py').read())\n# exec(open('Lens_Modeling_Auto/create_csv_old.py').read())\n \n #Save results in csv file\n print('\\n')\n print('writing model parameter results to csv files')\n \n toc3 = time.perf_counter()\n image_model_time = (toc3 - tic)/60.0\n print(kwargs_result)\n exec(open('Lens_Modeling_Auto/save_to_csv_full.py').read())\n \n# exec(open('Lens_Modeling_Auto/save_to_csv_full_old.py').read())\n \n ################################################################################################################# \n \n ################################################ Model Shapelets ################################################\n \n #################################################################################################################\n \n if ((red_X_squared >= 1.5) and (use_shapelets == True)):\n \n n_max = 10\n print('\\n')\n print('Reduced Chi^2 is still too high! 
I will now try modeling the source with shapelets with n_max = {}'.format(n_max))\n print('\\n')\n \n source_model_list = ['SHAPELETS']\n \n multi_source_model_list = []\n \n for i in range(len(kwargs_data)):\n multi_source_model_list.extend(deepcopy(source_model_list))\n \n \n fixed_source = []\n kwargs_source_init = []\n kwargs_source_sigma = []\n kwargs_lower_source = []\n kwargs_upper_source = []\n \n beta_init = kwargs_result['kwargs_source'][0]['R_sersic'] / 3.\n #beta_init = 0.05\n \n fixed_source.append({'n_max': n_max,\n 'center_x': kwargs_result['kwargs_source'][0]['center_x'],\n 'center_y': kwargs_result['kwargs_source'][0]['center_y']})\n \n kwargs_source_init.append({'center_x': 0.01, 'center_y': 0.01, 'beta': beta_init})\n kwargs_source_sigma.append({'center_x': 0.01, 'center_y': 0.01, 'beta': 0.05})\n kwargs_lower_source.append({'center_x': -1.5, 'center_y': -1.5, 'beta': beta_init / np.sqrt(n_max + 1)})\n kwargs_upper_source.append({'center_x': 1.5, 'center_y': 1.5, 'beta': beta_init * np.sqrt(n_max + 1)})\n \n source_params_update = [[],[],[],[],[]]\n for i in range(len(kwargs_data)):\n source_params_update[0].extend(deepcopy(kwargs_source_init))\n source_params_update[1].extend(deepcopy(kwargs_source_sigma))\n source_params_update[2].extend(deepcopy(fixed_source))\n source_params_update[3].extend(deepcopy(kwargs_lower_source))\n source_params_update[4].extend(deepcopy(kwargs_upper_source))\n \n \n \n lens_params_update = deepcopy(lens_params) \n lens_light_params_update = deepcopy(lens_light_params)\n\n lens_params_update[0] = deepcopy(kwargs_result['kwargs_lens'])\n #source_params_update[0] = deepcopy(kwargs_result['kwargs_source'])\n lens_light_params_update[0] = deepcopy(kwargs_result['kwargs_lens_light'])\n \n \n file = open(results_path+\"/initial_params.txt\",\"a\")#append mode \n file.write('\\n')\n file.write('Addition of Shapelets: \\n')\n file.write('\\n')\n file.write(\"Model lists: \\n\")\n file.write(\"lens model: \" + str(lens_model_list) + \" \\n\")\n file.write(\"source model: \" + str(multi_source_model_list) + \" \\n\")\n file.write(\"lens light model: \"+ str(multi_lens_light_model_list) + \" \\n\")\n \n file.write(\"\\n\")\n file.write(\"kwargs_source (init,sigma,fixed,lower,upper): \\n\") \n# file.write(\"\\n\")\n\n for i in range(len(source_params_update)):\n# file.write(\"\\n\")\n print(source_params_update[i], file=file)\n# file.write(\"\\n\")\n \n file.close()\n \n# SHAPELETS_indices = [i for i,x in enumerate(multi_source_model_list) if x == 'SHAPELETS']\n\n# for j in SHAPELETS_indices:\n# source_params_update[0][j]['beta'] = kwargs_result['kwargs_source'][j-1]['R_sersic']\n\n kwargs_params = {'lens_model': lens_params_update,\n 'source_model': source_params_update,\n 'lens_light_model': lens_light_params_update}\n \n kwargs_fixed = {'kwargs_lens': deepcopy(lens_params_update[2]), \n 'kwargs_source': deepcopy(source_params_update[2]), \n 'kwargs_lens_light': deepcopy(lens_light_params_update[2])}\n \n model_kwarg_names = get_kwarg_names(lens_model_list,multi_source_model_list,\n multi_lens_light_model_list,kwargs_fixed)\n #exec(open('Lens_Modeling_Auto/update_source_params_lists.py').read())\n \n \n# model_kwarg_names = get_kwarg_names(lens_model_list,multi_source_model_list,\n# multi_lens_light_model_list,kwargs_fixed)\n \n \n if this_is_a_test:\n fitting_kwargs_list = [['PSO', {'sigma_scale': 1, 'n_particles': 50, 'n_iterations': 100,'threadCount': numCores}]\n ,['MCMC', {'n_burn': 0, 'n_run': 10, 'walkerRatio': 10, 'sigma_scale': 
.05,'threadCount':numCores}]\n ]\n else:\n fitting_kwargs_list = [['PSO', {'sigma_scale': 1, 'n_particles': 300, 'n_iterations': 2000,'threadCount': numCores}]\n ,['MCMC', {'n_burn': 200, 'n_run': 800, 'walkerRatio': 10, 'sigma_scale': .05,'threadCount':numCores}]\n ]\n \n \n \n# fitting_kwargs_list = [['PSO', {'sigma_scale': 1, 'n_particles': 200, 'n_iterations': 2000,'threadCount': numCores}]\n# ,['MCMC', {'n_burn': 200, 'n_run': 800, 'walkerRatio': 10, 'sigma_scale': .05}]]\n# fitting_kwargs_list = [['PSO', {'sigma_scale': 0.5, 'n_particles': 50, 'n_iterations': 100,'threadCount':numCores}],\n# ['MCMC', {'n_burn': 0, 'n_run': 10, 'walkerRatio': 10, 'sigma_scale': .1,'threadCount':numCores}]]\n \n \n \n \n \n \n exec(open('Lens_Modeling_Auto/model_shapelets.py').read())\n toc4 = time.perf_counter()\n shapelets_modeling_time = (toc4 - tic)/60.0\n print('\\n')\n print('Full sampling with shapelets (n_max = {}) took: {:.2f} minutes'.format(n_max,(toc4 - toc3)/60.0), '\\n',\n 'Total time: {:.2f} minutes'.format((toc4 - tic)/60.0))\n\n \n csv_path = results_path\n #Save results in csv file\n print('\\n')\n print('writing model parameter results to csv files')\n \n print(kwargs_result)\n \n toc_end = time.perf_counter()\n image_model_time = (toc_end - tic)/60.0\n \n exec(open('Lens_Modeling_Auto/save_to_csv_lens.py').read())\n# exec(open('Lens_Modeling_Auto/save_to_csv_lens_old.py').read())\n print('\\n')\n print('image {} modeling completed!'.format(it+1))\n print('\\n')\n \n print('Modeling time for this image: {} minutes'.format((toc_end - tic)/60.0), '\\n',\n 'Total time of this modeling run: {} hours'.format((toc_end - tic0)/3600.0))\n \n print('\\n')\n \n f = open(results_path + \"/Modeling_times.txt\",\"a\")\n f.write('\\n')\n f.write('Modeling time for this image: {:.4f} minutes'.format((toc_end - tic)/60.0))\n f.write('\\n')\n f.write('Total time of this modeling run: {:.4f} hours'.format((toc_end - tic0)/3600.0))\n f.write('\\n')\n f.close()\n \n printMemory('After save to csv/end of image')\n \n\n \n \n \n","sub_path":"DES_deblended.py","file_name":"DES_deblended.py","file_ext":"py","file_size_in_byte":42110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"30667689","text":"\n\nimport kivy\nkivy.require('1.0.7')\n\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.config import Config\n\nfrom kivy.core.window import Window\nfrom kivy.uix.vkeyboard import VKeyboard\n\n\n# from kivy.uix.image import Image\n\n\n# Config.set('kivy', 'keyboard_mode', 'systemandmulti')\n\n\n\nclass root(BoxLayout):\n\n def __init__(self, **kwargs):\n super(root, self).__init__(**kwargs)\n self.request_key\n\n # -- For define\n def product(self, instance):\n try:\n # -- Ertefa Frame\n self.ertefaFrame.text = str(int(self.ertefa.text) - 6)\n # -- Arze Frame\n self.arzFrame.text = str(int(self.arz.text) - 6)\n # -- Motahrek\n self.motaharek.text = str(float(self.ertefa.text) - 7.5)\n # -- Ertefa Tur\n self.ertefaTur.text = str(float(self.ertefa.text) - 3.5)\n # -- Game Tur\n self.gamTur.text = str(float(self.arz.text) / 2.5)\n # -- Metre morab\n\n def NRound(x):\n diff = float(str(x-int(x))[:4])\n if diff !=0 and diff <= 0.5:\n x = int(x) + 0.5\n else:\n x = round(x)\n return x\n\n metrz = (float(self.arz.text) / 100) * (float(self.ertefa.text) / 100)\n roundMetr = NRound(metrz)\n\n if roundMetr < 1 :\n self.metr.text = str(\"1\")\n else:\n self.metr.text = str(roundMetr)\n\n\n\n\n # -- Nakh 1 va 2\n self.nakha.text = 
str(((int(self.ertefa.text) + int(self.arz.text) ) *2 ) +30 )\n\n\n # -- Nakh 3\n nakhbb = int(self.ertefa.text)\n\n if nakhbb >= 170 :\n self.nakhb.text = str( ((float(self.ertefa.text) / 4 )*3) + (int(self.arz.text) * 2) +30)\n else:\n self.nakhb.text = str(\"--\")\n\n\n if self.gheymat.text == '' :\n self.gheymat.text = str(int(1))\n self.gheymatKoll.text = ''\n else:\n # -- Gheyamt Koll\n self.gheymatKoll.text = str(float(roundMetr) * float(self.gheymat.text))\n\n except Exception:\n self.ertefa.text = ''\n self.arz.text = ''\n self.gheymat.text = ''\n\n # -- Ertefa Frame\n self.ertefaFrame.text = ''\n # -- Arze Frame\n self.arzFrame.text = ''\n # -- Motahrek\n self.motaharek.text = ''\n # -- Ertefa Tur\n self.ertefaTur.text = ''\n # -- Game Tur\n self.gamTur.text = ''\n # -- Metre morab\n self.metr.text = ''\n # -- Gheyamt Koll\n self.gheymatKoll.text = ''\n\n self.nakha.text = ''\n self.nakhb.text = ''\n\n\n\n def cleanz(self,instance):\n\n # - inputs\n self.ertefa.text = ''\n self.arz.text= ''\n self.gheymat.text=''\n\n\n # -- Ertefa Frame\n self.ertefaFrame.text = ''\n # -- Arze Frame\n self.arzFrame.text = ''\n # -- Motahrek\n self.motaharek.text = ''\n # -- Ertefa Tur\n self.ertefaTur.text = ''\n # -- Game Tur\n self.gamTur.text = ''\n # -- Metre morab\n self.metr.text = ''\n # -- Gheyamt Koll\n self.gheymatKoll.text = ''\n\n self.nakha.text = ''\n self.nakhb.text = ''\n\n\nclass HeroApp(App):\n kv_directory = 'template1'\n\n def build(self):\n return root()\n\n\n\n\nif __name__ in ('__main__', '__android__'):\n HeroApp().run()\n","sub_path":"andriod/main_old Version.py","file_name":"main_old Version.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"567023985","text":"import discord\nimport asyncio\nimport config_queries\nimport ChooseYourAdventure.commands\nimport Jaegermore.commands\nimport WarriorsvsSoldiers.commands\nimport AttackonWikia.commands\n\n# Each instance contains server ID, channel ID, and client as attributes\ninstances = []\n\ncommands_list = ['~host', '~join', '~leave', '~kick', '~start', '~reset', '~add', '~remove', '~roles', '~randomroles', '~fast', '~normal', '~casual', '~ranked', '~players', '~advantage', \n'~next', '~pick', '~kidnap', '~status', '~players', '~rules', '~profile', '~badges', '~gamestats', '~leaderboard', '~lb']\n\nclass Instance():\n def __init__(self, server_id, channel_id, game_name, client):\n self.game_name = game_name\n self.server_id = server_id\n self.channel_id = channel_id\n self.client = client\n self.game = self.create_game(game_name)\n\n def create_game(self, game_name):\n if game_name == 'Choose Your Adventure':\n return ChooseYourAdventure.commands.Game(self.client)\n elif game_name == 'Jaegermore':\n return Jaegermore.commands.Game(self.client)\n elif game_name == 'Warriors vs Soldiers':\n return WarriorsvsSoldiers.commands.Game(self.client, self.channel_id)\n elif game_name == 'Attack on Wikia':\n return AttackonWikia.commands.Game(self.client)\n\n async def msg_handler(self, message):\n # Select the correct game handler to handle the message\n await self.game.msg_handler(message)\n\ndef get_cur_game(message):\n # Returns the game state from games corresponding to the channel\n # Returns None if no game is enabled for the channel\n # If private message, return the game state of Warriors vs Soldiers if person is in game\n cur_games = []\n for instance in instances:\n if str(message.channel.type) == 'private':\n try:\n # For 
multiple wvs games\n if instance.game_name == 'Warriors vs Soldiers' and message.author in list(map(lambda x:x[0], instance.game.state.players)) and \\\n instance.game.state.status not in ['waiting for game', 'waiting for players', 'game ended']:\n cur_games.append(instance)\n except:\n continue\n else:\n if instance.channel_id == message.channel.id:\n cur_games.append(instance)\n break\n\n return cur_games\n\ndef get_config_msg(cur_games):\n config_title = 'Which game would you like to enable for this channel?'\n config_title += '\\n(Current game: ' + (cur_games[0].game_name if cur_games else 'None') + ')'\n config_msg = ':one: **Choose Your Adventure (Locked)\\n\\n\\\n:two: Jaegermore (Locked)\\n\\n\\\n:three: Warriors vs Soldiers\\n\\n\\\n:four: Attack on Wikia (Locked)\\n\\n\\\n:zero: None**'\n config_embed = discord.Embed(title = config_title, description = config_msg, colour=0xE5D2BB)\n config_embed.set_footer(text='Type in the corresponding number to select an option, e.g. \\'3\\' for Warriors vs Soldiers.')\n return config_embed\n\ndef config_bot(config_msg, client):\n # Configures bot for the channel and return embed message\n if config_msg == None:\n return discord.Embed(description = 'Config has timed out after 60 seconds. Type `~config` to start again.', colour=0xE5D2BB)\n else:\n options = {\n '1️⃣':'Choose Your Adventure',\n '2️⃣':'Jaegermore',\n '3️⃣':'Warriors vs Soldiers',\n '4️⃣':'Attack on Wikia',\n '0️⃣':'None',\n '1':'Choose Your Adventure',\n '2':'Jaegermore',\n '3':'Warriors vs Soldiers',\n '4':'Attack on Wikia',\n '0':'None'\n }\n locked_options = {\n '1️⃣':'Choose Your Adventure',\n '2️⃣':'Jaegermore',\n '4️⃣':'Attack on Wikia',\n '1':'Choose Your Adventure',\n '2':'Jaegermore',\n '4':'Attack on Wikia'\n }\n if config_msg.content in locked_options and config_msg.author.id != 238808836075421697:\n locked_msg = 'If you\\'d like to play this game, please join the [Attack on Titan Wiki server](https://discord.gg/attackontitan).'\n locked_embed = discord.Embed(title = 'Game currently unavailable to the public', description = locked_msg, colour=0xE5D2BB)\n return locked_embed\n\n chosen_game = options[config_msg.content]\n\n # Update DB\n config_queries.update_config(config_msg.guild.id, config_msg.channel.id, chosen_game)\n\n # Update instances\n inside = False\n for instance in instances:\n if instance.channel_id == config_msg.channel.id:\n if chosen_game == 'None':\n instances.remove(instance)\n else:\n instance.game = instance.create_game(chosen_game)\n instance.game_name = chosen_game\n inside = True\n break\n if inside == False and chosen_game != 'None':\n instances.append(Instance(config_msg.guild.id, config_msg.channel.id, chosen_game, client))\n\n if chosen_game == 'None':\n embed_msg = 'All games have been disabled for this channel!'\n else:\n embed_msg = chosen_game + ' has been enabled for this channel!'\n \n starting_commands = ''\n if chosen_game == 'Warriors vs Soldiers':\n starting_commands = '❗Type **`~host`** to begin a new game.\\n\\n' + \\\n '❗Type **`~rules`** to view the game rules.\\n\\n' + \\\n '❗Type **`~help`** to see the full list of commands.'\n config_embed = discord.Embed(title = embed_msg, description = starting_commands, colour=0xE5D2BB)\n return config_embed","sub_path":"botconfig.py","file_name":"botconfig.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"51914733","text":"import argparse\nfrom pathlib import Path\nimport os\nimport 
urllib, json\nfrom json_extract import flatten_json\nimport csv\nimport json_extract\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\"-i\", \"--input\", required=True, help=\"Input file directory\")\narg_parser.add_argument(\"-o\", \"--output\", required=False, help=\"Output file directory and name, default location is inside the folder under /csvs\")\nargs = vars(arg_parser.parse_args())\n\nsrc = args['input']\noutput = args['output']\n\nif output is None:\n output = os.path.join(src, 'csvs')\nif not os.path.exists(output):\n os.mkdir(output)\n\nprint(\"Source directory set to: \" + src)\nprint(\"Output directory set to: \" + output)\n\nfilenames = (f for f in os.listdir(src) if f.endswith('.geojson'))\nfor file in filenames:\n file_path = os.path.join(src, file)\n filename_pruned = file.split('-')[0]\n filename_no_extension = os.path.splitext(filename_pruned)[0]\n filename_output = os.path.join(output, filename_no_extension + \".csv\")\n\n current_geojson_path = os.path.join(src, file)\n\n output_csv = open(filename_output, \"w\")\n output_csv_writer = csv.writer(output_csv)\n\n header_row = ['LON', 'LAT', 'NUMBER', 'STREET', 'UNIT', 'CITY', 'DISTRICT', 'REGION', 'POSTCODE', 'ID', 'HASH']\n output_csv_writer.writerow(header_row)\n\n print('Outputting ' + filename_output + ' from ' + file_path)\n\n with open(current_geojson_path, 'r') as geojson:\n for line in geojson:\n json_data = json.loads(line)\n coords = flatten_json(json_data)\n longitude = coords['geometry_coordinates_0']\n latitude = coords['geometry_coordinates_1']\n number = coords['properties_number']\n street = coords['properties_street']\n unit = coords['properties_unit']\n city = coords['properties_city']\n district = coords['properties_district']\n region = coords['properties_region']\n postcode = coords['properties_postcode']\n prop_id = coords['properties_id']\n prop_hash = coords['properties_hash']\n\n new_row = [longitude, latitude, number, street, unit, city, district, region, postcode, prop_id, prop_hash]\n output_csv_writer.writerow(new_row)\n\n \n","sub_path":"projects/north-carolina/convert_geojson_to_csv_manual.py","file_name":"convert_geojson_to_csv_manual.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"396947047","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms, datasets\n\n\n#딥러닝 모델 설계할 때 장비 확인\nif torch.cuda.is_available():\n DEVICE = torch.device('cuda')\nelse:\n DEVICE = torch.device('cpu')\nprint('Using pytorch version:',torch.__version__,'Device:',DEVICE) #Using pytorch version: 1.7.1 Device: cuda\n\nBATCH_SIZE = 32\nEPOCHS = 10\n\n\n#data load\n\ntrain_dataset = datasets.CIFAR10(root = \"../data/FashionMNIST\",train=True, download=True,transform=transforms.ToTensor())\ntest_dataset = datasets.CIFAR10(root = \"../data/FashionMNIST\",train=False, download=True,transform=transforms.ToTensor())\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,batch_size=BATCH_SIZE, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,batch_size=BATCH_SIZE, shuffle=False)\n\nfor(X_train, Y_train) in train_loader:\n print('x_train: ',X_train.size(), 'type: ', X_train.type())\n print('y_train: ',Y_train.size(), 'type: ', X_train.type())\n break\n\npltsize = 1\nplt.figure(figsize=(10*pltsize,pltsize))\nfor i in range(10):\n plt.subplot(1,10,i+1)\n 
plt.axis('off')\n plt.imshow(np.transpose(X_train[i],(1,2,0)))\n plt.title('Class: ' + str(Y_train[i].item()))\n#plt.show()\n\n################MLP Model#####################3\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(32*32*3,512)\n self.fc2 = nn.Linear(512,256)\n self.fc3 = nn.Linear(256,10)\n def forward(self,x):\n x = x.view(-1,32*32*3)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n x = F.log_softmax(x, dim=1)\n return x\n#optimizer, objective function setting\n\nmodel = Net().to(DEVICE)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\ncriterion = nn.CrossEntropyLoss()\n\nprint(model)\n\n# x_train: torch.Size([32, 3, 32, 32]) type: torch.FloatTensor\n# y_train: torch.Size([32]) type: torch.FloatTensor\n# Net(\n# (fc1): Linear(in_features=3072, out_features=512, bias=True)\n# (fc2): Linear(in_features=512, out_features=256, bias=True)\n# (fc3): Linear(in_features=256, out_features=10, bias=True)\n# )\n\ndef train(model, train_loader,optimizer,log_interval):\n model.train()\n for batch_idx,(image,label) in enumerate(train_loader):\n image = image.to(DEVICE)\n label = label.to(DEVICE)\n optimizer.zero_grad()\n output = model(image)\n loss = criterion(output,label)\n loss.backward()\n optimizer.step()\n\n if batch_idx % log_interval == 0:\n print(\"Train Epoch: {} [{}/{}({:.0f}%)]\\tTrain Loss: {:.6f}\".format(Epoch,batch_idx*len(image),len(train_loader.dataset),100.*batch_idx/len(train_loader), loss.item()))\n\n#evaluate\n\ndef evaluate(model, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad(): #gradient 흐름 억제 #그라디언트를 통해 파라미터 값이 없데이트 되는 현상을 방지\n for image, label in test_loader:\n image = image.to(DEVICE)\n label = label.to(DEVICE)\n output = model(image)\n\n test_loss += criterion(output,label).item()\n prediction = output.max(1,keepdim = True)[1]\n correct += prediction.eq(label.view_as(prediction)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n test_accuracy = 100. * correct /len(test_loader.dataset)\n\n return test_loss, test_accuracy\n\n#Loss, acc \n\nfor Epoch in range(1, EPOCHS+1):\n train(model, train_loader, optimizer, log_interval=200)\n test_loss, test_accuracy = evaluate(model, test_loader)\n print(\"\\n[EPOCH: {}], \\tTest Loss: {:.4f}, \\tTest Accuracy: {:.2f} % \\n\".format(Epoch, test_loss,test_accuracy))\n# Test Loss: 0.0455, Test Accuracy: 48.52 %\n","sub_path":"pytorch/cifar10_MLP.py","file_name":"cifar10_MLP.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"415673938","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\n\n_VERSION = '0.1'\n\ndef get_version():\n return _VERSION\n\nsetup(\n name='albumthief',\n license='GPLv2',\n description='Very fast album downloader made using gevent. 
',\n author='Chaobin Tang',\n author_email='cbtchn@gmail.com',\n url='https://github.com/chaobin/albumthief',\n version=get_version(),\n classifiers=[\n 'Programming Language :: Python :: 2.6',\n ],\n packages = ['albumthief',],\n install_requires=[\n 'gevent',\n 'BeautifulSoup',\n 'requests'\n ],\n entry_points = {\n 'console_scripts': [\n 'steal-album = albumthief.manage:main'\n ]\n }\n)","sub_path":"pypi_install_script/albumthief-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"200972753","text":"import random\nimport numpy as np\n\n\nclass EpsilonGreedyAction:\n def __init__(self, explore_start=1.0, explore_stop=0.01, decay_rate=0.0001):\n self.explore_start = explore_start\n self.explore_stop = explore_stop\n self.decay_rate = decay_rate\n self.decay_step = 0\n\n def get_action(self, state, possible_actions, model):\n explore_probability = self.explore_stop + (self.explore_start - self.explore_stop) * np.exp(\n -self.decay_rate * self.decay_step)\n\n # Random değer eğer epsilon değerimizden küçük ise rastgele hareket seç = keşif(exploration)\n if explore_probability > np.random.rand():\n action = random.choice(possible_actions)\n else: # Random değer eğer epsilon değerimizden büyük ise greedy eylemi seç = sömürü(exploitation)\n\n # Eylemlerin değerlerini hesap et\n q_values = model.sess.run(model.output_, feed_dict={\n model.inputs_: state.reshape((1, *state.shape))\n })\n\n # Maximum değere sahip eylemi seç (greedy eylem)\n choice = np.argmax(q_values)\n action = possible_actions[int(choice)]\n self.decay_step += 1\n\n return action, explore_probability\n","sub_path":"play_doom/epsilon_greedy_action.py","file_name":"epsilon_greedy_action.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"477286620","text":"from django import template\n\nfrom django.template.loader import render_to_string\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom phileo.models import Like\nfrom phileo.utils import _allowed, widget_context\nfrom phileo.settings import LIKABLE_MODELS\n\nregister = template.Library()\n\n\n@register.assignment_tag\ndef who_likes(athlete):\n return Like.objects.filter(\n receiver_content_type=ContentType.objects.get_for_model(athlete),\n receiver_object_id=athlete.pk\n )\n\n\nclass LikesNode(template.Node):\n\n def __init__(self, user, model_list, varname):\n self.user = template.Variable(user)\n\n # Default to all the registered models\n if len(model_list) == 0:\n # These need to look like strings, otherwise they will be treated as variables\n # when they are `resolve()`d later\n model_list = ['\"%s\"' % model for model in LIKABLE_MODELS]\n\n self.model_list = [template.Variable(m) for m in model_list]\n self.varname = varname\n\n def render(self, context):\n user = self.user.resolve(context)\n content_types = []\n\n for raw_model_name in self.model_list:\n try:\n model_name = raw_model_name.resolve(context)\n except template.VariableDoesNotExist:\n continue\n\n if not _allowed(model_name):\n continue\n\n app, model = model_name.split(\".\")\n content_type = ContentType.objects.get(app_label=app, model__iexact=model)\n content_types.append(content_type)\n\n context[self.varname] = Like.objects.filter(\n sender=user,\n receiver_content_type__in=content_types\n )\n return \"\"\n\n\n@register.tag\ndef likes(parser, token):\n \"\"\"\n 
{% likes user \"app.Model\" \"app.Model\" \"app.Model\" as like_objs %}\n \"\"\"\n tokens = token.split_contents()\n user = tokens[1]\n varname = tokens[-1]\n model_list = tokens[2:-2]\n\n return LikesNode(user, model_list, varname)\n\n\nclass LikeRenderer(template.Node):\n\n def __init__(self, varname):\n self.varname = template.Variable(varname)\n\n def render(self, context):\n like = self.varname.resolve(context)\n\n instance = like.receiver\n content_type = like.receiver_content_type\n app_name = content_type.app_label\n model_name = content_type.model.lower()\n\n like_context = {\n 'instance': instance,\n 'like': like,\n }\n\n return render_to_string([\n 'phileo/%s/%s.html' % (app_name, model_name),\n 'phileo/%s/like.html' % (app_name),\n 'phileo/_like.html',\n ], like_context, context)\n\n\n@register.tag\ndef render_like(parser, token):\n \"\"\"\n {% likes user as like_list %}\n
      \n {% for like in like_list %}\n    {% render_like like %}\n {% endfor %}\n
    \n \"\"\"\n\n tokens = token.split_contents()\n var = tokens[1]\n\n return LikeRenderer(var)\n\n\n@register.filter\ndef likes_count(obj):\n \"\"\"\n Something like:\n\n
    {{ obj|likes_count }}
    \n\n will render:\n\n
    34
    \n \"\"\"\n return Like.objects.filter(\n receiver_content_type=ContentType.objects.get_for_model(obj),\n receiver_object_id=obj.pk\n ).count()\n\n\n@register.inclusion_tag(\"phileo/_widget.html\")\ndef phileo_widget(user, obj):\n return widget_context(user, obj)\n\n\n@register.inclusion_tag(\"phileo/_widget_brief.html\")\ndef phileo_widget_brief(user, obj):\n return widget_context(user, obj)\n\n\nclass LikedObjectsNode(template.Node):\n\n def __init__(self, objects, user, varname):\n self.objects = template.Variable(objects)\n self.user = template.Variable(user)\n self.varname = varname\n\n def get_objects(self, user, objects):\n is_stream = None\n get_id = None\n indexed = {}\n\n for obj in objects:\n if hasattr(obj, \"cast\") and callable(obj.cast):\n obj = obj.cast()\n if is_stream is None and get_id is None:\n is_stream = not hasattr(obj, \"_meta\")\n get_id = lambda x: is_stream and x.item.id or x.id\n\n ct = ContentType.objects.get_for_model(is_stream and obj.item or obj)\n if ct not in indexed.keys():\n indexed[ct] = []\n obj.liked = False\n indexed[ct].append(obj)\n\n for ct in indexed.keys():\n likes = Like.objects.filter(\n sender=user,\n receiver_content_type=ct,\n receiver_object_id__in=[get_id(o) for o in indexed[ct]]\n )\n\n for obj in indexed[ct]:\n for like in likes:\n if like.receiver_object_id == get_id(obj):\n obj.liked = True\n yield obj\n\n def render(self, context):\n user = self.user.resolve(context)\n objects = self.objects.resolve(context)\n context[self.varname] = self.get_objects(user, objects)\n return \"\"\n\n\n@register.tag\ndef liked(parser, token):\n \"\"\"\n {% liked objects by user as varname %}\n \"\"\"\n tag, objects, _, user, _, varname = token.split_contents()\n return LikedObjectsNode(objects, user, varname)\n","sub_path":"phileo/templatetags/phileo_tags.py","file_name":"phileo_tags.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"353044801","text":"import json\r\nimport gzip\r\nimport os \r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom datetime import date\r\nimport datetime\r\n\r\n\r\n\r\ndef draw_graph():\r\n # date -----------------------------------\r\n today = date.today()\r\n\r\n now = datetime.datetime.now()\r\n nowDate = str(now.strftime('%Y-%m-%d'))\r\n nowTime = str(now.strftime('%H'))\r\n\r\n # path -----------------------------------\r\n path = './cwEC2logtest_networkout_web1.txt'\r\n\r\n # read -----------------------------------\r\n with open(path,'r') as j_file : \r\n net_json_data = json.load(j_file)\r\n print(type(net_json_data), '\\n\\n') ## dic type\r\n\r\n net_json_data2 = net_json_data['Datapoints']\r\n\r\n net_time_data = []\r\n net_max_data = []\r\n\r\n for i in range(len(net_json_data2)):\r\n net_time_data.append(net_json_data2[i]['Timestamp'])\r\n net_max_data.append(net_json_data2[i]['Maximum'])\r\n\r\n # time - hh\r\n net_time_data2 = []\r\n\r\n for i in range(len(net_time_data)):\r\n Z_NUM = net_time_data[0].find('T') + 1\r\n Z_NUM2 = Z_NUM + 2\r\n net_time_data2_split_t = net_time_data[i][Z_NUM:Z_NUM2] \r\n net_time_data2.append(int(net_time_data2_split_t))\r\n\r\n # graph\r\n plt.bar(net_time_data2 ,net_max_data,width=0.7,color = 'green')\r\n plt.xlabel('TIME')\r\n plt.ylabel(\"NET-MAX\")\r\n plt.title('WEB_NET-MAX_GRAPH')\r\n plt.savefig('./{0}-{1}_WEB1_NET_MAX_graph.png'.format(nowDate,nowTime),dpi=300)\r\n # plt.show()","sub_path":"09-Source 
Code/합본코드/WEB1_net_max_loggraph.py","file_name":"WEB1_net_max_loggraph.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"334992220","text":"import numpy as np\nfrom matplotlib.pyplot import *\nimport matplotlib.pyplot as plt\nfrom my_round import my_round\nfrom mpl_toolkits.basemap import Basemap\n\n\n#Each line in the data files is a six-hourly update\n#Lines counted as 6 hours periods\n\n#buoydata_1=np.loadtxt(\"./elnino_drifters.dat\",unpack=True,delimiter=\",\")\n#buoydata_1=np.loadtxt(\"./elnino_drifters.dat\",unpack=True,delimiter=\",\")\n#buoydata_1=np.loadtxt(\"./lanina_drifters.dat\",unpack=True,delimiter=\",\")\n\nbuoydata_1=np.loadtxt(\"./all_drogued_drifters.dat\",unpack=True,delimiter=\",\")\nbuoydata_2=np.loadtxt(\"./all_undrogued_drifters.dat\",unpack=True,delimiter=\",\")\n#buoydata_1=np.loadtxt(\"./BD_test2.dat\",unpack=True,delimiter=\",\")\n\nbuoydata_ID=np.concatenate((buoydata_1[0],buoydata_2[0]))\nbuoydata_LAT=np.concatenate((buoydata_1[4],buoydata_2[4]))\nbuoydata_LON=np.concatenate((buoydata_1[5],buoydata_2[5]))\n\nn=0.5\nlats=np.arange(-90,90.5,n)\nlons=np.arange(0,360.5,n)\n\ntimemap=np.zeros( (lats.size-1,lons.size-1) )\ndrifternum=np.zeros( (lats.size-1,lons.size-1) )\n\nfor i in range(buoydata_ID.size-1):\n\tif buoydata_LON[i]>=360 or abs(buoydata_LAT[i])>=90:\n\t\tprint (\"Dodgy values\")\n\telse:\n\t\tprint (100.0*i/buoydata_ID.size)\n\t\tlat=buoydata_LAT[i]\n\t\tlon=buoydata_LON[i]\n\t\ttimemap[(lat+90)/n][lon/n]=timemap[(lat+90)/n][lon/n]+1\n\t\t# makes of use of the irregular numpy rounding i.e. 7.6-->7.0\n\t\t#if buoydata_ID[i+1]==buoydata_ID[i] and my_round(buoydata_LON[i],n)!=my_round(buoydata_LON[i+1],n) and my_round(buoydata_LAT[i],n)!=my_round(buoydata_LAT[i+1],n):\n\t\t#\tdrifternum[(lat+90)/n][lon/n]==drifternum[(lat+90)/n][lon/n]+1\n\n# Writing drifter coverage vector to file for analysis\n\nfile=open(\"Halfdeg_coverage_total.dat\",\"w\")\n\nfor i in range(lats.size-1):\n\tfor j in range(lons.size-1):\n\t\tif timemap[i][j]==0:\n\t\t\ttimemap[i][j]=np.float('nan')\n\t\tfile.write(\"{:} \\n\".format(timemap[i][j]))\nfile.close()","sub_path":"GDP_DATA/Drifter_Coverage/coverage_data.py","file_name":"coverage_data.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"134224012","text":"import unittest\n\nclass Solution(object):\n def canWinNim(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n return True if n % 4 else False\n def canWinNim1(self, n, m):\n pass\n\nclass SolutionTestCase(unittest.TestCase):\n def test_correct(self):\n import csv\n import os\n output = {\n \"TRUE\": True,\n \"FALSE\": False,\n }\n solution = Solution()\n with open(os.path.abspath(r\"../tests.csv\")) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n self.assertEqual(\n solution.canWinNim(int(row[\"Input\"])),\n output[row[\"Output\"]]\n )\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"problems/292. Nim Game/Python/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"406257284","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\ncommon.py\n\nCreated by Greg Rogalski on 2010-10-30.\nCopyright (c) 2010 Greg Rogalski, MD. 
All rights reserved.\n\"\"\"\n\nimport sys\nimport os\nfrom google.appengine.api import users\n\nMAX_RESULTS_PER_PAGE = 20 #how many transactions to show per page\nDEFAULT_POINTS = 5\nDEFAULT_INTEREST = 0.025\n\n\ndef auth():\n user = users.get_current_user()\n \n if user.nickname() in ['grogo99','kimheroy']:\n # self.response.headers['Content-Type'] = 'text/plain'\n # self.response.out.write('Hello, ' + user.nickname())\n return user.nickname()\n else:\n # redirect(users.create_login_url(self.request.uri))\n return None\n \n\n\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"159879204","text":"\"\"\"\n Make sure our nexrad files are current!\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nimport stat\nimport datetime\nSAMPLES = ['KDMX', 'KAMA', 'KLWX', 'KFFC', 'KBMX', 'KBGM', 'KCLE']\n\n\ndef check():\n \"\"\"Check things please\"\"\"\n now = datetime.datetime.now()\n count = []\n for nexrad in SAMPLES:\n fn = \"/mnt/mtarchive/nexrd2/raw/%s/dir.list\" % (nexrad,)\n mtime = os.stat(fn)[stat.ST_MTIME]\n ts = datetime.datetime.fromtimestamp(mtime)\n diff = (now - ts).days * 86400. + (now - ts).seconds\n if diff > 300:\n count.append(nexrad)\n return count\n\n\ndef main():\n \"\"\"Go Main Go\"\"\"\n badcount = check()\n msg = '%s/%s outage %s' % (len(badcount), len(SAMPLES),\n ','.join(badcount))\n if len(badcount) < 3:\n print('OK - %s' % (msg,))\n sys.exit(0)\n elif len(badcount) < 4:\n print('WARNING - %s' % (msg,))\n sys.exit(1)\n else:\n print('CRITICAL - %s' % (msg,))\n sys.exit(2)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"nagios/check_nexrad2.py","file_name":"check_nexrad2.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"647475902","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as np\n\nclass LinearRegression():\n\n def __init__(self):\n\n self.weights = None\n self.bias = None\n\n def fit(self, X, y):\n\n num_epochs = 10000\n N,K = X.shape\n inputs = Variable(X)\n actual = Variable(y)\n\n criterion = nn.MSELoss()\n linear = torch.nn.Linear(K, 1, bias=True)\n optimizer = optim.Adam(linear.parameters())\n\n for epoch in range(num_epochs):\n\n outputs = linear(inputs)\n loss = criterion(outputs, actual)\n print('loss',float(loss.cpu().data))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # print(linear.weight)\n\n self.weight = linear.weight.data.numpy()[0]\n self.bias = linear.bias.data.numpy()[0]\n\n\n def predict(self, X):\n\n return self.bias + X.dot(self.weight)\n\n\n\nif __name__ == \"__main__\":\n\n N = 5000\n K = 7\n filename='lograndtmp.txt'\n X=[]\n y=[]\n with open(filename, 'r') as file_to_read:\n while True:\n lines = file_to_read.readline() # 整行读取数据\n if not lines:\n break\n E_tmp = [float(i) for i in lines.split(' ')] # 将整行数据分割处理,如果分割符是空格,括号里就不用传入参数,如果是逗号, 则传入‘,'字符。\n X.append(E_tmp[:7])\n y.append(E_tmp[-1])\n # mean = np.random.normal(5, .2, size = K)\n\n # X = np.random.multivariate_normal(mean = datas, cov = np.identity(K), size = (N))\n B = np.random.uniform( size = K)\n # y = X.dot(B).reshape(N,1)# + np.random.normal(0,1, size = N)\n\n X_torch = torch.from_numpy(np.asarray(X)).float()\n y_torch = torch.from_numpy(np.asarray(y)).float()\n\n Linear = LinearRegression()\n Linear.fit(X_torch, 
y_torch)\n\n print(\"actual Beta\", B)","sub_path":"regre/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"571476901","text":"def merge(seq, low, mid, high):\n i = low\n j = mid\n res = []\n while i < mid and j < high:\n if seq[i] <= seq[j]:\n res.append(seq[i])\n i += 1\n else:\n res.append(seq[j])\n j += 1\n res += seq[i:mid]\n res += seq[j:high]\n seq[low:high] = res\n\ndef mergesort_without_using_recursion(seq):\n i = 1\n while i < len(seq):\n low = 0\n while low < len(seq):\n mid = low + i\n high = min(low + 2 * i, len(seq))\n if mid < high:\n merge(seq, low, mid, high)\n low += 2 * i\n i *= 2\n return seq\n","sub_path":"snippet/mergesort_without_using_recursion.py","file_name":"mergesort_without_using_recursion.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"290508079","text":"# !/usr/bin/env python\n# _*_ coding:utf-8 _*_\nimport re\nimport requests\n\n\nclass TitleofGuoke(object):\n\n # 初始化参数\n def __init__(self):\n self.url = 'https://www.guokr.com/ask/highlight/?page=5'\n self.headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko\"}\n\n # 发送请求\n def send_request(self):\n response = requests.get(self.url, headers=self.headers)\n resp_data = response.content.decode()\n # print(resp_data)\n return resp_data\n\n # 解析数据\n def parse_data(self,data):\n pattern = re.compile('

    (.*)

    ')\n get_data1 = pattern.findall(data)\n # print(get_data1.group(2))\n return get_data1\n\n\n # 保存数据\n def save_data(self, data):\n with open('guoke.html','w') as file:\n # file.write(data)\n for temp in data:\n file.write(temp[1] + '\\n')\n print('写入数据成功')\n\n # 调用函数\n def run(self):\n data = self.send_request()\n get_data = self.parse_data(data)\n self.save_data(get_data)\n\n\n\nif __name__ == '__main__':\n guoke = TitleofGuoke()\n guoke.run()","sub_path":"scrapy_lian/learn03/4-guo_ke.py","file_name":"4-guo_ke.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"122276580","text":"\n# from django.template.loader import get_template\n# from django.template import Template, Context\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse, Http404\nimport datetime\n\ndef cur_exchange ( request ):\n\timport urllib2\n\tfrom bs4 import BeautifulSoup \n\n\tcur_date = datetime.datetime.now()\n\n\tbanks_url = [\"http://www.trustbank.by/\", \"http://www.priorbank.by/\", \"http://www.belinvestbank.by/\"]\n\n\t# USD table header\n\t# print \"USD\"\n\n\t# print\n\n\t# print \"Bank | SELL | BUY | DELTA |\"\n\n\t# print\n\t# USD table header\n\t# loop for each bank's url\n\tfor url in banks_url:\n\n\t\trequest = urllib2.urlopen(url)\n\n\t\tsoup = BeautifulSoup (request.read())\n\n\t\tif url == \"http://www.trustbank.by/\":\n\n\t\t\tcurs_table = soup.table\n\t\t\ttrust_usd_sell = int(curs_table.find_all(\"tr\")[2].find_all(\"td\")[1].contents[0])\n\t\t\ttrust_usd_buy = int(curs_table.find_all(\"tr\")[1].find_all(\"td\")[1].contents[0])\n\t\t\ttrustbank_delta = trust_usd_sell - trust_usd_buy\n\n\t\t\t#print \"Trustbank | %s | %s | %s | \" % (trust_usd_buy, trust_usd_sell, trustbank_delta)\n\n\t\tif url == \"http://www.priorbank.by/\":\n\n\t\t\tprior_curs_table = soup.find_all(\"div\", id='cash_toolbox_rates_1')[0].table\n\t\t\tprior_usd_sell = int(prior_curs_table.find_all(\"tr\")[0].find_all(\"td\")[2].contents[0])\n\t\t\tprior_usd_buy = int(prior_curs_table.find_all(\"tr\")[0].find_all(\"td\")[1].contents[0])\n\t\t\tpriorbank_delta = prior_usd_sell - prior_usd_buy\n\t\t\t\n\t\t\t#print \"Priorbank | %s | %s | %s | \" % (prior_usd_buy, prior_usd_sell, priorbank_delta)\n\n\t\tif url == \"http://www.belinvestbank.by/\":\n\n\t\t\tbelinvest_cur_table = soup.find_all(\"div\", class_='courses_block')[1].table.tbody\n\t\t\tbelinvest_usd_buy_str = (belinvest_cur_table.find_all(\"tr\")[0].find_all(\"td\")[1].contents[0])\n\t\t\tbelinvest_usd_sell_str = (belinvest_cur_table.find_all(\"tr\")[0].find_all(\"td\")[2].contents[0])\n\n\t\t\tbelinvest_usd_buy = int(belinvest_usd_buy_str.split(\".\")[0])\n\t\t\tbelinvest_usd_sell = int(belinvest_usd_sell_str.split(\".\")[0])\n\t\t\tbelinvest_delta = belinvest_usd_sell - belinvest_usd_buy\n\n\t\t\t#print \"Belinvestbank | %s | %s | %s |\" % (belinvest_usd_buy, belinvest_usd_sell, belinvest_delta)\n\n\t# ctx = Context (\n\t# \t# {\n\n\t# \t# 'trust_usd_sell': trust_usd_sell,\n\t# \t# 'trust_usd_buy': trust_usd_buy,\n\t# \t# 'trustbank_delta': trustbank_delta,\n\t# \t# 'prior_usd_sell': prior_usd_sell,\n\t# \t# 'prior_usd_buy': prior_usd_buy,\n\t# \t# 'priorbank_delta': priorbank_delta,\n\t# \t# 'belinvest_usd_buy': belinvest_usd_buy,\n\t# \t# 'belinvest_usd_sell': belinvest_usd_sell,\n\t# \t# 'belinvest_delta': belinvest_delta\n\n\t# \t# }\n\t# \tlocals()\n\t# \t)\n\treturn render_to_response ('cur_exch.html', 
locals())","sub_path":"Curr_exch/Curr_exch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"21704619","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nimport os\nimport re\n#import math # May be needed for isnan\nimport html\nimport nltk\nimport spacy\nimport string\nimport unicodedata\n\nglobal DEBUG\nDEBUG = False\n\nfrom bs4 import BeautifulSoup\n\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\nfrom nltk.corpus import wordnet as wn \nfrom nltk.stem.wordnet import WordNetLemmatizer \n\nfrom nltk.corpus import stopwords\nstopword_list = set(stopwords.words('english'))\n\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.tokenize.toktok import ToktokTokenizer\n#from nltk.tokenize.stanford import StanfordTokenizer\n\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem.snowball import SnowballStemmer\n\nlemmatizer = WordNetLemmatizer()\ntokenizer = ToktokTokenizer()\n\nfrom IPython.display import display_markdown\n\ndef as_markdown(head='', body='Some body text'):\n if head is not '':\n display_markdown(f\"#### {head}\\n\\n>{body}\\n\", raw=True)\n else:\n display_markdown(f\">{body}\\n\", raw=True) \n\n# POS_TAGGER_FUNCTION : TYPE 1 \ndef pos_tagger(nltk_tag): \n if nltk_tag.startswith('J'): \n return wn.ADJ \n elif nltk_tag.startswith('V'): \n return wn.VERB \n elif nltk_tag.startswith('N'): \n return wn.NOUN \n elif nltk_tag.startswith('R'): \n return wn.ADV \n else: \n return None\n\n# Useful example code [here](https://www.geeksforgeeks.org/python-lemmatization-approaches-with-examples/?ref=rp).\n# \n# | Tag | What it Means |\n# | :-- | :------------ |\n# | CC | coordinating conjunction |\n# | CD | cardinal digit |\n# | DT | determiner |\n# | EX | existential there (like: “there is” … think of it like “there exists”) |\n# | FW | foreign word |\n# | IN | preposition/subordinating conjunction |\n# | JJ | adjective ‘big’ |\n# | JJR | adjective, comparative ‘bigger’ |\n# | JJS | adjective, superlative ‘biggest’ |\n# | LS | list marker 1 |\n# | MD | modal could, will |\n# | NN | noun, singular ‘desk’ |\n# | NNS | noun plural ‘desks’ |\n# | NNP | proper noun, singular ‘Harrison’ |\n# | NNPS | proper noun, plural ‘Americans’ |\n# | PDT | predeterminer ‘all the kids’ |\n# | POS | possessive ending parent‘s |\n# | PRP | personal pronoun I, he, she |\n# | PRP\\$ | possessive pronoun my, his, hers |\n# | RB | adverb very, silently, |\n# | RBR | adverb, comparative better |\n# | RBS | adverb, superlative best |\n# | RP | particle give up |\n# | TO | to go ‘to‘ the store. |\n# | UH | interjection errrrrrrrm |\n# | VB | verb, base form take |\n# | VBD | verb, past tense took |\n# | VBG | verb, gerund/present participle taking |\n# | VBN | verb, past participle taken |\n# | VBP | verb, sing. present, non-3d take |\n# | VBZ | verb, 3rd person sing. 
present takes |\n# | WDT | wh-determiner which |\n# | WP | wh-pronoun who, what |\n# | WP\\$ | possessive wh-pronoun whose |\n# | WRB | wh-abverb where, when |\n\npos_tags = \"\"\"| CC | coordinating conjunction |\n| CD | cardinal digit |\n| DT | determiner |\n| EX | existential there |\n| FW | foreign word |\n| IN | preposition/subordinating conjunction |\n| JJ | adjective ‘big’ |\n| JJR | adjective, comparative |\n| JJS | adjective, superlative |\n| LS | list marker 1 |\n| MD | modal could, will |\n| NN | noun, singular |\n| NNS | noun plural |\n| NNP | proper noun, singular |\n| NNPS | proper noun, plural |\n| PDT | predeterminer |\n| POS | possessive ending |\n| PRP | personal pronoun |\n| PRP | possessive pronoun |\n| RB | adverb very, silently, |\n| RBR | adverb, comparative |\n| RBS | adverb, superlative |\n| RP | participle |\n| TO | to go ‘to‘ |\n| UH | interjection |\n| VB | verb, base form |\n| VBD | verb, past tense |\n| VBG | verb, gerund/present participle |\n| VBN | verb, past participle |\n| VBP | verb, sing. present, non-3d |\n| VBZ | verb, 3rd person sing. present |\n| WDT | wh-determiner |\n| WP | wh-pronoun |\n| WP | possessive |\n| WRB | wh-abverb |\"\"\"\n\nglobal pos_lkp\npos_lkp = {}\nfor p in pos_tags.split(\"\\n\"):\n m = re.search(r\"^\\| ([A-Z\\$]{2,5})\\s+\\| ([^\\|]+)\", p)\n pos_lkp[m.groups()[0]] = m.groups()[1].strip()\n\ndef lemmatise_text(txt:str) -> list: # lemmatise tokens \n tokenized = sent_tokenize(txt)\n processed = []\n for i in tokenized: \n \n # Word tokenizers is used to find the words \n # and punctuation in a string \n wordsList = nltk.word_tokenize(i)\n \n # Using a part-of-speech \n # tagger or POS-tagger. \n tagged = nltk.pos_tag(wordsList) \n \n for t in tagged:\n try:\n processed.append( (lemmatizer.lemmatize(t[0], pos=pos_tagger(t[1])), t[1]) )\n except KeyError:\n if DEBUG: print(f\"Can't process: {t[0]} / {pos_tagger(t[1])}\")\n processed.append( (t[0],None) )\n \n if DEBUG: print(f\"Text with PoS tags: {processed}\")\n return ' '.join([x[0] for x in processed])\n\n# These seem to come through surprisingly often and need to resolved\ndef remove_quotemarks(text:str) -> str:\n smart_quotes = set(['‘','“','”','\"','\"',\"’\"])\n sq = re.compile(f\"({'|'.join(smart_quotes)})\",flags=re.IGNORECASE|re.DOTALL)\n text = re.sub(\"\\w’\\w\",\"'\",text,flags=re.IGNORECASE|re.DOTALL) # This is for possessives\n text = sq.sub(\" \",text) # Get rid of everything else\n return text\n\n# Adapted from https://www.kdnuggets.com/2018/08/practitioners-guide-processing-understanding-text-2.html\ndef remove_accented_chars(text:str) -> str:\n return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n\n# From https://www.kdnuggets.com/2018/08/practitioners-guide-processing-understanding-text-2.html\n# From https://stackoverflow.com/questions/19790188/expanding-english-language-contractions-in-python\nglobal CONTRACTION_MAP\nCONTRACTION_MAP = { \n \"ain't\": \"am not\",\n \"aren't\": \"are not\",\n \"can't\": \"cannot\",\n \"can't've\": \"cannot have\",\n \"'cause\": \"because\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hadn't've\": \"had not have\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'd've\": \"he would have\",\n \"he'll\": \"he will\",\n \"he'll've\": \"he will have\",\n \"he's\": \"hhe is\",\n 
\"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how is\",\n \"I'd\": \"I would\",\n \"I'd've\": \"I would have\",\n \"I'll\": \"I will\",\n \"I'll've\": \"I will have\",\n \"I'm\": \"I am\",\n \"I've\": \"I have\",\n \"i'd\": \"I would\",\n \"i'd've\": \"I would have\",\n \"i'll\": \"I will\",\n \"i'll've\": \"I will have\",\n \"i'm\": \"I am\",\n \"i've\": \"I have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it would\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\",\n \"it'll've\": \"it will have\",\n \"it's\": \"it is\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": \"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"shan't\": \"shall not\",\n \"sha'n't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she had / she would\",\n \"she'd've\": \"she would have\",\n \"she'll\": \"she will\",\n \"she'll've\": \"she will have\",\n \"she's\": \"she is\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so've\": \"so have\",\n \"so's\": \"so is\",\n \"that'd\": \"that would\",\n \"that'd've\": \"that would have\",\n \"that's\": \"that is\",\n \"there'd\": \"there would\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\",\n \"they'd\": \"tthey would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they will\",\n \"they'll've\": \"they will have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we would\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n \"weren't\": \"were not\",\n \"what'll\": \"hat will\",\n \"what'll've\": \"what will have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\",\n \"who'll've\": \"who will have\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why's\": \"why is\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you would\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you will\",\n \"you'll've\": \"you will have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\"\n}\n\ndef expand_contractions(text, contraction_mapping=CONTRACTION_MAP):\n \n contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())), \n flags=re.IGNORECASE|re.DOTALL)\n def expand_match(contraction):\n match = contraction.group(0)\n first_char = match[0]\n expanded_contraction = contraction_mapping.get(match) if contraction_mapping.get(match) else contraction_mapping.get(match.lower()) \n 
expanded_contraction = first_char+expanded_contraction[1:]\n return expanded_contraction\n \n expanded_text = contractions_pattern.sub(expand_match, text)\n expanded_text = re.sub(\"'\", \"\", expanded_text)\n return expanded_text\n\n# Adapted from https://www.kdnuggets.com/2018/08/practitioners-guide-processing-understanding-text-2.html\ndef remove_special_chars(text:str, remove_digits:bool=False, replace_with_spaces:bool=True) -> str:\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n return re.sub(pattern, ' ' if replace_with_spaces else '', text)\n\n# Adapted from https://www.kdnuggets.com/2018/08/practitioners-guide-processing-understanding-text-2.html\nglobal NUMBER_MAP\nNUMBER_MAP = {\n 'k': 1e3,\n 'm': 1e6,\n 'b': 1e9,\n 'bn': 1e9,\n 't': 1e12,\n 'tn': 1e12\n}\n\ndef expand_numbers(text:str, number_mapping=NUMBER_MAP):\n \n number_pattern = re.compile('([0-9]+([0-9.]*)?)({})\\b'.format('|'.join(number_mapping.keys())), \n flags=re.IGNORECASE|re.DOTALL)\n def expand_number_match(number):\n num = float(number.group(1))\n suffix = number.group(len(number.groups()))\n \n exp = number_mapping.get(suffix) if number_mapping.get(suffix) else number_mapping.get(suffix.lower())\n return str(int(num * exp))\n \n expanded_txt = number_pattern.sub(expand_number_match, text)\n return expanded_txt\n\ndef strip_html_tags(doc:str) -> str:\n soup = BeautifulSoup(doc, \"html.parser\")\n return soup.get_text()\n\ndef remove_stopwords(text, is_lower_case=False):\n tokens = tokenizer.tokenize(text)\n tokens = [token.strip() for token in tokens]\n if is_lower_case:\n filtered_tokens = [token for token in tokens if token not in stopword_list]\n else:\n filtered_tokens = [token for token in tokens if token.lower() not in stopword_list]\n filtered_text = ' '.join(filtered_tokens) \n return filtered_text\n\n#punkts = set([',',';',':','-','–','—','!','?']) # Doesn't work because of special chars\ndef remove_punctuation(text:str, keep_sentences:bool=True):\n pk = re.compile(r'[,;:\\-!–?—\\.\\/\\\\\\&]' if keep_sentences is True else r'[,;:\\-!–?—]',flags=re.IGNORECASE|re.DOTALL)\n return pk.sub(\" \",text) # Remove punctuation -- not needed if you remove special chars instead\n\ndef remove_short_text(doc:str, shortest_word:int=1):\n text = re.split('\\s+',doc)\n punkts = re.compile(r'[,;:\\-!–?—\\.\\/\\\\]',flags=re.IGNORECASE|re.DOTALL)\n return ' '.join([x for x in text if len(x)>shortest_word or punkts.match(x)])\n\ndef normalise_document(doc, html_stripping=True, contraction_expansion=True,\n accented_char_removal=True, text_lower_case=True, \n text_lemmatization=True, special_char_removal=False, \n punctuation_removal=True, keep_sentences=True, \n stopword_removal=True, remove_digits=False, infer_numbers=True, \n shortest_word=2):\n \n if DEBUG: print(f\"Input:\\n\\t{doc}\")\n\n try: \n # strip HTML\n if html_stripping:\n # bs4 strips out semantically important whitespace so we need\n # to insert an extra space after end-tags.\n doc = re.sub(r'(\\/[A-Za-z]+\\d?|[A-Za-z]+ \\/)>','\\\\1> ', html.unescape(doc))\n doc = strip_html_tags(doc)\n if DEBUG: print(f\"After HTML removal:\\n\\t{doc}\")\n \n # remove extra newlines\n doc = re.sub(r'[\\r|\\n|\\r\\n]+', ' ', doc)\n \n # remove extra whitespace\n doc = re.sub('\\s+', ' ', doc)\n if DEBUG: print(f\"After newline and whitespace removal:\\n\\t{doc}\")\n \n # remove quotemarks\n doc = remove_quotemarks(doc)\n \n # remove accented characters\n if accented_char_removal:\n doc = remove_accented_chars(doc)\n if DEBUG: print(f\"After 
accent removal:\\n\\t{doc}\")\n \n # expand contractions \n if contraction_expansion:\n doc = expand_contractions(doc)\n if DEBUG: print(f\"After contraction expansion:\\n\\t{doc}\")\n \n # infer numbers from abbreviations\n if infer_numbers:\n doc = expand_numbers(doc)\n if DEBUG: print(f\"After number expansion:\\n\\t{doc}\")\n \n # lowercase the text \n if text_lower_case:\n doc = doc.lower()\n if DEBUG: print(f\"After lower-casing:\\n\\t{doc}\")\n \n # lemmatize text\n if text_lemmatization:\n doc = lemmatise_text(doc)\n if DEBUG: print(f\"After lemmatisation:\\n\\t{doc}\")\n \n # remove special characters and\\or digits \n if special_char_removal:\n doc = remove_special_chars(doc, remove_digits)\n if DEBUG: print(f\"After special char removal:\\n\\t{doc}\")\n \n # remove stopwords\n if stopword_removal:\n doc = remove_stopwords(doc, is_lower_case=text_lower_case)\n if DEBUG: print(f\"After stopword removal:\\n\\t{doc}\")\n \n # Deal with HTML entities -- not sure\n # why these aren't picked up earlier in \n # the HTML function...\n doc = html.unescape(doc)\n \n # remove punctuation\n if punctuation_removal:\n doc = remove_punctuation(doc, keep_sentences)\n if DEBUG: print(f\"After punctuation removal:\\n\\t{doc}\")\n \n # remove short words\n if shortest_word > 1:\n doc = remove_short_text(doc, shortest_word)\n if DEBUG: print(f\"After short text removal:\\n\\t{doc}\")\n \n return doc\n except TypeError as err:\n if DEBUG:\n print(f\"Problems with: {doc}\")\n print(err)\n #traceback.print_exc(file=sys.stdout)\n rval = doc if doc is not None else ''\n return rval\n","sub_path":"data/textual/.ipynb_checkpoints/__init__-checkpoint.py","file_name":"__init__-checkpoint.py","file_ext":"py","file_size_in_byte":15612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"591429543","text":"import sys\nsys.setrecursionlimit(10000)\n\ndef howmany(t):\n if t==2:\n return 1\n elif t==4:\n return 2\n else:\n sum=0\n for i in range(2,t-2,2):\n sum+=howmany(i)*howmany(t-i-2)\n sum+=2*howmany(t-2)\n return sum\nn=int(input())\nres=[]\nfor _ in range(n):\n res.append(int(input()))\nfor h in res:\n print(howmany(h),end=\"\\n\")","sub_path":"Code/CodeRecords/2205/60749/256550.py","file_name":"256550.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"649261938","text":"import sys\n\ndef set_deb_control(version , arch):\n file_name = \"./package/debian/DEBIAN/control\"\n template = \"Package: tpflow\\n\"\n template+= \"Version: \"+version+\"\\n\"\n template+= \"Replaces: tpflow\\n\"\n template+= \"Section: non-free/misc\\n\"\n template+= \"Priority: optional\\n\"\n template+= \"Architecture: \"+arch+\"\\n\"\n template+= \"Maintainer: Aleksandrs Livincovs \\n\"\n template+= \"Description: Thingplex TpFlow is IoT rule and flow engine.\\n\"\n\n f = open(file_name,\"w\")\n f.write(template)\n f.close()\n\n\ndef set_version_file(version):\n file_name = \"./VERSION\"\n f = open(file_name,\"w\")\n f.write(version)\n f.close() \n\nif __name__ == \"__main__\":\n environment = sys.argv[1] \n version = sys.argv[2]\n arch = sys.argv[3]\n set_deb_control(version,arch)\n set_version_file(version)","sub_path":"scripts/config_env.py","file_name":"config_env.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"542407005","text":"#!/usr/bin/env python\n # -*- coding: utf-8 
-*-\nimport re\nfrom collections import OrderedDict\nfrom xlrd import open_workbook\n\nXLS_COLUMNS = ('group_name', 'short_form', 'long_form', 'pinyin', 'type',\n 'english')\n\nPINYIN_FILE = \"pinyin.txt\"\n\n# lesson file format pattern:\n# No. Short Form Long Form Pinyin English\nLESSON_REGEX = re.compile(r\"\"\"^(?:\\ *\\d+\\.\\ + | \\ {2,})\n (?P[^\\x00-\\x7F]+)\n \\ {2,}\n (?P[^\\x00-\\x7F]+)?\n (?:\\ {2,}(?Pv\\.o\\.)\\s+ | \\ {2,})\n (?P\\w.+?)\n \\ {2,}\n (?P[\\w\\(\\[].+?)\n \\s*$\"\"\", re.VERBOSE | re.UNICODE | re.MULTILINE)\n\nclass PinyinFixer(object):\n \"\"\"Fix pinyins that don't display correctly within the lesson PDFs.\"\"\"\n def __init__(self, pinyin_file):\n \"\"\"Creates a Pinyin Fixer.\n\n File must follow a \"keyvalue\" format.\n \"\"\"\n fix_data = pinyin_file.read()\n self.fixes = re.findall(r'(.+?)\\t(.+?)\\s', fix_data, re.MULTILINE)\n\n def fix(self, s):\n \"\"\"Returns a fixed pinyin string.\"\"\"\n for before, after in self.fixes:\n s = s.replace(before, after)\n return s\n\n\nclass TXTLessonReader(object):\n \"\"\"Reads and parses Chinese lesson vocabulary TXT files.\"\"\"\n def __init__(self, lesson_file, fixer):\n self.lesson_file = lesson_file\n self.fixer = fixer\n self._group_name = ''\n\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"Iterates through a lesson file, returning a dict for each line\n that matches the LESSON_REGEX\"\"\"\n line = self.lesson_file.next()\n m = LESSON_REGEX.search(line)\n\n while m is None:\n # word group name lines start with '!!', so this is where we\n # capture them\n if line.startswith('!!'):\n self._group_name = line[2:].strip()\n\n line = self.lesson_file.next()\n m = LESSON_REGEX.search(line)\n\n return {\n 'short_form': m.group('short_form'),\n # if the long form isn't available, use the short form\n 'long_form': m.group('long_form') or m.group('short_form'),\n 'verb_object': m.group('verb_object'),\n 'pinyin': self.fixer.fix(m.group('pinyin')), # fix broken pinyins\n 'english': m.group('english'),\n 'group_name': self._group_name,\n }\n\n\nclass XLSLessonReader(object):\n \"\"\"Reads and parses Chinese lesson vocabulary XLS files.\"\"\"\n def __init__(self, lesson_file, fixer):\n sh = lesson_file.sheet_by_index(0)\n self.lesson_file = (sh.row_values(rownum) for\n rownum in range(0, sh.nrows))\n self.fixer = fixer\n self._group_name = ''\n\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"Iterates through a lesson spreadsheet, returning a dict for each row\n that uses XLS_COLUMNS for keys\"\"\"\n row = dict(zip(XLS_COLUMNS, self.lesson_file.next()))\n\n # the 'english' column contains two values, separated by a line return\n # we have to do this BEFORE the line return search/replace below\n (english, _, example) = row.pop('english').strip().partition('\\n')\n example = example.replace('\\n', ' ').replace('/', u'•')\n\n # some cells have unnecessary line returns\n for (k,v) in row.iteritems():\n if v:\n row[k] = v.replace('\\n', '')\n\n # group names are set once and then used until another group is seen\n if row['group_name']:\n self._group_name = row['group_name']\n\n return {\n 'short_form': row['short_form'],\n # if the long form isn't available, use the short form\n 'long_form': row['long_form'] or row['short_form'],\n 'pinyin': self.fixer.fix(row['pinyin']), # fix broken pinyins\n 'type': row['type'],\n 'english': english,\n 'example': example,\n 'group_name': self._group_name,\n }\n\n\nclass FlashcardWriter(object):\n \"\"\"Constructs and writes out flash card files.\"\"\"\n def 
__init__(self):\n self.flashcards = OrderedDict()\n\n def add_card(self, word):\n \"\"\"Takes a lesson word and adds it to the flash card deck.\"\"\"\n card = \"%s[%s]\\t%s\\t\" % (word['short_form'], word['long_form'],\n word['pinyin'])\n if 'type' in word and word['type']:\n card += '(%s.) ' % (word['type'])\n card += '%s' % (word['english'])\n\n if 'verb_object' in word and word['verb_object']:\n card += ' (v.o)'\n if 'example' in word and word['example']:\n card += u'•%s' % (word['example'])\n\n if word['group_name'] not in self.flashcards:\n self.flashcards[word['group_name']] = []\n self.flashcards[word['group_name']].append(card)\n\n def write(self, flashcard_file, lesson_title):\n \"\"\"Write the deck to a file.\"\"\"\n for (group_name, deck) in self.flashcards.iteritems():\n flashcard_file.write(\"// %s/%s\\r\\n\" % (lesson_title, group_name))\n for card in deck:\n flashcard_file.write(\"%s\\r\\n\" % card)\n\n\nif __name__ == '__main__':\n import codecs\n import sys\n\n if len(sys.argv) != 4:\n usage = '%s '\n raise SystemExit(usage % (sys.argv[0]))\n\n input_file = sys.argv[1]\n\n if input_file.endswith('txt'):\n reader = TXTLessonReader(codecs.open(input_file, encoding='utf-8'),\n PinyinFixer(codecs.open(PINYIN_FILE, encoding='utf-8')))\n elif input_file.endswith('xls'):\n reader = XLSLessonReader(open_workbook(input_file),\n PinyinFixer(codecs.open(PINYIN_FILE, encoding='utf-8')))\n else:\n raise SystemExit('unknown input file type')\n\n writer = FlashcardWriter()\n\n for word in reader:\n writer.add_card(word)\n\n # save the deck\n flashcard_file = codecs.open(sys.argv[2], encoding='utf-8', mode='w')\n writer.write(flashcard_file, lesson_title=sys.argv[3])\n flashcard_file.close()\n","sub_path":"flashcard.py","file_name":"flashcard.py","file_ext":"py","file_size_in_byte":6366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"212949960","text":"food = {}\n\ndays = int(input())\n\nwhile days != 0:\n for i in range(days):\n item = str(input())\n food[item.split(\" \")[0]] = item.split(\" \")[1:]\n\n orders = {}\n output = []\n\n for key in food:\n for item in food[key]:\n if item not in orders:\n orders[item] = []\n\n for key in orders:\n for item in food:\n if key in food[item]:\n orders[key].append(item)\n\n orders[key].sort()\n output.append(key + \" \" + \" \".join(orders[key]))\n\n output.sort()\n\n for item in output:\n print(item)\n\n print(\"\")\n\n food = {}\n\n days = int(input())\n","sub_path":"solutions/baconeggsandspam.py","file_name":"baconeggsandspam.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"191210139","text":"import pywt\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport time\nimport os\nfrom numba import vectorize\n\n@vectorize([\"float32(float32, float32)\"], target='cuda')\ndef varianceWeight(list1):\n ech = []\n for i in list1:\n ech.append(i.var())\n res = [k/sum(ech) for k in ech]\n return res\n\n# def getVarianceImg(array):\n# k = 3\n# row, col = array.shape\n# varImg = np.zeros((row, col))\n# for i in range(row):\n# for j in range(col):\n# up = i-k if i-k > 0 else 0\n# down = i+k if i+k < row else row\n# left = j-k if j-k > 0 else 0\n# right = j+k if j+k < col else col\n# window = array[up:down, left:right]\n# mean, var = cv2.meanStdDev(window)\n# varImg[i, j] = var\n# return varImg\n\ndef testWave(img_ch, wavelet1 = \"sym4\", lev = 5, w=3): # haar、db4、sym4、bior2.4\n transf = []\n 
for i in range(len(img_ch)):\n transf.append(pywt.wavedec2(img_ch[i], wavelet1, level=lev))\n if i >= 1:\n assert len(transf[i]) == len(transf[0])\n\n recWave = []\n\n for k in range(len(transf[0])):\n cvtArray, cvtArray1, cvtArray2 = [], [], []\n # 低频分量\n if k == 0:\n list1 = [transf[low][0] for low in range(len(transf))]\n coe = varianceWeight(list1)\n\n lowFreq = np.zeros(transf[0][0].shape)\n row, col = transf[0][0].shape\n for i in range(row):\n for j in range(col):\n lowFreq[i, j] = sum([coe[m] * transf[m][0][i, j] for m in range(len(transf))])\n recWave.append(lowFreq)\n continue\n\n # 高频分量\n for array1, array2, array3, array4, array5, array6 in zip(\n transf[0][k],transf[1][k],transf[2][k],transf[3][k],transf[4][k],transf[5][k]):\n\n array = [array1, array2, array3, array4, array5, array6]\n\n tmp_row, tmp_col = array[0].shape\n highFreq = np.zeros((tmp_row, tmp_col))\n\n sum_arr = [sum(map(sum, abs(array[n]))) for n in range(len(array))]\n idx = np.argsort(sum_arr)\n\n for i in range(tmp_row):\n for j in range(tmp_col):\n highFreq[i, j] = (array[idx[-1]][i, j] + array[idx[-2]][i, j] + array[idx[-3]][i, j]) / w\n cvtArray.append(highFreq)\n\n recWave.append(tuple(cvtArray))\n\n return pywt.waverec2(recWave, wavelet1)\n\ndef BGR_2_YIQ(img):\n B, G, R = cv2.split(img)\n Y = 0.299 * R + 0.587 * G + 0.114 * B\n I = 0.596 * R - 0.274 * G - 0.322 * B\n Q = 0.211 * R - 0.523 * G + 0.312 * B\n img = cv2.merge([Y, I, Q])\n return img\n\ndef YIQ_to_BGR(Y, I, Q):\n # Y, I, Q = cv2.split(img)\n R = Y + 0.956 * I + 0.624 * Q\n G = Y - 0.272 * I - 0.647 * Q\n B = Y - 1.106 * I + 1.703 * Q\n img = cv2.merge([B, G, R])\n return img\n\ndef read_img_dir(file_path):\n img_dir = []\n for root, dirs, file_names in os.walk(file_path):\n for file_name in file_names:\n image_path = os.path.join(root, file_name)\n img_dir.append(image_path)\n return img_dir\n\nif __name__ == '__main__':\n start_time0 = time.time()\n img_dir = read_img_dir(\"../test/\")\n img_data = []\n print(img_dir)\n for dir in img_dir[:6]:\n print(dir)\n img = cv2.imread(dir)\n img = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2))\n img = BGR_2_YIQ(img)\n img_data.append(img)\n start_time1 = time.time()\n h, w = img_data[0].shape[:2]\n print(img_data[0].shape)\n\n Y = [img_data[y][:, :, 0] for y in range(len(img_data))]\n I = [img_data[i][:, :, 1] for i in range(len(img_data))]\n Q = [img_data[q][:, :, 2] for q in range(len(img_data))]\n print(len(Y), Y[0].shape)\n\n # haar、db4、sym4、bior2.4\n model = [\"coif1\", \"coif2\", \"coif3\", \"coif4\", \"coif5\",\n \"sym2\", \"sym6\", \"sym10\", \"sym14\", \"sym18\", \"sym20\",\n \"db1\", \"db5\", \"db9\", \"db13\", \"db17\", \"db20\",\n \"bior1.1\", \"bior2.2\", \"bior3.3\", \"bior4.4\", \"bior5.5\", \"bior6.8\",\n \"rbio1.1\", \"rbio2.2\", \"rbio3.3\", \"rbio4.4\", \"rbio5.5\", \"rbio6.8\"]#\"sym4\", , \"bior2.4\", \"haar\", \"db4\", \"db10\",\n w1 = [i/10 for i in range(29, 30)]\n print(w1)\n for i in model:\n for j in w1:\n for l in [3]:\n Y1 = testWave(Y, wavelet1=i, lev=int(l), w=int(j))\n I1 = testWave(I, wavelet1=i, lev=int(l), w=int(j))\n Q1 = testWave(Q, wavelet1=i, lev=int(l), w=int(j))\n\n img = YIQ_to_BGR(Y1, I1, Q1)\n img_save_path = \"../result/test8_\" + i + \"_w\" + str(j) + \"_\" + str(l) + \".png\"\n cv2.imwrite(img_save_path, img)\n\n print(\"time ; %.4f s\" % (time.time() - start_time1))\n\n\n\n\n\n\n\n","sub_path":"Wavelet transform 
fuse/src/image_fuse_wt_ch3_4.py","file_name":"image_fuse_wt_ch3_4.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"141340893","text":"\"\"\"Bartlett's trasmission chain experiment from Remembering (1932).\"\"\"\n\nfrom dallinger.networks import Chain\nfrom dallinger.nodes import Source\nfrom dallinger.experiments import Experiment\nimport random\nimport base64\nimport os\nimport json\n\n\nclass IteratedDrawing(Experiment):\n \"\"\"Define the structure of the experiment.\"\"\"\n\n def __init__(self, session):\n \"\"\"Call the same function in the super (see experiments.py in dallinger).\n\n A few properties are then overwritten.\n Finally, setup() is called.\n \"\"\"\n super(IteratedDrawing, self).__init__(session)\n self.experiment_repeats = 1\n self.setup()\n\n def setup(self):\n \"\"\"Setup the networks.\n\n Setup only does stuff if there are no networks, this is so it only\n runs once at the start of the experiment. It first calls the same\n function in the super (see experiments.py in dallinger). Then it adds a\n source to each network.\n \"\"\"\n if not self.networks():\n super(IteratedDrawing, self).setup()\n for net in self.networks():\n DrawingSource(network=net)\n\n def create_network(self):\n \"\"\"Return a new network.\"\"\"\n return Chain(max_size=10)\n\n def add_node_to_network(self, node, network):\n \"\"\"Add node to the chain and receive transmissions.\"\"\"\n network.add_node(node)\n parent = node.neighbors(direction=\"from\")[0]\n parent.transmit()\n node.receive()\n\n def recruit(self):\n \"\"\"Recruit one participant at a time until all networks are full.\"\"\"\n if self.networks(full=False):\n self.recruiter().recruit_participants(n=1)\n else:\n self.recruiter().close_recruitment()\n\n\nclass DrawingSource(Source):\n \"\"\"A Source that reads in a random image from a file and transmits it.\"\"\"\n\n __mapper_args__ = {\n \"polymorphic_identity\": \"drawing_source\"\n }\n\n def _contents(self):\n \"\"\"Define the contents of new Infos.\n\n transmit() -> _what() -> create_information() -> _contents().\n \"\"\"\n images = [\n \"owl.png\",\n ]\n\n image = random.choice(images)\n\n image_path = os.path.join(\"static\", \"stimuli\", image)\n uri_encoded_image = (\n \"data:image/png;base64,\" +\n base64.b64encode(open(image_path, \"rb\").read())\n )\n\n return json.dumps({\n \"image\": uri_encoded_image,\n \"sketch\": \"\"\n })\n","sub_path":"demos/iterated-drawing/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"599965361","text":"'''THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND\nNON-INFRINGEMENT. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE\nDISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,\nWHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'''\n\n# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk\n# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB\n# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu\n# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd\n\n# contact :- github@jamessawyer.co.uk\n\n\n\nfrom .config import (\n error_analysis,\n sample_data,\n CORRELATION,\n CORRELATION_THRESHOLD,\n VERBOSE,\n)\nfrom .context import pandas_ta\n\nfrom unittest import TestCase, skip\nimport pandas.testing as pdt\nfrom pandas import DataFrame, Series\n\nimport talib as tal\n\n\nclass TestTrend(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.data = sample_data\n cls.data.columns = cls.data.columns.str.lower()\n cls.open = cls.data[\"open\"]\n cls.high = cls.data[\"high\"]\n cls.low = cls.data[\"low\"]\n cls.close = cls.data[\"close\"]\n if \"volume\" in cls.data.columns:\n cls.volume = cls.data[\"volume\"]\n\n @classmethod\n def tearDownClass(cls):\n del cls.open\n del cls.high\n del cls.low\n del cls.close\n if hasattr(cls, \"volume\"):\n del cls.volume\n del cls.data\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_adx(self):\n result = pandas_ta.adx(self.high, self.low, self.close)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"ADX_14\")\n\n try:\n expected = tal.ADX(self.high, self.low, self.close)\n pdt.assert_series_equal(result.iloc[:, 0], expected)\n except AssertionError as ae:\n try:\n corr = pandas_ta.utils.df_error_analysis(result.iloc[:, 0],\n expected,\n col=CORRELATION)\n self.assertGreater(corr, CORRELATION_THRESHOLD)\n except Exception as ex:\n error_analysis(result, CORRELATION, ex)\n\n def test_amat(self):\n result = pandas_ta.amat(self.close)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"AMAT_EMA_8_21_2\")\n\n def test_aroon(self):\n result = pandas_ta.aroon(self.high, self.low)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"AROON_14\")\n\n try:\n expected = tal.AROON(self.high, self.low)\n expecteddf = DataFrame({\n \"AROOND_14\": expected[0],\n \"AROONU_14\": expected[1]\n })\n pdt.assert_frame_equal(result, expecteddf)\n except AssertionError as ae:\n try:\n aroond_corr = pandas_ta.utils.df_error_analysis(\n result.iloc[:, 0], expecteddf.iloc[:, 0], col=CORRELATION)\n self.assertGreater(aroond_corr, CORRELATION_THRESHOLD)\n except Exception as ex:\n error_analysis(result.iloc[:, 0], CORRELATION, ex)\n\n try:\n aroonu_corr = pandas_ta.utils.df_error_analysis(\n result.iloc[:, 1], expecteddf.iloc[:, 1], col=CORRELATION)\n self.assertGreater(aroonu_corr, CORRELATION_THRESHOLD)\n except Exception as ex:\n error_analysis(result.iloc[:, 1],\n CORRELATION,\n ex,\n newline=False)\n\n def test_aroon_osc(self):\n result = pandas_ta.aroon(self.high, self.low)\n\n try:\n expected = tal.AROONOSC(self.high, self.low)\n pdt.assert_series_equal(result.iloc[:, 2], expected)\n except AssertionError as ae:\n try:\n aroond_corr = pandas_ta.utils.df_error_analysis(result.iloc[:,\n 2],\n expected,\n col=CORRELATION)\n self.assertGreater(aroond_corr, CORRELATION_THRESHOLD)\n except Exception as ex:\n error_analysis(result.iloc[:, 0], CORRELATION, ex)\n\n def test_chop(self):\n result = pandas_ta.chop(self.high, self.low, 
self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"CHOP_14_1_100\")\n\n def test_cksp(self):\n result = pandas_ta.cksp(self.high, self.low, self.close)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"CKSP_10_1_9\")\n\n def test_decay(self):\n result = pandas_ta.decay(self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"LDECAY_5\")\n\n result = pandas_ta.decay(self.close, mode=\"exp\")\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"EXPDECAY_5\")\n\n def test_decreasing(self):\n result = pandas_ta.decreasing(self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"DEC_1\")\n\n def test_dpo(self):\n result = pandas_ta.dpo(self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"DPO_20\")\n\n def test_increasing(self):\n result = pandas_ta.increasing(self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"INC_1\")\n\n def test_long_run(self):\n result = pandas_ta.long_run(self.close, self.open)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"LR_2\")\n\n def test_psar(self):\n result = pandas_ta.psar(self.high, self.low)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"PSAR_0.02_0.2\")\n\n # Combine Long and Short SAR\"s into one SAR value\n psar = result[result.columns[:2]].fillna(0)\n psar = psar[psar.columns[0]] + psar[psar.columns[1]]\n psar.name = result.name\n\n try:\n expected = tal.SAR(self.high, self.low)\n pdt.assert_series_equal(psar, expected)\n except AssertionError as ae:\n try:\n psar_corr = pandas_ta.utils.df_error_analysis(psar,\n expected,\n col=CORRELATION)\n self.assertGreater(psar_corr, CORRELATION_THRESHOLD)\n except Exception as ex:\n error_analysis(psar, CORRELATION, ex)\n\n def test_qstick(self):\n result = pandas_ta.qstick(self.open, self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"QS_10\")\n\n def test_short_run(self):\n result = pandas_ta.short_run(self.close, self.open)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"SR_2\")\n\n def test_ttm_trend(self):\n result = pandas_ta.ttm_trend(self.high, self.low, self.close)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"TTMTREND_6\")\n\n def test_vortex(self):\n result = pandas_ta.vortex(self.high, self.low, self.close)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"VTX_14\")\n","sub_path":"tests/test_indicator_trend.py","file_name":"test_indicator_trend.py","file_ext":"py","file_size_in_byte":7761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"53076421","text":"#!/usr/bin/env python3\n\n\ndata = ['Trang', 'Trung', 'Tien',\n 'Dai', 'Duong', 'Dung', 'Hung', 'Huy', 'Hoang']\n\nMAGIC_NUMBER = 20200000\n\n\ndef solve(students, N=5):\n '''Biết những bạn có tên bắt đầu bằng chữ `D` sẽ ngồi phòng thi số N,\n các bạn có tên bắt đầu chữ `H` ngồi phòng thi số N+1, và các bạn còn lại,\n nếu có tên kết thúc là `ng` sẽ ngồi cùng phòng các bạn tên `H`, còn lại\n ngồi cùng phòng `D`.\n Tất cả các học viên đều sinh năm 1990.\n Mã học viên được tính bằng: hash(NAME) % MAGIC_NUMBER\n (chú ý số này mỗi lần chạy sẽ khác nhau).\n Ví dụ: mã học viên của 'Dung' là: hash('Dung') % MAGIC_NUMBER\n\n Trả về result là list các tuple chứa\n (mã sinh viên, tên học viên, năm sinh, phòng thi), sắp xếp\n theo thứ tự 
tên học viên.\n '''\n\n result = []\n # Xoá dòng raise và Viết code vào đây set result làm kết quả\n raise NotImplementedError(\"Học viên chưa làm bài này\")\n\n return result\n\n\ndef main():\n students = data\n # Cho danh sách học viên students\n for msv, *ignore, room in solve(students):\n print(msv, room)\n print(\"DEBUG\", ignore, type(ignore), len(ignore))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pyfml/exercises/ex5_5.py","file_name":"ex5_5.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"345780640","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 29 16:42:51 2017\n\n@author: throne\n\"\"\"\nfrom urllib.request import urlretrieve\nfrom bs4 import BeautifulSoup\nfrom getHTML import download\ndef getAbsoluteURL(source):\n if source.startswith(\"http://www.\"):\n url = \"http://\"+source[11:]\n elif source.startswith(\"www.\"):\n url = source[4:]\n url = \"http://\"+url\n elif source.startswith(\"https://www.\"):\n url = \"https://\"+source[11:]\n else:\n url = source\n return url\n\n\ndef getPicture():\n url = input(\"请输入要爬取的网址:\")\n #url = getAbsoluteURL(url)\n path = input(\"请输入图片存放的位置:\")\n html = download(url)\n soup = BeautifulSoup(html, 'html.parser')\n pUrls = soup.find_all('img', recursive=True)\n count = 1\n for i in pUrls:\n src = i.attrs['src']\n pathOfpicture = path + \"\\\\\" + str(count) + \".png\"\n print(\"第{}张图片正在下载,地址为{}\".format(count, pathOfpicture))\n urlretrieve(src, pathOfpicture)\n count += 1\n \nif __name__ == \"__main__\":\n getPicture()\n","sub_path":"scraping/getPictures.py","file_name":"getPictures.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"212097467","text":"# The Super Intense Awesome Guessing Game Of Intense Awesomeness\nprint(\"\\n\" * 200)\nprint(\"Think of a number and I will guess it!\")\n\n\nbiggest = int(input(\"Enter the largest number in the range! > \"))\nsmallest = int(input(\"Enter the smallest number in the range! > \"))\n\nwhile True:\n guess = int((biggest + smallest) / 2)\n print(\"I guess...\", guess, \"!\")\n hint = input(\"please type 'high', 'low' or 'correct' > \")\n if hint in [\"high\", \"High\"]:\n biggest = guess - 1\n if hint in [\"low\", \"Low\"]:\n smallest = guess + 1\n if hint in [\"correct\", \"Correct\"]:\n print(\"Haha! I knew I would win!\")\n break\n if biggest == smallest:\n guess = biggest\n print(\"Oh!! I know it! It's\", guess, \"!\")\n break\n \n \n \n \n","sub_path":"guess_game.py","file_name":"guess_game.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"345974373","text":"## Copyright (c) 2012-2015 Aldebaran Robotics. 
All rights reserved.\n## Use of this source code is governed by a BSD-style license that can be\n## found in the COPYING file.\nimport os\n\nimport qisys.archive\nimport qitoolchain\nimport qibuild.config\n\ndef test_simple(qitoolchain_action):\n qitoolchain_action(\"create\", \"foo\")\n qibuild.config.add_build_config(\"foo\", toolchain=\"foo\")\n word_package = qitoolchain_action.get_test_package(\"world\")\n qitoolchain_action(\"add-package\", \"-c\", \"foo\", word_package)\n tc = qitoolchain.get_toolchain(\"foo\")\n world_package = tc.packages[0]\n assert world_package.name == \"world\"\n assert world_package.path\n\ndef test_legacy_no_name_given(tmpdir, qitoolchain_action):\n qitoolchain_action(\"create\", \"foo\")\n qibuild.config.add_build_config(\"foo\", toolchain=\"foo\")\n world = tmpdir.mkdir(\"world\")\n world.ensure(\"include\", \"world.h\", file=True)\n world.ensure(\"lib\", \"libworld.so\", file=True)\n archive = qisys.archive.compress(world.strpath)\n error = qitoolchain_action(\"add-package\", \"-c\", \"foo\", archive, raises=True)\n assert \"Must specify --name\" in error\n\ndef test_legacy_happy_path(tmpdir, qitoolchain_action):\n qitoolchain_action(\"create\", \"foo\")\n qibuild.config.add_build_config(\"foo\", toolchain=\"foo\")\n world = tmpdir.mkdir(\"world\")\n world.ensure(\"include\", \"world.h\", file=True)\n world.ensure(\"lib\", \"libworld.so\", file=True)\n archive = qisys.archive.compress(world.strpath)\n qitoolchain_action(\"add-package\", \"-c\", \"foo\", \"--name\", \"world\", archive)\n tc = qitoolchain.get_toolchain(\"foo\")\n world_package = tc.get_package(\"world\")\n assert os.path.exists(os.path.join(world_package.path, \"include\", \"world.h\"))\n assert os.path.exists(os.path.join(world_package.path, \"lib\", \"libworld.so\"))\n","sub_path":"python/qitoolchain/test/test_qitoolchain_add_package.py","file_name":"test_qitoolchain_add_package.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"456358065","text":"##############################################################################\n#\n# Copyright (c) 2012 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\nimport unittest\n\n\nclass _Base:\n\n def setUp(self):\n from zope.interface.interface import adapter_hooks\n self._restore = adapter_hooks[:]\n\n def tearDown(self):\n from zope.interface.interface import adapter_hooks\n adapter_hooks[:] = self._restore\n\n\nclass Test_clone(_Base, unittest.TestCase):\n\n def _callFUT(self, obj):\n from zope.copy import clone\n return clone(obj)\n\n def test_wo_hooks(self):\n from zope.copy.examples import Demo\n demo = Demo()\n demo.freeze()\n self.assertTrue(demo.isFrozen())\n copied = self._callFUT(demo)\n self.assertFalse(copied is demo)\n self.assertTrue(isinstance(copied, Demo))\n self.assertTrue(copied.isFrozen())\n\n def test_w_simple_hook(self):\n from zope.copy.examples import Data\n from zope.copy.examples import Demo\n from zope.copy.interfaces import ICopyHook\n demo = Demo()\n demo.freeze()\n\n class Hook:\n def __init__(self, context):\n self.context = context\n\n def __call__(self, obj, register):\n return None\n\n def _adapt(iface, obj):\n if iface is ICopyHook and isinstance(obj, Data):\n return Hook(obj)\n _registerAdapterHook(_adapt)\n copied = self._callFUT(demo)\n self.assertFalse(copied is demo)\n self.assertTrue(isinstance(copied, Demo))\n self.assertFalse(copied.isFrozen())\n\n def test_subobject_wo_post_copy_hook(self):\n from zope.location.location import Location\n from zope.location.location import locate\n\n from zope.copy.examples import Subobject\n o = Location()\n s = Subobject()\n o.subobject = s\n locate(s, o, 'subobject')\n self.assertTrue(s.__parent__ is o)\n self.assertEqual(o.subobject(), 0)\n self.assertEqual(o.subobject(), 1)\n self.assertEqual(o.subobject(), 2)\n c = self._callFUT(o)\n self.assertTrue(c.subobject.__parent__ is c)\n self.assertEqual(c.subobject(), 3)\n self.assertEqual(o.subobject(), 3)\n\n def test_subobject_w_post_copy_hook(self):\n from zope.location.location import Location\n from zope.location.location import locate\n\n from zope.copy.examples import Subobject\n from zope.copy.interfaces import ICopyHook\n o = Location()\n s = Subobject()\n o.subobject = s\n locate(s, o, 'subobject')\n self.assertTrue(s.__parent__ is o)\n self.assertEqual(o.subobject(), 0)\n self.assertEqual(o.subobject(), 1)\n self.assertEqual(o.subobject(), 2)\n\n class Hook:\n def __init__(self, context):\n self.context = context\n\n def __call__(self, obj, register):\n obj = Subobject()\n\n def reparent(translate):\n obj.__parent__ = translate(self.context.__parent__)\n register(reparent)\n return obj\n\n def _adapt(iface, obj):\n if iface is ICopyHook and isinstance(obj, Subobject):\n return Hook(obj)\n _registerAdapterHook(_adapt)\n c = self._callFUT(o)\n self.assertTrue(c.subobject.__parent__ is c)\n self.assertEqual(c.subobject(), 0)\n self.assertEqual(o.subobject(), 3)\n\n\nclass Test_copy(_Base, unittest.TestCase):\n\n def _callFUT(self, obj):\n from zope.copy import copy\n return copy(obj)\n\n def test_clears_attrs(self):\n from zope.copy.examples import Demo\n parent = Demo()\n demo = Demo()\n demo.__parent__ = parent\n demo.__name__ = 'demo'\n copied = self._callFUT(demo)\n self.assertFalse(copied is demo)\n 
self.assertTrue(isinstance(copied, Demo))\n self.assertEqual(copied.__parent__, None)\n self.assertEqual(copied.__name__, None)\n\n def test_w_readonly___parent___and___name__(self):\n global Foo # make unpicklable\n parent = object()\n\n class Foo:\n @property\n def __parent__(self):\n return parent\n\n @property\n def __name__(self):\n return 'foo'\n foo = Foo()\n copied = self._callFUT(foo)\n self.assertFalse(copied is foo)\n self.assertTrue(isinstance(copied, Foo))\n self.assertTrue(copied.__parent__ is parent)\n self.assertEqual(copied.__name__, 'foo')\n\n\nclass CopyPersistentTests(_Base, unittest.TestCase):\n\n def _getTargetClass(self):\n from zope.copy import CopyPersistent\n return CopyPersistent\n\n def _makeOne(self, obj):\n return self._getTargetClass()(obj)\n\n def test_ctor(self):\n obj = object()\n cp = self._makeOne(obj)\n self.assertTrue(cp.toplevel is obj)\n self.assertEqual(cp.pids_by_id, {})\n self.assertEqual(cp.others_by_pid, {})\n self.assertEqual(cp.registered, [])\n\n def test_id_wo_hook(self):\n obj = object()\n cp = self._makeOne(obj)\n self.assertEqual(cp.id(obj), None)\n\n def test_id_w_hook_already_cached(self):\n from zope.copy.interfaces import ICopyHook\n obj = object()\n cp = self._makeOne(obj)\n cp.pids_by_id[id(obj)] = 'PID'\n\n class Hook:\n def __init__(self, context):\n self.context = context\n\n def __call__(self, obj, register):\n raise AssertionError(\"Not called\")\n\n def _adapt(iface, obj):\n assert iface is ICopyHook\n return Hook(obj)\n _registerAdapterHook(_adapt)\n self.assertEqual(cp.id(obj), 'PID')\n\n def test_id_w_hook_raising_ResumeCopy(self):\n from zope.copy.interfaces import ICopyHook\n from zope.copy.interfaces import ResumeCopy\n obj = object()\n cp = self._makeOne(obj)\n\n class Hook:\n def __init__(self, context):\n self.context = context\n\n def __call__(self, obj, register):\n raise ResumeCopy()\n\n def _adapt(iface, obj):\n assert iface is ICopyHook\n return Hook(obj)\n _registerAdapterHook(_adapt)\n self.assertEqual(cp.id(obj), None)\n\n def test_id_w_hook_normal(self):\n from zope.copy.interfaces import ICopyHook\n obj = object()\n cp = self._makeOne(obj)\n\n class Hook:\n def __init__(self, context):\n self.context = context\n\n def __call__(self, obj, register):\n return None\n\n def _adapt(iface, obj):\n assert iface is ICopyHook\n return Hook(obj)\n _registerAdapterHook(_adapt)\n self.assertEqual(cp.id(obj), 1)\n obj2 = object()\n self.assertEqual(cp.id(obj2), 2)\n self.assertEqual(cp.pids_by_id, {id(obj): 1, id(obj2): 2})\n self.assertEqual(cp.others_by_pid, {1: None, 2: None})\n\n\ndef _registerAdapterHook(func):\n from zope.interface.interface import adapter_hooks\n adapter_hooks.insert(0, func)\n\n\ndef test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n","sub_path":"src/zope/copy/tests/test_copy.py","file_name":"test_copy.py","file_ext":"py","file_size_in_byte":7761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"24859738","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# What is the largest prime factor of the number 600851475143 ?\nfrom functions import isprime\nprimes=[]\nnumber=600851475143\nfor i in range(2,10000):\n if isprime(i)==1:\n primes.append(i)\nfactor=[]\nfor i in primes:\n if number%i==0:\n 
factor.append(i)\nprint(factor[-1])\n","sub_path":"LargestPrimeFactor.py","file_name":"LargestPrimeFactor.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"568265058","text":"import math\r\nimport random\r\nfrom utils import pygame, display_width, display_height, gameDisplay, white\r\n\r\nsaucer_speed = 5\r\nbullet_speed = 15\r\nsnd_saucerB = pygame.mixer.Sound(\"Sounds/saucerBig.wav\")\r\nsnd_saucerS = pygame.mixer.Sound(\"Sounds/saucerSmall.wav\")\r\n\r\n\r\n# Create class saucer\r\nclass Saucer:\r\n def __init__(self):\r\n self.x = 0\r\n self.y = 0\r\n self.state = \"Dead\"\r\n self.type = \"Large\"\r\n self.dirchoice = ()\r\n self.bullets = []\r\n self.cd = 0\r\n self.bdir = 0\r\n self.soundDelay = 0\r\n\r\n def updateSaucer(self):\r\n # Move player\r\n self.x += saucer_speed * math.cos(self.dir * math.pi / 180)\r\n self.y += saucer_speed * math.sin(self.dir * math.pi / 180)\r\n\r\n # Choose random direction\r\n if random.randrange(0, 100) == 1:\r\n self.dir = random.choice(self.dirchoice)\r\n\r\n # Wrapping\r\n if self.y < 0:\r\n self.y = display_height\r\n elif self.y > display_height:\r\n self.y = 0\r\n if self.x < 0 or self.x > display_width:\r\n self.state = \"Dead\"\r\n\r\n # Shooting\r\n if self.type == \"Large\":\r\n self.bdir = random.randint(0, 360)\r\n if self.cd == 0:\r\n self.bullets.append(Bullet(self.x, self.y, self.bdir))\r\n self.cd = 30\r\n else:\r\n self.cd -= 1\r\n\r\n # Play SFX\r\n if self.type == \"Large\":\r\n pygame.mixer.Sound.play(snd_saucerB)\r\n else:\r\n pygame.mixer.Sound.play(snd_saucerS)\r\n\r\n def createSaucer(self):\r\n # Create saucer\r\n # Set state\r\n self.state = \"Alive\"\r\n\r\n # Set random position\r\n self.x = random.choice((0, display_width))\r\n self.y = random.randint(0, display_height)\r\n\r\n # Set random type\r\n if random.randint(0, 1) == 0:\r\n self.type = \"Large\"\r\n self.size = 20\r\n else:\r\n self.type = \"Small\"\r\n self.size = 10\r\n\r\n # Create random direction\r\n if self.x == 0:\r\n self.dir = 0\r\n self.dirchoice = (0, 45, -45)\r\n else:\r\n self.dir = 180\r\n self.dirchoice = (180, 135, -135)\r\n\r\n # Reset bullet cooldown\r\n self.cd = 0\r\n\r\n def drawSaucer(self):\r\n # Draw saucer\r\n pygame.draw.polygon(gameDisplay, white,\r\n ((self.x + self.size, self.y),\r\n (self.x + self.size / 2, self.y + self.size / 3),\r\n (self.x - self.size / 2, self.y + self.size / 3),\r\n (self.x - self.size, self.y),\r\n (self.x - self.size / 2, self.y - self.size / 3),\r\n (self.x + self.size / 2, self.y - self.size / 3)), 1)\r\n pygame.draw.line(gameDisplay, white,\r\n (self.x - self.size, self.y),\r\n (self.x + self.size, self.y))\r\n pygame.draw.polygon(gameDisplay, white,\r\n ((self.x - self.size / 2, self.y - self.size / 3),\r\n (self.x - self.size / 3, self.y - 2 * self.size / 3),\r\n (self.x + self.size / 3, self.y - 2 * self.size / 3),\r\n (self.x + self.size / 2, self.y - self.size / 3)), 1)\r\n\r\n\r\n# Create class bullet\r\nclass Bullet:\r\n def __init__(self, x, y, direction):\r\n self.x = x\r\n self.y = y\r\n self.dir = direction\r\n self.life = 30\r\n\r\n def updateBullet(self):\r\n # Moving\r\n self.x += bullet_speed * math.cos(self.dir * math.pi / 180)\r\n self.y += bullet_speed * math.sin(self.dir * math.pi / 180)\r\n\r\n # Drawing\r\n pygame.draw.circle(gameDisplay, white, (int(self.x), int(self.y)), 3)\r\n\r\n # Wrapping\r\n if self.x > display_width:\r\n self.x = 0\r\n elif self.x < 0:\r\n self.x = 
display_width\r\n elif self.y > display_height:\r\n self.y = 0\r\n elif self.y < 0:\r\n self.y = display_height\r\n self.life -= 1","sub_path":"astermax/saucer.py","file_name":"saucer.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"127385973","text":"# Copyright 2018 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Benchmark for various implementations of repeat ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\n\nimport tensorflow as tf\nimport repeat_ops\n\nflags.DEFINE_integer(\"seed\", default=135, help=\"numpy seed value\")\nflags.DEFINE_integer(\n \"max_repeats\", default=2000, help=\"maximum entry in repeats\")\nflags.DEFINE_integer(\n \"size\", default=1000, help=\"size of inputs/repeats\")\nflags.DEFINE_integer(\n \"burn_iters\", default=200, help=\"number of iterations to burn-in\")\nflags.DEFINE_integer(\n \"min_iters\", default=10000, help=\"minimum number of iterations to benchmark\")\n\n\nFLAGS = flags.FLAGS\n\n\ndef repeat_base(repeats):\n \"\"\"Repeat using `tf.while_loop` with `Tensor` concatenation.\"\"\"\n values = tf.equal(tf.range(tf.size(repeats), dtype=tf.int32) % 2, 1)\n return repeat_ops.repeat(values, repeats)\n\n\ndef run_benchmark(f, seed, max_repeats, size, burn_iters, min_iters):\n np.random.seed(seed)\n repeats = np.random.randint(max_repeats, size=size).astype(np.int32)\n graph = tf.Graph()\n name = f.__name__\n with graph.as_default():\n with tf.device(\"/cpu:0\"):\n out = f(repeats)\n\n with tf.Session(graph=graph) as sess:\n tf.logging.info(\"Benchmarking %s\" % name)\n result = tf.test.Benchmark().run_op_benchmark(\n sess, out, burn_iters=burn_iters, min_iters=min_iters, name=name)\n result = {k: v for k, v in result.items()}\n result['time_per_iter'] = result['wall_time'] / result['iters']\n return result\n\n\ndef main(argv):\n results = []\n tf.logging.set_verbosity(tf.logging.INFO)\n for fn in (\n repeat_ops.binary_repeat,\n repeat_base,\n ):\n results.append(run_benchmark(\n fn,\n seed=FLAGS.seed,\n max_repeats=FLAGS.max_repeats,\n size=FLAGS.size,\n burn_iters=FLAGS.burn_iters,\n min_iters=FLAGS.min_iters)\n )\n\n best = min(results, key=lambda r: r[\"time_per_iter\"])\n best_time = best[\"time_per_iter\"]\n longest_name = max(len(r[\"name\"]) for r in results)\n for result in results:\n extra = \"(x %2f)\" % (result[\"time_per_iter\"] / best_time)\n name = result[\"name\"].ljust(longest_name)\n tf.logging.info(\"%s: %.3e %s\" % (name, result[\"time_per_iter\"], extra))\n\n tf.logging.info(\n \"best: %s, time: %.3e\" % (best[\"name\"], best[\"time_per_iter\"]))\n\n\nif __name__ == \"__main__\":\n 
app.run(main)\n","sub_path":"tf_repeat/python/ops/benchmark_binary_repeat.py","file_name":"benchmark_binary_repeat.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"331902221","text":"\"\"\"\n--- Ångström ---\nTests molecule delete.\n\"\"\"\nfrom angstrom import Molecule\nimport os\n\nbenzene_xyz = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'benzene.xyz')\n\n\ndef test_benzene_delete():\n \"\"\"Tests deleting atoms from benzene molecule\"\"\"\n benzene = Molecule(read=benzene_xyz)\n benzene.delete([0, 2, 4, 6, 8, 10])\n assert len(benzene.atoms) == 6\n assert all([a == 'H' for a in benzene.atoms])\n","sub_path":"tests/test_molecule_delete.py","file_name":"test_molecule_delete.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"432915199","text":"import pyaudio\nimport wave\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nCHUNK = 1024\nRECORD_DURATION = 5\nWAVE_OUTPUT_FILENAME = \"file.wav\"\n\np = pyaudio.PyAudio()\n\n#Set up and start the stream\nstream = p.open(format = FORMAT,\n channels = CHANNELS,\n rate = RATE,\n input = True,\n frames_per_buffer = CHUNK)\n\nprint(\"Recording...\")\nframes = []#for saving signal data\nfor i in range(0,int(RATE/CHUNK*RECORD_DURATION)):\n data = stream.read(CHUNK)#Read the audio data from steam chunk by chunk, each chunk contains 1024 samples.\n frames.append(data)#save the data in frames\n\nprint(\"Done Recording\")\n\nstream.stop_stream() #Stop the stream\nstream.close() #close the stream\np.terminate()\n\n#Write the audio to file\nwaveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\nwaveFile.setnchannels(CHANNELS)\nwaveFile.setsampwidth(p.get_sample_size(FORMAT))\nwaveFile.setframerate(RATE)\nwaveFile.writeframes(b''.join(frames))\nwaveFile.close()","sub_path":"2_SpeechSignalBasic/1_AudioRecording.py","file_name":"1_AudioRecording.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"143903300","text":"import numpy as np\nimport time\nimport tensorflow as tf\nimport attention_input\nfrom tensorflow.python.layers.core import Dense\n\nn_classes = 16\nmy_data = attention_input.Data_Control(\n './data/lipcontrol/cutdata11/', n_classes\n)\n#label集合需要添加三类label,分别是补全类“pad”,开始类“GO”, 结束类“EOS”\nX = my_data.traindata\nX = X.reshape(-1, my_data.traindata.shape[1], my_data.traindata.shape[2], my_data.traindata.shape[3], 1)\nY = my_data.trainlabel\nY = Y.astype(np.int)\nXlen = my_data.trainlen\nYlen = my_data.trainlabel_len\n\nXtest = my_data.testdata\nXtest = Xtest.reshape(-1, my_data.traindata.shape[1], my_data.traindata.shape[2], my_data.traindata.shape[3], 1)\nYtest = my_data.testlabel\nYtest = Ytest.astype(np.int)\nXtestlen = my_data.testlen\nYtestlen = my_data.testlabel_len\n\ndef edit_distance(word1, word2):\n len1 = len(word1)\n len2 = len(word2)\n dp = np.zeros((len1 + 1, len2 + 1))\n for i in range(len1 + 1):\n dp[i][0] = i\n for j in range(len2 + 1):\n dp[0][j] = j\n\n for i in range(1, len1 + 1):\n for j in range(1, len2 + 1):\n delta = 0 if word1[i - 1] == word2[j - 1] else 1\n dp[i][j] = min(dp[i - 1][j - 1] + delta, min(dp[i - 1][j] + 1, dp[i][j - 1] + 1))\n return dp[len1][len2]\n\n\n\ndef get_inputs():\n '''\n 模型输入tensor\n '''\n xs = tf.placeholder(tf.float32,\n [None, my_data.traindata.shape[1], my_data.traindata.shape[2], 
my_data.traindata.shape[3], 1])\n\n inputs = tf.reshape(xs, [-1, my_data.traindata.shape[2], my_data.traindata.shape[3], 1], name='inputs')\n targets = tf.placeholder(tf.int32, [None, my_data.alllabel.shape[1]], name='targets')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n istraining = tf.placeholder(tf.bool)\n\n # 定义target序列最大长度(之后target_sequence_length和source_sequence_length会作为feed_dict的参数)\n target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')\n max_target_sequence_length = Y.shape[1]\n source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')\n\n return xs, inputs, targets, learning_rate, target_sequence_length, max_target_sequence_length, source_sequence_length, istraining\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME')\n\n\ndef bn_layer(inputs, phase_train, scope=None):\n # 定义Batch Normalization layer\n return tf.cond(phase_train,\n lambda: tf.contrib.layers.batch_norm(inputs, decay=0.9, is_training=True, scale=True,\n updates_collections=None, scope=scope),\n lambda: tf.contrib.layers.batch_norm(inputs, decay=0.9, is_training=False, scale=True,\n updates_collections=None, scope=scope, reuse=True))\n\ndef get_encoder_layer(input_data, rnn_size, num_layers,\n source_sequence_length, istraining, keep_prob):\n '''\n 构造Encoder层,包含CNN和LSTM\n\n 参数说明:\n # - input_data: 输入tensor\n # - rnn_size: rnn隐层结点数量\n # - num_layers: 堆叠的rnn cell数量\n # - source_sequence_length: 源数据的序列长度\n # - istraining: 是否训练,用于BN\n # '''\n\n # CNN\n\n W_conv1 = weight_variable([5, 6, 1, 8])\n b_conv1 = bias_variable([8])\n conv1 = conv2d(input_data, W_conv1) + b_conv1\n BN_out1 = bn_layer(conv1, istraining, scope='BN1')\n h_conv1 = tf.nn.relu(BN_out1)\n h_pool1 = max_pool_2x2(h_conv1)\n\n w_conv2 = weight_variable([5, 3, 8, 16])\n b_conv2 = bias_variable([16])\n conv2 = conv2d(h_pool1, w_conv2) + b_conv2\n BN_out2 = bn_layer(conv2, istraining, scope='BN2')\n h_conv2 = tf.nn.relu(BN_out2)\n h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 3, 2, 1], strides=[1, 3, 2, 1], padding='SAME')\n\n w_conv3 = weight_variable([3, 3, 16, 32])\n b_conv3 = bias_variable([32])\n conv3 = conv2d(h_pool2, w_conv3) + b_conv3\n BN_out3 = bn_layer(conv3, istraining, scope='BN3')\n h_conv3 = tf.nn.relu(BN_out3)\n h_pool3 = tf.nn.max_pool(h_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n h_conv = h_pool3\n h_conv_shape = h_conv.get_shape().as_list()\n\n full_units = 32\n W_fc1 = weight_variable([h_conv_shape[1] * h_conv_shape[2] * h_conv_shape[3], full_units])\n b_fc1 = bias_variable([full_units])\n h_flat = tf.reshape(h_conv, [-1, h_conv_shape[1] * h_conv_shape[2] * h_conv_shape[3]])\n h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)\n convout = tf.reshape(h_fc1, [-1, my_data.traindata.shape[1], full_units])\n def get_lstm_cell(rnn_size):\n lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\n return tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)\n\n # cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell(rnn_size[i]) for i in range(num_layers)])\n #\n # encoder_output, _ = tf.nn.dynamic_rnn(cell, 
convout,\n # sequence_length=source_sequence_length, dtype=tf.float32)\n\n encoder_output, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n cells_fw=[get_lstm_cell(rnn_size[i]) for i in range(num_layers)],\n cells_bw=[get_lstm_cell(rnn_size[i]) for i in range(num_layers)],\n inputs=convout,\n sequence_length=source_sequence_length,\n time_major=False,\n dtype=tf.float32)\n return encoder_output\n\ndef process_decoder_input(data):\n '''\n 补充,并移除最后一个字符\n '''\n # cut掉最后一个字符\n ending = tf.strided_slice(data, [0, 0], [tf.shape(data)[0], -1], [1, 1])\n decoder_input = tf.concat([tf.fill([tf.shape(data)[0], 1], n_classes+1), ending], 1)\n\n return decoder_input\n\n\ndef decoding_layer(decoding_embedding_size, num_layers, rnn_size,\n target_sequence_length, max_target_sequence_length, encoder_output, decoder_input, keep_prob, source_sequence_length):\n '''\n 构造Decoder层\n\n 参数:\n - decoding_embedding_size: embed向量大小\n - num_layers: 堆叠的RNN单元数量\n - rnn_size: RNN单元的隐层结点数量\n - target_sequence_length: target数据序列长度\n - max_target_sequence_length: target数据序列最大长度\n - encoder_state: encoder端编码的状态向量\n - decoder_input: decoder端输入\n '''\n # 1. Embedding\n decoder_embeddings = tf.Variable(tf.random_uniform([n_classes+3, decoding_embedding_size]))\n decoder_embed_input = tf.nn.embedding_lookup(decoder_embeddings, decoder_input)\n\n # 2. 构造Decoder中的RNN单元\n def get_decoder_cell(rnn_size):\n decoder_cell = tf.contrib.rnn.LSTMCell(rnn_size,\n initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\n return tf.contrib.rnn.DropoutWrapper(decoder_cell, output_keep_prob=keep_prob)\n\n\n\n def get_attention_cell(rnn_size):\n attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units=rnn_size, memory=encoder_output, memory_sequence_length=source_sequence_length)\n cell = get_decoder_cell(rnn_size)\n attn_cell = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanism, attention_layer_size=rnn_size,\n output_attention=True)\n return attn_cell\n\n cell = tf.contrib.rnn.MultiRNNCell([get_attention_cell(rnn_size[i]) for i in range(num_layers)])\n # 3. Output全连接层\n output_layer = Dense(n_classes+3,\n kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))\n\n # 4. Training decoder\n with tf.variable_scope(\"decode\"):\n # 得到help对象\n training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=decoder_embed_input,\n sequence_length=target_sequence_length,\n time_major=False)\n # 构造decoder\n training_decoder = tf.contrib.seq2seq.BasicDecoder(cell,\n training_helper,\n cell.zero_state(dtype=tf.float32, batch_size=tf.shape(xs)[0]),\n output_layer)\n training_decoder_output = tf.contrib.seq2seq.dynamic_decode(training_decoder,\n impute_finished=True,\n maximum_iterations=max_target_sequence_length)\n # attention_matrices = final_state.alignment_history.stack(\n # name=\"train_attention_matrix\")\n # 5. 
Predicting decoder\n # 与training共享参数\n with tf.variable_scope(\"decode\", reuse=True):\n # 创建一个常量tensor并复制为batch_size的大小\n start_tokens = tf.tile(tf.constant([n_classes+1], dtype=tf.int32), [tf.shape(xs)[0]],\n name='start_tokens')\n predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(decoder_embeddings,\n start_tokens,\n n_classes + 2)\n predicting_decoder = tf.contrib.seq2seq.BasicDecoder(cell,\n predicting_helper,\n cell.zero_state(dtype=tf.float32,\n batch_size=tf.shape(xs)[0]),\n output_layer)\n predicting_decoder_output = tf.contrib.seq2seq.dynamic_decode(predicting_decoder,\n impute_finished=True,\n maximum_iterations=max_target_sequence_length)\n\n return training_decoder_output, predicting_decoder_output\n\n\ndef seq2seq_model(input_data, targets, lr, target_sequence_length,\n max_target_sequence_length, source_sequence_length,\n istraining, decoding_embedding_size,\n rnn_size, num_layers, keep_prob):\n # 获取encoder的状态输出\n encoder_output = get_encoder_layer(input_data,\n rnn_size,\n num_layers,\n source_sequence_length,\n istraining,\n keep_prob)\n\n # 预处理后的decoder输入\n decoder_input = process_decoder_input(targets)\n\n # 将状态向量与输入传递给decoder\n training_decoder_output, predicting_decoder_output = decoding_layer(decoding_embedding_size,\n num_layers,\n rnn_size,\n target_sequence_length,\n max_target_sequence_length,\n encoder_output,\n decoder_input,\n keep_prob,\n source_sequence_length)\n\n return training_decoder_output, predicting_decoder_output\n\n\n# Batch Size\nbatch_size = 32\n# RNN Size\nrnn_size = [128, 128]\n# Number of Layers\nnum_layers = 2\n# Embedding Size\ndecoding_embedding_size = 15\n# Learning Rate\nlearning_rate = 0.001\n\n# 构造graph\ntrain_graph = tf.Graph()\n\nwith train_graph.as_default():\n # 获得模型输入\n xs, input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length, istraining = get_inputs()\n keep_prob = tf.placeholder(tf.float32)\n training_decoder_output, predicting_decoder_output = seq2seq_model(input_data,\n targets,\n lr,\n target_sequence_length,\n max_target_sequence_length,\n source_sequence_length,\n istraining,\n decoding_embedding_size,\n rnn_size,\n num_layers,\n keep_prob)\n\n training_logits = tf.identity(training_decoder_output[0].rnn_output, 'logits')\n predicting_logits = tf.identity(predicting_decoder_output[0].rnn_output, name='predictions_logits')\n predicting_results = tf.identity(predicting_decoder_output[0].sample_id, name='predictions')\n\n masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')\n # accuracy = tf.contrib.metrics.accuracy(targets, predicting_logits, weights=masks, name='accuracy')\n with tf.name_scope(\"optimization\"):\n # Loss function\n cost = tf.contrib.seq2seq.sequence_loss(\n training_logits,\n targets,\n masks)\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer()\n train_op = optimizer.minimize(cost)\n\n\n\ndef generatebatch(X,step, batch_size):\n start = (step * batch_size) % len(X)\n if start + batch_size >len(X):\n start = ((step+1) * batch_size) % len(X)\n\n end = min(start + batch_size,len(X))\n batch_xs = X[start:end]\n return batch_xs # 生成每一个batch\n\n\ndef get_accuracy(pre_result, target):\n edit_dis = []\n for i in range(0, target.shape[0]):\n pred = []\n for j in pre_result[i]:\n if j == n_classes or j == n_classes+2:\n break\n pred.append(j)\n tar = [j for j in target[i] if j != n_classes and j != n_classes+2]\n edit_dis.append(edit_distance(pred, tar) / len(tar))\n accuracy = 1 - np.mean(edit_dis)\n 
return accuracy\n# config = tf.ConfigProto(log_device_placement=True)\n# config.gpu_options.allow_growth = True\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n start_time = time.time()\n last_time = start_time\n best_step = 0\n best_acc = 0\n for step in range(0, 60000):\n sources_batch = generatebatch(X, step, batch_size)\n targets_batch = generatebatch(Y, step, batch_size)\n sources_lengths = generatebatch(Xlen, step, batch_size)\n targets_lengths = generatebatch(Ylen, step, batch_size)\n\n _, loss, pre_result, tra_logits, pre_logits = sess.run(\n [train_op, cost, predicting_results, training_logits, predicting_logits],\n {xs: sources_batch,\n targets: targets_batch,\n lr: learning_rate,\n istraining: True,\n target_sequence_length: targets_lengths,\n source_sequence_length: sources_lengths,\n keep_prob: 0.8})\n\n if step % 40 == 0:\n # 计算validation loss\n tra_acc = get_accuracy(pre_result, targets_batch)\n test_result, tra_logits, test_logits = sess.run(\n [predicting_results, training_logits, predicting_logits],\n {xs: Xtest,\n targets: Ytest,\n lr: learning_rate,\n istraining: False,\n target_sequence_length: Ytestlen,\n source_sequence_length: Xtestlen,\n keep_prob: 1})\n test_acc = get_accuracy(test_result, Ytest)\n now_time = time.time()\n duration1 = now_time - start_time\n duration2 = now_time - last_time\n last_time = now_time\n print(\"step %d, %0f (%0f)s, loss %g, tracc %g, teacc %g\" % (\n step, duration1, duration2, loss, tra_acc, test_acc))\n if test_acc >= best_acc:\n best_acc = test_acc\n best_step = step\n if step % 400 == 0:\n print(\"beststep %d, bestacc %g\" % (best_step, best_acc))\n","sub_path":"attention_sentence.py","file_name":"attention_sentence.py","file_ext":"py","file_size_in_byte":17215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"404834838","text":"from datetime import datetime\n\nimport pytest\nfrom arroyo.backends.kafka import KafkaPayload\nfrom arroyo.types import BrokerValue, Message, Partition, Topic\n\nfrom sentry.conf.server import (\n KAFKA_CLUSTERS,\n KAFKA_SNUBA_GENERIC_METRICS,\n SENTRY_SLICING_CONFIG,\n SENTRY_SLICING_LOGICAL_PARTITION_COUNT,\n SLICED_KAFKA_TOPICS,\n)\nfrom sentry.sentry_metrics.consumers.indexer.routing_producer import RoutingPayload\nfrom sentry.sentry_metrics.consumers.indexer.slicing_router import (\n MissingOrgInRoutingHeader,\n SlicingConfigurationException,\n SlicingRouter,\n _validate_slicing_consumer_config,\n)\n\n\n@pytest.fixture\ndef metrics_message(org_id: int) -> Message[RoutingPayload]:\n return Message(\n BrokerValue(\n payload=RoutingPayload(\n routing_header={\"org_id\": org_id},\n routing_message=KafkaPayload(\n key=b\"\",\n value=b\"{}\",\n headers=[],\n ),\n ),\n partition=Partition(Topic(\"source_topic\"), 0),\n offset=0,\n timestamp=datetime.now(),\n )\n )\n\n\n@pytest.fixture\ndef setup_slicing(monkeypatch) -> None:\n monkeypatch.setitem(\n SENTRY_SLICING_CONFIG, \"sliceable\", {(0, 128): 0, (128, 256): 1} # type: ignore\n )\n monkeypatch.setitem(\n SLICED_KAFKA_TOPICS, # type: ignore\n (KAFKA_SNUBA_GENERIC_METRICS, 0),\n {\"topic\": \"sliced_topic_0\", \"cluster\": \"sliceable_0\"},\n )\n monkeypatch.setitem(\n SLICED_KAFKA_TOPICS, # type: ignore\n (KAFKA_SNUBA_GENERIC_METRICS, 1),\n {\"topic\": \"sliced_topic_1\", \"cluster\": \"sliceable_1\"},\n )\n monkeypatch.setitem(\n KAFKA_CLUSTERS,\n \"sliceable_0\",\n {\"bootstrap.servers\": \"127.0.0.1:9092\"},\n )\n monkeypatch.setitem(\n KAFKA_CLUSTERS,\n 
\"sliceable_1\",\n {\"bootstrap.servers\": \"127.0.0.1:9092\"},\n )\n\n\n@pytest.mark.parametrize(\"org_id\", [1, 127, 128, 256, 257])\ndef test_with_slicing(metrics_message, setup_slicing) -> None:\n \"\"\"\n With partitioning settings, the SlicingRouter should route to the correct topic\n based on the org_id header.\n \"\"\"\n org_id = metrics_message.payload.routing_header.get(\"org_id\")\n router = SlicingRouter(\"sliceable\")\n route = router.get_route_for_message(metrics_message)\n if int(org_id) % SENTRY_SLICING_LOGICAL_PARTITION_COUNT < 128:\n assert route.topic.name == \"sliced_topic_0\"\n elif int(org_id) % SENTRY_SLICING_LOGICAL_PARTITION_COUNT < 256:\n assert route.topic.name == \"sliced_topic_1\"\n else:\n assert False, \"unexpected org_id\"\n\n\ndef test_with_no_org_in_routing_header(setup_slicing) -> None:\n \"\"\"\n With partitioning settings, the SlicingRouter should route to the correct topic\n based on the org_id header.\n \"\"\"\n message = Message(\n BrokerValue(\n payload=RoutingPayload(\n routing_header={},\n routing_message=KafkaPayload(\n key=b\"\",\n value=b\"{}\",\n headers=[],\n ),\n ),\n partition=Partition(Topic(\"source_topic\"), 0),\n offset=0,\n timestamp=datetime.now(),\n )\n )\n assert message.payload.routing_header.get(\"org_id\") is None\n router = SlicingRouter(\"sliceable\")\n with pytest.raises(MissingOrgInRoutingHeader):\n _ = router.get_route_for_message(message)\n\n\n@pytest.mark.parametrize(\"org_id\", [100])\ndef test_with_misconfiguration(metrics_message, monkeypatch):\n \"\"\"\n Configuring topic override only does not kick in routing logic. So the\n messages should be routed to the logical topic.\n \"\"\"\n monkeypatch.setitem(\n SLICED_KAFKA_TOPICS, # type: ignore\n (KAFKA_SNUBA_GENERIC_METRICS, 0),\n {\"topic\": \"sliced_topic_0\"},\n )\n monkeypatch.setitem(\n SLICED_KAFKA_TOPICS, # type: ignore\n (KAFKA_SNUBA_GENERIC_METRICS, 1),\n {\"topic\": \"sliced_topic_1\"},\n )\n\n with pytest.raises(SlicingConfigurationException):\n _ = SlicingRouter(\"sliceable\")\n\n\ndef test_validate_slicing_consumer_config(monkeypatch) -> None:\n \"\"\"\n Validate that the slicing consumer config is valid.\n \"\"\"\n with pytest.raises(\n SlicingConfigurationException, match=r\"not defined in settings.SENTRY_SLICING_CONFIG\"\n ):\n _validate_slicing_consumer_config(\"sliceable\")\n\n # Let the check for slicing config pass\n monkeypatch.setitem(\n SENTRY_SLICING_CONFIG, \"sliceable\", {(0, 128): 0, (128, 256): 1} # type: ignore\n )\n\n # Create the sliced kafka topics but omit defining the broker config in\n # KAFKA_CLUSTERS\n monkeypatch.setitem(\n SLICED_KAFKA_TOPICS, # type: ignore\n (\"sliceable\", 0),\n {\"topic\": \"sliced_topic_0\", \"cluster\": \"sliceable_0\"},\n )\n monkeypatch.setitem(\n SLICED_KAFKA_TOPICS, # type: ignore\n (\"sliceable\", 1),\n {\"topic\": \"sliced_topic_1\", \"cluster\": \"sliceable_1\"},\n )\n monkeypatch.setitem(\n KAFKA_CLUSTERS, # type: ignore\n \"sliceable_0\",\n {\"bootstrap.servers\": \"127.0.0.1:9092\"},\n )\n with pytest.raises(SlicingConfigurationException, match=r\"Broker configuration missing\"):\n _validate_slicing_consumer_config(\"sliceable\")\n\n # Now add the broker config for the second slice\n monkeypatch.setitem(\n KAFKA_CLUSTERS, # type: ignore\n \"sliceable_1\",\n {\"bootstrap.servers\": \"127.0.0.1:9092\"},\n )\n\n try:\n _validate_slicing_consumer_config(\"sliceable\")\n except SlicingConfigurationException as e:\n assert False, f\"Should not raise exception: 
{e}\"\n","sub_path":"tests/sentry/sentry_metrics/consumers/test_slicing_router.py","file_name":"test_slicing_router.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"530833257","text":"from discord.ext import commands\nimport random\n\n\nclass Caesar:\n \"\"\"\n This class contains methods to encrypt and decrypt a message.\n It can only decrypt messages that it created.\n\n Attributes\n ----------\n message : str\n The message that is encrypted or decrypted\n factors : dict of str: int\n Random values that will be used to shift letters to the right (adding to ASCII value).\n conversion : dict of str: str\n Corresponding ASCII characters for nums 0-9 to shift back to the left (subtracting to ASCII value).\n factor : int\n Random value from factors that determines how much characters will be shifted to the right.\n Always in the range 0-9 (inclusive).\n times : int\n Random value that determines how many times the message will be encrypted or decrypted.\n Always in the range 0-9 (inclusive).\n This will always be the first letter of the message\n curr_factor : int\n Tracks the current factor for the sophisticated decrypt (when each letter is encrypted with a different factor).\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Creates empty versions of all of the variables.\n Sets up factors and conversions that will shift letters left and right.\n \"\"\"\n\n self.message = \"\"\n self.factors = {\"0\": 115, \"1\": 311, \"2\": 291, \"3\": 424, \"4\": 450,\n \"5\": 192, \"6\": 419, \"7\": 276, \"8\": 345, \"9\": 269}\n self.conversion = {\"£\": \"0\", \"Ũ\": \"1\", \"ŕ\": \"2\", \"Ǜ\": \"3\", \"Ƕ\": \"4\",\n \"õ\": \"5\", \"Ǚ\": \"6\", \"ŋ\": \"7\", \"Ƒ\": \"8\", \"ņ\": \"9\"}\n self.factor = 0\n self.times = 1\n self.curr_factor = 0\n\n def gen_factor(self):\n \"\"\"\n Randomly creates a factor and sets the instance variable.\n\n Returns\n -------\n None\n \"\"\"\n\n rand_digit = str(random.randint(0, 9))\n self.message = rand_digit + self.message\n self.factor = self.factors[rand_digit]\n\n def find_factor(self):\n \"\"\"\n Finds a factor by using the conversion dictionary.\n Stores it in the factor instance variable.\n\n Returns\n -------\n None\n \"\"\"\n\n rand_digit = self.message[0]\n rand_digit = self.conversion[rand_digit]\n self.message = self.message[1:]\n self.factor = self.factors[rand_digit]\n\n def encrypt(self):\n \"\"\"\n Shifts each character in a message by the factor to the right.\n Sets the message instance variable.\n\n Returns\n -------\n None\n \"\"\"\n\n encrypted_message = \"\"\n for letter in self.message:\n char_val = ord(letter)\n char_val += self.factor\n encrypted_message += str(chr(char_val))\n self.message = encrypted_message\n\n def decrypt(self):\n \"\"\"\n Shifts each character in the message to the left using the current factor.\n A factor must first be found in order to decrypt the message.\n Sets the message instance variable.\n\n Returns\n -------\n None\n \"\"\"\n\n self.times = int(self.conversion[self.message[0]])\n self.message = self.message[1:]\n for _ in range(self.times):\n decrypted_message = \"\"\n self.find_factor()\n for letter in self.message:\n char_val = ord(letter)\n char_val -= self.factor\n try:\n decrypted_message += str(chr(char_val))\n except ValueError:\n pass\n self.message = decrypted_message\n\n def soph_encrypt(self):\n \"\"\"\n encrypts each letter with a periodically changing factor.\n Sets the message instance variable.\n 
Periodically changes the factor instance variable.\n\n Returns\n -------\n None\n \"\"\"\n\n self.curr_factor = str(random.randint(0, 9))\n self.factor = self.factors[self.curr_factor]\n self.message = self.curr_factor + self.message\n encrypted_message = \"\"\n for letter in self.message:\n char_val = ord(letter)\n char_val += self.factor\n encrypted_message += str(chr(char_val))\n self.increment_factor()\n self.message = encrypted_message\n\n def soph_decrypt(self):\n \"\"\"\n Decrypts each letter with a periodically changing factor.\n Sets the message instance variable.\n Periodically changes the factor instance variable.\n\n Returns\n -------\n None\n \"\"\"\n\n self.curr_factor = self.conversion[self.message[0]]\n self.factor = self.factors[self.curr_factor]\n self.message = self.message[1:]\n self.increment_factor()\n decrypted_message = \"\"\n for letter in self.message:\n char_val = ord(letter)\n char_val -= self.factor\n decrypted_message += str(chr(char_val))\n self.increment_factor()\n self.message = decrypted_message\n\n def increment_factor(self):\n \"\"\"\n Increases the factor by 1, wrapping it back to 0 if the current factor is 9.\n\n Returns\n -------\n None\n \"\"\"\n\n if self.curr_factor == \"9\":\n self.curr_factor = \"0\"\n else:\n self.curr_factor = int(self.curr_factor)\n self.curr_factor += 1\n self.curr_factor = str(self.curr_factor)\n self.factor = self.factors[self.curr_factor]\n\n\nclass Encrypting:\n \"\"\"\n This bot has its own encrypting system that it can process.\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n print(\"Encrypt cog loaded.\")\n\n @commands.command(aliases=[\"encode\"])\n async def encrypt(self, ctx, *, message):\n \"\"\"\n Encrypts a message.\n\n Format\n ------\n -encrypt \n -encode \n\n Examples\n --------\n -encrypt You can't understand me mwahaha >:)\n \"\"\"\n\n cipher = Caesar()\n cipher.message = message.strip()\n cipher.times = random.randint(1, 9)\n for _ in range(cipher.times):\n cipher.gen_factor()\n cipher.encrypt()\n times_converted = chr(ord(str(cipher.times)) + cipher.factors[str(cipher.times)])\n cipher.message = times_converted + cipher.message\n cipher.soph_encrypt()\n await ctx.send(cipher.message)\n\n @commands.command(aliases=[\"decode\"])\n async def decrypt(self, ctx, *, message):\n \"\"\"\n Decrypts a message.\n\n Must be a message encrypted by this bot.\n\n Format\n ------\n -decrypt \n\n Examples\n --------\n -decrypt õ˩ɟϾНҐԭٺݪ࢟ࢱদࣵঀࣧन॔॑ৗঢ়ࣦ৉ेळफशग़ो৉জ࣮৅षআࣧरय़ॐ৖ৡࣛু࣮भࣧफ़ॖং৯࣯৊ऻॸहࣧख़ौ৏ৡࣱৌीॾࣧभॖूৎ৯ࣜাलঀनव\n \"\"\"\n\n cipher = Caesar()\n cipher.message = message.strip()\n try:\n cipher.soph_decrypt()\n cipher.decrypt()\n await ctx.send(cipher.message)\n except KeyError:\n await ctx.send(\">>Message was invalid. 
Perhaps you copy/pasted incorrectly?\")\n\n\ndef setup(bot):\n bot.add_cog(Encrypting(bot))\n","sub_path":"cogs_bots/general_bot/cogs/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":7324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"353509813","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'web.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n # url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'web.views.home', name='home'),\n url(r'^contact/', 'web.views.contact', name='contact'),\n url(r'^about/', 'web.views.about', name='about'),\n url(r'^vendor/(\\d+)/$', 'web.views.vendor', name='vendor'),\n url(r'^register/', 'web.views.register', name='register'),\n url(r'^logout/', 'web.views.logout', name='logout'),\n url(r'^login/', 'web.views.login', name='login'),\n url(r'^search/', 'web.views.search', name='search'),\n url(r'^vendor/entree/new', 'web.vendor.newEntree', name='newEntree'),\n )\n\n# only do this in development, not in production\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n # urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"web/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"207057590","text":"__author__ = 'Yas'\nfrom Tkinter import *\nimport Tkinter as tk\nfrom PIL import Image, ImageTk\nimport os\n'''\nThis is the main funtions of the system and this have maily 4 functional options. They are:\n (1)---Lecture Progress=> This shows the how sucesseful of the system\n (2)---Enrollment Approval => This is the application of hidden markcov model.\n (3)---Student Data => This includes the graphical view of student progress and predidted valu of student athe exam\n (4)---Show Bach Result => This include the individula graph of the students in all bach and finala result of students at the examination\n (5)---Make Warnings => This views the students who gets the lowers predicted marks which is below 35\n'''\nclass HomeWindow():\n def __init__(self, root):\n self.root = root\n self.root.title('Home Page')\n\n # pick an image file you have .bmp .jpg .gif. 
.png\n # load the file and covert it to a Tkinter image object\n self.imageFile = \"logo3.jpg\"\n self.image1 = ImageTk.PhotoImage(Image.open(self.imageFile))\n\n # get the image size\n self.w = self.image1.width()\n self.h = self.image1.height()\n\n # position coordinates of root 'upper left corner'\n self.x = 300\n self.y = 20\n\n # make the root window the size of the image\n root.geometry(\"%dx%d+%d+%d\" % (self.w, self.h, self.x, self.y))\n\n # root has no image argument, so use a label as a panel\n self.panel1 = tk.Label(self.root, image=self.image1)\n self.panel1.pack(fill='both', expand='yes')\n\n # put a button on the image panel to test it\n self.button6 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\",text='Help',command= self.helloCallBack7)\n self.button6.pack(side=BOTTOM,pady=5)\n self.button6 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\", text='Exit App',command=root.quit())\n self.button6.pack(side=BOTTOM,pady=5)\n self.button6 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\",text='Back',command= self.helloCallBack6)\n self.button6.pack(side=BOTTOM,pady=5)\n self.button2 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\", text='Lecture Progress',command= self.helloCallBack5)\n self.button2.pack(side=BOTTOM,pady=5)\n self.button3 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\", text='Enrollment Approval',command= self.helloCallBack4)\n self.button3.pack(side=BOTTOM,pady=5)\n self.button4 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\", text='Student Data',command= self.helloCallBack3)\n self.button4.pack(side=BOTTOM,pady=5)\n self.button6 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\",text='Show Bach Result',command= self.helloCallBack1)\n self.button6.pack(side=BOTTOM,pady=5)\n self.button6 = tk.Button(self.panel1,width=20,borderwidth=2,foreground=\"dark red\",text='Make Warnings',command= self.helloCallBack)\n self.button6.pack(side=BOTTOM,pady=5)\n\n #set actions to the buttons\n\n def helloCallBack(self):\n self.root.withdraw()\n os.system('python ViewsWarnings.py')\n def helloCallBack1(self):\n self.root.withdraw()\n os.system('python win_Batch.py')\n def helloCallBack3(self):\n self.root.withdraw()\n os.system('python win_StudentData.py')\n def helloCallBack4(self):\n self.root.withdraw()\n os.system('python win1_Mac.py')\n def helloCallBack5(self):\n self.root.withdraw()\n os.system('python win_feedBack.py')\n def helloCallBack6(self):\n self.root.withdraw()\n os.system('python win_HomeWin1.py')\n def helloCallBack7(self):\n self.root.withdraw()\n os.system('python win2_Help.py')\n# run the system\nroot = tk.Tk()\nmy_gui = HomeWindow(root)\nroot.mainloop()\n","sub_path":"final_SFT/win_HomeWindow.py","file_name":"win_HomeWindow.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"31903041","text":"from scipy.optimize import fsolve\nfrom math import sin\n\n\n# f计算方程组的误差,x是一组可能的解\ndef f(x):\n # 转换为标准的浮点数列表\n # tolist()将x转换为Python的标准浮点数列表,\n # 在单个数值运算时, 标准浮点数比NumPy的浮点类型更快,从而缩短计算时间\n x0, x1, x2 = x.tolist()\n return [5 * x1 + 3,\n 4 * x0 * x0 - 2 * sin(x1 * x2),\n x1 * x2 - 1.5]\n\n\n# [1,1,1]是未知数的初始值\nresult = fsolve(f, [1, 1, 1])\n# 输出方程组的解\nprint(result)\n# 输出误差\nprint(f(result))\n","sub_path":"Visualization/48. 非线性方程组求解.py","file_name":"48. 
非线性方程组求解.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"409236625","text":"import requests\nfrom requests.exceptions import ConnectionError\n\nfrom requests_mock import Adapter\nfrom requests_mock import MockerCore\nfrom requests_mock.exceptions import NoMockAddress\n\n\nclass HttpAdapter(Adapter):\n\n def get_rules(self):\n return self._matchers\n\n def reset(self):\n self._matchers = []\n\n\n_adapter = HttpAdapter()\n\n\nclass HttpMock(MockerCore):\n\n def __init__(self, *args, **kwargs):\n super(HttpMock, self).__init__(*args, **kwargs)\n self._adapter = _adapter\n\n def is_started(self):\n return self._real_send\n\n def set_allow_external(self, allow):\n \"\"\"Set flag to authorize external calls when no matching mock.\n\n Will raise a ConnectionError otherwhise.\n \"\"\"\n self._real_http = allow\n\n def _patch_real_send(self):\n\n _fake_send = requests.Session.send\n\n def _patched_fake_send(session, request, **kwargs):\n try:\n return _fake_send(session, request, **kwargs)\n except NoMockAddress:\n request = _adapter.last_request\n error_msg = 'Connection refused: {0} {1}'.format(\n request.method,\n request.url\n )\n response = ConnectionError(error_msg)\n response.request = request\n raise response\n\n requests.Session.send = _patched_fake_send\n\n def start(self):\n \"\"\"Overrides default start behaviour by raising ConnectionError instead\n of custom requests_mock.exceptions.NoMockAddress.\n \"\"\"\n super(HttpMock, self).start()\n self._patch_real_send()\n\n\n_http_mock = HttpMock()\n\n__all__ = []\n\n# expose mocker instance public methods\nfor __attr in [a for a in dir(_http_mock) if not a.startswith('_')]:\n __all__.append(__attr)\n globals()[__attr] = getattr(_http_mock, __attr)\n\n# expose adapter instance public methods\nfor __attr in [a for a in dir(_adapter) if not a.startswith('_')]:\n __all__.append(__attr)\n globals()[__attr] = getattr(_adapter, __attr)\n","sub_path":"mock_services/http_mock.py","file_name":"http_mock.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"169998994","text":"# -*- coding: utf-8 -*-\nfrom dash.dependencies import Input, Output\n\nfrom views.index import view_index\n\nurls = {\n '/': view_index,\n '/index': view_index\n}\n\n\ndef register_routes(app):\n \"\"\" Add callback to register app routes \"\"\"\n @app.callback(Output('page-content', 'children'),\n [Input('url', 'pathname')])\n def display_page(pathname):\n if pathname in urls:\n return urls[pathname].layout\n return '404'\n","sub_path":"src/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"230257132","text":"import math\n\ndef slope_intercept_form(a, b):\n \"\"\"Returns line made of two points\"\"\"\n (x0, y0) = a\n (x1, y1) = b\n m = (y1-y0)/(x1-x0)\n b = y0 - m * x0\n return (m, b)\n\ndef normal(point):\n \"\"\"Returns normal line in slope-intercept form at the point of incidence\"\"\"\n (x, y) = point\n return (y/x/4, 3*y/4)\n\ndef reflect_point_across_line(point, line):\n (x, y) = point\n (m, b) = line\n d = (x + m*(y-b))/(1 + m*m)\n return (2*d-x, 2*d*m-y+2*b)\n\ndef quadratic_roots(a, b, c):\n root = math.sqrt(b*b-4*a*c)\n return [(-b+root)/2/a, (-b-root)/2/a]\n\ndef intersection(point, line):\n \"\"\"Returns point of intersection of a given line and 
the ellipse\"\"\"\n (x, y) = point\n (m, b) = line\n [r0, r1] = quadratic_roots(4+m*m, 2*m*b, b*b-100)\n return (r1, m*r1+b) if abs(r0-x) < abs(r1-x) else (r0, m*r0+b)\n\ndef exited(point):\n (x, y) = point\n return abs(x) <= 0.01 and y > 0\n\ndef run():\n \"\"\"Given ellipse, 4x^2 + y^2 = 100, and initial points, find reflected beam\n and the next point of incidence. Continue until we exit the ellipse. Due\n to inexactness of float comparisons, we use the absolute value for\n comparisons.\"\"\"\n a = (0, 10.1)\n b = (1.4, -9.6)\n hits = 0\n while not exited(b):\n hits += 1\n c = intersection(b, slope_intercept_form(reflect_point_across_line(a, normal(b)), b))\n a = b\n b = c\n return hits\n","sub_path":"src/python/p144.py","file_name":"p144.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"597838450","text":"import os\n\n\n\n# Assign ip address\ndef ip_cmnd():\n \n ip = input('Enter ip address to add on interface : ')\n if len(ip.split('.')) == 4:\n s = os.popen(\n 'ip l | cut -d\":\" -f2 | tr -d \" \" | cut -d\" \" -f1').read()\n interfaces = s.split('\\n')[:-2:2]\n print(interfaces)\n interface_choice = input(\"choose interface: \")\n command = f'sudo ip address add {ip} dev {interface_choice}'\n res = os.popen(command).read()\n print('Ip address assigned successfully')\n\n\n # Delete Ip address\n \ndef ip_cmnd_del():\n \n ip = input('Enter ip address to delete from interface : ')\n if len(ip.split('.')) == 4:\n s = os.popen(\n 'ip l | cut -d\":\" -f2 | tr -d \" \" | cut -d\" \" -f1').read()\n interfaces = s.split('\\n')[:-2:2]\n print(interfaces)\n interface_choice = input(\"choose interfaces: \")\n command = f'sudo ip address del {ip} dev {interface_choice}'\n res = os.popen(command).read()\n print(\"ip adress deleted successfully\")\n \n \n \n #show ip address\n \ndef ip_cmnd_display():\n \n command = f'ip -c -br address'\n res = os.popen(command).read()\n print(res)\n\ndef display_all_interface():\n s = os.popen(\n 'ip l | cut -d\":\" -f2 | tr -d \" \" | cut -d\" \" -f1').read()\n interfaces = s.split('\\n')[:-2:2]\n command = 'ip l'\n res = os.popen(command).read()\n print(f'sudo All interfaces name => {interfaces} Details => {res}')\n\n\ndef configure_routing():\n network_addr = input('Enter network Address with /mask : ')\n getway_ip = input('Enter Gateway ip address : ')\n if len(network_addr.split('.')) == 4 and len(getway_ip.split('.')) == 4:\n cmd = f'sudo ip r add {network_addr} via {getway_ip}'\n res = os.popen(cmd).read()\n print(res)\n print('Roting configuration completed !')\n\n\n\ndef on_off_interface():\n print('1.Turned off interface ')\n print('2.Turned on interface')\n choice = int(input(\"enter your choice: \"))\n \n s = os.popen(\n 'ip l | cut -d\":\" -f2 | tr -d \" \" | cut -d\" \" -f1').read()\n interfaces = s.split('\\n')[:-2:2]\n print(interfaces)\n interface_choice = input(\"Enter your choice: \")\n \n if choice == 1:\n\n cmd = f'ip link set dev {interface_choice} down'\n res = os.popen(cmd).read()\n print(f'{interface_choice} turned off ')\n\n elif choice == 2:\n cmd = f'ip link set dev {interface_choice} up'\n res = os.popen(cmd).read()\n print(f'{interface_choice} turned on ')\n\n else:\n print('Wrong option choosed')\n\n\ndef add_ARP_entry():\n ip = input('Enter ip address : ')\n if len(ip.split('.')) == 4:\n s = os.popen(\n 'ip l | cut -d\":\" -f2 | tr -d \" \" | cut -d\" \" -f1').read()\n interfaces = s.split('\\n')[:-2:2]\n print(interfaces)\n 
interface_choice = input(\"choose interfaces: \") \n arp_cache = os.popen('ip n show | cut -d \" \" -f5').read()\n cmd = f'sudo ip n add {ip} lladdr {arp_cache} dev {interface_choice} nud permanent'\n res = os.popen(cmd).read()\n print('ARP Entry added successfully ')\n\n\ndef del_arp_entry():\n ip = input('Enter ip address : ')\n if len(ip.split('.')) == 4:\n s = os.popen(\n 'ip l | cut -d\":\" -f2 | tr -d \" \" | cut -d\" \" -f1').read()\n interfaces = s.split('\\n')[:-2:2]\n print(interfaces)\n interface_choice = input(\"choose interfaces: \")\n cmd = f'sudo ip n del {ip} dev {interface_choice}'\n res = os.popen(cmd).read()\n print('ARP Entry deleted successfully ')\n\n\ndef restart_network():\n cmd = 'sudo systemctl restart networking'\n cmd2 = 'sudo systemctl status networking'\n os.popen(cmd).read()\n print('Network services restarted ')\n print(os.popen(cmd2).read())\n\n\ndef change_host_name():\n host_name = input(\"Enter new host name :\")\n cmd = f'hostnamectl set-hostname {host_name}'\n os.popen(cmd).read()\n print(f'sudo new host name {host_name} set successfully ')\n\n\ndef add_dns_server():\n\n print('adding dns server')\n print('first : nameserver 8.8.8.8 write in this format')\n print('second : ctrl + d to exit ')\n cmd = 'sudo cat >> /etc/resolv.conf'\n print(os.popen(cmd).read())\n print('Nameserver added successfully ')\n","sub_path":"Network_managemnet_tool_pro1/net_script.py","file_name":"net_script.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"205563231","text":"from ics import Event\nfrom ics.contentline import lines_to_container\nfrom ics.timezone import Timezone\n\n\ndef test_issue_68_timezone_dropped():\n event = Event.from_container(\n lines_to_container(\n [\"BEGIN:VEVENT\", \"DTSTART;TZID=Europe/Berlin:20151104T190000\", \"END:VEVENT\"]\n )\n )\n tz = event.begin.tzinfo\n assert tz == Timezone.from_tzid(\"Europe/Berlin\")\n assert tz.tzname(event.begin) == \"CET\"\n","sub_path":"tests/issues/gh68.py","file_name":"gh68.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"570902709","text":"import nltk\nimport os\nimport datetime\nimport json\nimport itertools\nimport copy\nimport shutil\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom sklearn.metrics import log_loss, balanced_accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import ParameterGrid\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Embedding, Flatten, Dense, Dropout\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport numpy as np\nfrom gensim.models.phrases import Phraser\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport tensorflow\nfrom tensorflow.keras.datasets import imdb\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Model # new!\nfrom tensorflow.keras.layers import Input, concatenate # new!\nfrom tensorflow.keras.layers import Dense, Dropout, Embedding, SpatialDropout1D, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport os\nfrom sklearn.metrics import 
roc_auc_score\nimport matplotlib.pyplot as plt\n\nnltk.download('punkt')\n\n\nclass Preprocessor:\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n bigram = Phraser.load('bigrams.pkl')\n word_tokenizer = RegexpTokenizer(r\"\\w+|[^\\w\\s]\")\n\n # add hyperparameters for preprocessor here\n def __init__(self, num_unique_words=5000, max_sequence_length=100, trunc_type='pre', pad_type='pre',\n do_clean=False):\n self.num_unique_words = num_unique_words\n self.do_clean = do_clean\n self.max_sequence_length = max_sequence_length\n self.trunc_type = trunc_type\n self.pad_type = pad_type\n self.tokenizer = None\n\n def clean(self, x): # should not contain any other arguments (use fields set in constructor instead).\n \"\"\"\n Clean the strings in 'x' inspired by\n https://github.com/jonkrohn/DLTFpT/blob/master/notebooks/natural_language_preprocessing.ipynb\n\n :param x: A list of strings, one for each message\n :return: A cleaned list of strings\n \"\"\"\n import unidecode\n import re\n import string\n\n def repl(m):\n return chr(int('0x' + m.group(1), 16))\n\n # replace double escaped \"\\\\\" unicode strings with their unicode characters\n x = [re.sub(r'\\\\n', '\\n', message) for message in x]\n x = [re.sub(r'\\\\x([a-f0-9]{2})', repl, message) for message in x]\n x = [re.sub(r'\\\\u([a-f0-9]{4})', repl, message) for message in x]\n\n # replace accented characters with unaccented\n x = [unidecode.unidecode(message) for message in x]\n\n # replace nonascii characters with space\n x = [''.join(character if ord(character) < 128 else ' ' for character in message) for message in x]\n\n # Create sentence structure like nltk gutenberg.sents()\n # list of sentences for each message:\n x = [self.sent_detector.tokenize(message.strip()) for message in x]\n # list of list of words for each message/sentence:\n x = [[self.word_tokenizer.tokenize(sentence) for sentence in message] for message in x]\n\n # lower_sents: lowercase words ignoring punctuation\n x = [[[\n word.lower() for word in sentence if word.lower() not in list(string.punctuation)] for sentence in message]\n for message in x]\n\n # stopwrds: remove stopwords and punctuation\n # x = [[[word for word in sentence if not word.lower() in stopwords.words('english')] for sentence in message]\n # for message in x]\n\n # clean_sents: replace common adjacent words with bigrams\n x = [[self.bigram[sentence] for sentence in message] for message in x]\n # convert back to one string per message (join words into sentences and sentences into messages)\n x = ['\\n'.join(' '.join(sentence) for sentence in message) for message in x]\n return x\n\n def fit(self, x): # takes no other parameters (use fields initialized in constructor instead).\n \"\"\"\n Use training data 'x' to learn the parameters for preprocessing.\n\n :param x: list of messages.\n :return: self\n \"\"\"\n if self.do_clean:\n x = self.clean(x)\n self.tokenizer = Tokenizer(num_words=self.num_unique_words)\n self.tokenizer.fit_on_texts(x)\n # other fitting?\n return self\n\n def transform(self, x): # takes no other parameters (use fields initialized in constructor instead).\n \"\"\"\n Return transformed list of strings into an ndarray ready for input to Keras model.\n\n :param x: list of messages (strings)\n :return: ndarray of data (num_messages, max_sequence_length)\n \"\"\"\n if self.do_clean:\n x = self.clean(x)\n if self.tokenizer is None:\n raise ValueError('Tokenizer has not been initialized.')\n # other transforming to produce tensor for input layer of model\n x = 
self.tokenizer.texts_to_sequences(x)\n return pad_sequences(x, maxlen=self.max_sequence_length, padding=self.pad_type, truncating=self.trunc_type,\n value=0)\n\n\ndef get_performance(model, data_sets, set_names):\n \"\"\"\n Helper function to compute the performance (balanced accuracy and balanced log_loss).\n\n :param model: A Keras model.\n :param data_sets: A list of datasets: [(x1, y1), (x2, y2), ...]\n :param set_names: A list of names: ['train', 'valid', ...]\n :return: results dict: {'name': {'accuracy': accuracy, 'loss': loss}, ...}\n \"\"\"\n results = {}\n for (x, y), name in zip(data_sets, set_names):\n y_hat = model.predict(x)\n sample_weight = get_sample_weight(y)\n # should be same as balanced accuracy\n # acc = accuracy_score(y_true=y, y_pred=y_hat.argmax(axis=1), sample_weight=sample_weight)\n acc = balanced_accuracy_score(y_true=y, y_pred=y_hat.argmax(axis=1))\n loss = log_loss(y_true=y, y_pred=y_hat.astype(np.float64), sample_weight=sample_weight)\n results[name] = {\n 'accuracy': acc,\n 'loss': loss,\n }\n return results\n\n\ndef check_model(output_dir):\n \"\"\"\n Load and check model from its output directory.\n\n :param output_dir: The directory that stores model.h5, params.json, and roatan.py.\n :return: None\n \"\"\"\n import importlib.util\n\n model_file = os.path.join(output_dir, 'model.h5')\n model = load_model(model_file, compile=True)\n\n with open(os.path.join(output_dir, 'params.json'), 'r') as fp:\n params = json.load(fp)\n\n spec = importlib.util.spec_from_file_location('Preprocessor', os.path.join(output_dir, 'Preprocessor.py'))\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n this_preprocessor = getattr(mod, 'Preprocessor')\n classes, data_sets, set_names = get_xy(this_preprocessor(**params['preprocessor'],\n **params['model_preprocessor']))\n results = get_performance(model, data_sets, set_names)\n\n # Check that matching data set names report same performance as in params.json\n for name in results.keys():\n if name in params['results']:\n assert params['results'][name] == results[name], \\\n f'Loaded model reports different results than stored: {params[\"results\"][name]} != {results[name]}'\n\n print(output_dir)\n print(pd.DataFrame(data=results).T)\n\n\ndef get_xy(preprocessor, target='Coding:Level1'):\n \"\"\"\n Get the data from CSV files depending on target.\n\n :param preprocessor: A Preprocessor object ready to be fit to training data.\n :param target: The target 'Coding:Level1', 'Coding:Level2', other column\n :return: list of class names: ['label1', ...],\n list of datasets: [(x_train, y_train), (x_valid, y_valid)],\n list of names: ['train', 'valid']\n \"\"\"\n set_names = ['train', 'valid']\n dfs = [pd.read_csv(f'data/roatan_{s}.csv') for s in set_names]\n\n # fit preprocessor with training set\n preprocessor.fit(dfs[0]['message'])\n # transform all data sets\n xs = [preprocessor.transform(df['message']) for df in dfs]\n\n # encode labels as integers 0 ... n-1 using training set\n le = LabelEncoder().fit(dfs[0][target])\n # transform labels for all data sets\n ys = [le.transform(df[target]) for df in dfs]\n\n classes = le.classes_\n data_sets = list(zip(xs, ys))\n return classes, data_sets, set_names\n\n\ndef get_sample_weight(y):\n \"\"\"\n Return sample weights so that each class counts equally. 
For unbalanced data sets,\n say 90% positive and 10% negative, this ensures that both classes are equally important.\n\n :param y: The list of class labels (integers)\n :return: A weight for each sample that corresponds to its class label\n \"\"\"\n class_counts = np.bincount(y)\n class_weight = 1 / class_counts\n sample_weight = class_weight[y]\n sample_weight = sample_weight / sample_weight.sum() * len(y)\n return sample_weight\n\n\n# noinspection PyTypeChecker\ndef assemble_results(output_root):\n \"\"\"\n Helper function to traverse output root directory to assemble and save a CSV file with results.\n\n :param output_root: The directory that contains all the output model directories\n :return: A list of parameter sets (dicts), the dict for the best validation loss parameter set.\n \"\"\"\n all_params = []\n for run in sorted(os.listdir(output_root)):\n run_dir = os.path.join(output_root, run)\n if os.path.isdir(run_dir):\n r = {'dir': run}\n json_file = os.path.join(run_dir, f'params.json')\n try:\n with open(json_file, 'r') as fp:\n d = json.load(fp)\n r.update(d)\n except (FileNotFoundError, KeyError) as e:\n print(str(e))\n print(f'removing {run_dir}')\n shutil.rmtree(run_dir)\n all_params.append(r)\n\n data = [pd.json_normalize(d, sep='__').to_dict(orient='records')[0] for d in all_params]\n\n # save CSV file of all results\n csv_file = os.path.join(output_root, 'results.csv')\n pd.DataFrame(data).to_csv(csv_file, index=False)\n\n # assemble list of params to check what's been done\n best_val_loss = float('inf')\n best_params = None\n all_params2 = []\n for d in all_params:\n if 'results' in d:\n # noinspection PyTypeChecker\n if d['results']['valid']['loss'] < best_val_loss:\n best_val_loss = d['results']['valid']['loss']\n best_params = copy.deepcopy(d)\n del d['results']\n del d['dir']\n all_params2.append(d)\n\n if best_params is not None:\n print(f'best params: {best_params}')\n print(f'best val loss: {best_params[\"results\"][\"valid\"][\"loss\"]:.6f}')\n print(f'best val acc: {best_params[\"results\"][\"valid\"][\"accuracy\"]:.4%}')\n return all_params2, best_params\n\n\ndef build_fn(name=None, num_classes=None, optimizer=None, num_unique_words=None, embedded_dims=None,\n max_sequence_length=None, num_dense=None, dropout=None, spatial_dropout=None, n_conv_1=None,\n n_conv_2=None, n_conv_3=None, k_conv_1=None, k_conv_2=None, k_conv_3=None):\n \"\"\"\n Build and compile a model based on the input parameters.\n\n :param spatial_dropout: The percent dropout in the convolutional layer.\n :param k_conv_3: convolutional layer size.\n :param k_conv_2: convolutional layer size.\n :param k_conv_1: convolutional layer size.\n :param n_conv_3: kernel size.\n :param n_conv_2: kernel size.\n :param n_conv_1: kernel size.\n :param name: name of the model.\n :param num_classes: number of outputs for the model.\n :param optimizer: string representation of the optimizer.\n :param num_unique_words: number of words in the vocabulary for the embedding.\n :param max_sequence_length: the number of dimensions for input data (num_messages, max_sequence_length)\n :param embedded_dims: the number of dimensions to embed the tokens (words) in.\n :param num_dense: The number of neurons in the dense layer\n :param dropout: The percent dropout for dense layer.\n :return: a Keras model ready to be fit on data with these dimensions.\n \"\"\"\n clear_session()\n\n\n if name == 'dense':\n model = Sequential()\n model.add(Embedding(num_unique_words, embedded_dims, input_length=max_sequence_length))\n 
model.add(Flatten())\n model.add(Dense(num_dense, activation='relu'))\n model.add(Dropout(dropout))\n elif name == 'mcn':\n # convolutional layer architecture:\n\n input_layer = Input(shape=(max_sequence_length,),\n dtype='int16', name='input')\n\n # embedding:\n embedding_layer = Embedding(num_unique_words, embedded_dims,\n name='embedding')(input_layer)\n drop_embed_layer = SpatialDropout1D(spatial_dropout,\n name='drop_embed')(embedding_layer)\n\n # three parallel convolutional streams:\n conv_1 = Conv1D(n_conv_1, k_conv_1,\n activation='relu', name='conv_1')(drop_embed_layer)\n maxp_1 = GlobalMaxPooling1D(name='maxp_1')(conv_1)\n\n conv_2 = Conv1D(n_conv_2, k_conv_2,\n activation='relu', name='conv_2')(drop_embed_layer)\n maxp_2 = GlobalMaxPooling1D(name='maxp_2')(conv_2)\n\n # conv_3 = Conv1D(n_conv_3, k_conv_3,\n # activation='relu', name='conv_3')(drop_embed_layer)\n # maxp_3 = GlobalMaxPooling1D(name='maxp_3')(conv_3)\n\n # concatenate the activations from the three streams:\n concat = concatenate([maxp_1, maxp_2])\n\n # dense hidden layers:\n dense_layer = Dense(num_dense,\n activation='relu', name='dense')(concat)\n drop_dense_layer = Dropout(dropout, name='drop_dense')(dense_layer)\n dense_2 = Dense(int(num_dense / 2),\n activation='relu', name='dense_2')(drop_dense_layer)\n dropout_2 = Dropout(dropout, name='drop_dense_2')(dense_2)\n\n # sigmoid output layer:\n predictions = Dense(3, activation='softmax', name='output')(dropout_2)\n # model.add(Dense(num_classes, activation='softmax'))\n\n # create model:\n model = Model(input_layer, predictions)\n elif name == 'mcn_complex':\n # convolutional layer architecture:\n\n input_layer = Input(shape=(max_sequence_length,),\n dtype='int16', name='input')\n\n # embedding:\n embedding_layer = Embedding(num_unique_words, embedded_dims,\n name='embedding')(input_layer)\n drop_embed_layer = SpatialDropout1D(spatial_dropout,\n name='drop_embed')(embedding_layer)\n\n # three parallel convolutional streams:\n conv_1 = Conv1D(n_conv_1, k_conv_1,\n activation='relu', name='conv_1')(drop_embed_layer)\n maxp_1 = GlobalMaxPooling1D(name='maxp_1')(conv_1)\n\n conv_2 = Conv1D(n_conv_2, k_conv_2,\n activation='relu', name='conv_2')(drop_embed_layer)\n maxp_2 = GlobalMaxPooling1D(name='maxp_2')(conv_2)\n\n conv_3 = Conv1D(n_conv_3, k_conv_3,\n activation='relu', name='conv_3')(drop_embed_layer)\n maxp_3 = GlobalMaxPooling1D(name='maxp_3')(conv_3)\n\n # concatenate the activations from the three streams:\n concat = concatenate([maxp_1, maxp_2, maxp_3])\n\n # dense hidden layers:\n dense_layer = Dense(num_dense,\n activation='relu', name='dense')(concat)\n drop_dense_layer = Dropout(dropout, name='drop_dense')(dense_layer)\n dense_2 = Dense(int(num_dense / 2),\n activation='relu', name='dense_2')(drop_dense_layer)\n dropout_2 = Dropout(dropout, name='drop_dense_2')(dense_2)\n\n # sigmoid output layer:\n predictions = Dense(3, activation='softmax', name='output')(dropout_2)\n\n # create model:\n model = Model(input_layer, predictions)\n else:\n raise ValueError(f'Unknown model name: {name}')\n\n model.summary()\n model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, weighted_metrics=['accuracy'])\n\n return model\n\n\n# noinspection PyTypeChecker\ndef main():\n \"\"\"\n Example of how to set up a grid search.\n\n :return: None\n \"\"\"\n output_root = 'problem_5_output'\n if not os.path.exists(output_root):\n os.makedirs(output_root, exist_ok=True)\n\n # dictionary of parameter grids, one for each process\n param_grids = {\n 
'early_stopping': ParameterGrid([\n {\n 'patience': [15, 30]\n },\n ]),\n 'fit': ParameterGrid([\n {\n 'batch_size': [128, 256],\n 'epochs': [20, 40],\n },\n ]),\n 'model_preprocessor': ParameterGrid([\n {\n 'num_unique_words': [5000, 6000],\n 'max_sequence_length': [100, 150],\n },\n ]),\n 'model': ParameterGrid([\n {\n 'name': ['mcn', 'mcn_complex'],\n 'embedded_dims': [64, 128],\n 'num_dense': [64, 128],\n 'dropout': [0.5, 0.2],\n 'spatial_dropout': [0.2, 0.2],\n 'n_conv_1': [64, 128],\n 'n_conv_2': [64, 128],\n 'n_conv_3': [64, 128],\n 'k_conv_1': [3, 3],\n 'k_conv_2': [2, 3],\n 'k_conv_3': [4, 3],\n 'optimizer': ['adam', 'nadam'],\n },\n ]),\n 'preprocessor': ParameterGrid([\n {\n 'pad_type': ['post'],\n 'trunc_type': ['pre'],\n 'do_clean': [True]\n },\n ])\n }\n\n def prod(a):\n if len(a) == 0:\n return 1\n return a[0] * prod(a[1:])\n\n num_models = prod([len(pg) for pg in param_grids.values()])\n\n param_grid_names = list(param_grids.keys())\n param_grid_list = [param_grids[k] for k in param_grid_names]\n\n all_params, best_params = assemble_results(output_root)\n\n for i, params in enumerate(itertools.product(*param_grid_list)):\n params = {k: v for k, v in zip(param_grid_names, params)}\n print(f'\\n{i + 1}/{num_models}: {params}\\n')\n\n if params in all_params:\n # skip this one because we already ran it.\n continue\n\n if best_params is not None:\n # print best performance so far\n print(f'best params: {best_params}')\n print(f'best val loss: {best_params[\"results\"][\"valid\"][\"loss\"]:.6f}')\n print(f'best val acc: {best_params[\"results\"][\"valid\"][\"accuracy\"]:.4%}')\n\n # create a new output directory with path to model file.\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H.%M.%S.%f'\")\n output_dir = os.path.join(output_root, date)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_file = os.path.join(output_dir, 'model.h5')\n\n # get the preprocessed training and validation data\n classes, data_sets, set_names = get_xy(Preprocessor(**params['preprocessor'], **params['model_preprocessor']))\n ((x_train, y_train), (x_valid, y_valid)) = data_sets\n\n # build and compile model\n model = build_fn(num_classes=len(classes), **params['model'], **params['model_preprocessor'])\n\n # setup callbacks\n early_stopping = EarlyStopping(monitor='val_loss', verbose=1, **params['early_stopping'])\n model_checkpoint = ModelCheckpoint(\n filepath=model_file,\n save_weights_only=False, save_freq='epoch',\n save_best_only=True, monitor='val_loss', verbose=1)\n callbacks = [early_stopping, model_checkpoint]\n\n # Use sample weights to treat classes equally in loss and accuracy.\n sample_weight = get_sample_weight(y_train)\n sample_weight_valid = get_sample_weight(y_valid)\n\n # fit the model\n model.fit(x=x_train, y=y_train, sample_weight=sample_weight, verbose=1,\n validation_data=(x_valid, y_valid, sample_weight_valid), callbacks=callbacks, **params['fit'])\n\n # load the best model (last one saved)\n model = load_model(model_file, compile=True)\n\n # compute results\n results = get_performance(model, data_sets, set_names)\n print(pd.DataFrame(data=results).T)\n params['results'] = results\n\n # save params and results\n with open(os.path.join(output_dir, 'params.json'), 'w') as fp:\n json.dump(params, fp)\n\n # save a copy of *this* Python file.\n shutil.copyfile(__file__, os.path.join(output_dir, 'roatan.py'))\n\n # for convenience, show the validation loss and accuracy in a file name in the same directory.\n result_file_name = 
f'{params[\"results\"][\"valid\"][\"loss\"]:.6f}_{params[\"results\"][\"valid\"][\"accuracy\"]:.4f}.out'\n with open(os.path.join(output_dir, result_file_name), 'w'):\n pass\n\n # check_model(output_dir)\n\n if best_params is None or (params['results']['valid']['loss'] < best_params['results']['valid']['loss']):\n best_params = params\n\n # assemble results from all runs into one CSV file in output root.\n assemble_results(output_root)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"roatan_helpers.py","file_name":"roatan_helpers.py","file_ext":"py","file_size_in_byte":21792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"73596917","text":"__author__ = 'tinglev@kth.se'\n\nimport unittest\nfrom test import mock_test_data\nfrom modules.steps.report_success import ReportSuccess\nfrom modules.util import data_defs\n\nclass TestReportSuccess(unittest.TestCase):\n\n def test_get_version(self):\n step = ReportSuccess()\n service = {data_defs.S_IMAGE: {\n data_defs.IMG_IS_SEMVER: True,\n data_defs.IMG_BEST_SEMVER_MATCH: '1.3.1_abc321',\n data_defs.IMG_VERSION: '12.4.5_zxc444'}}\n result = step.get_version(service)\n self.assertEqual(result, '1.3.1_abc321')\n service[data_defs.S_IMAGE][data_defs.IMG_IS_SEMVER] = False\n result = step.get_version(service)\n self.assertEqual(result, '12.4.5_zxc444')\n\n def test_get_application_path(self):\n step = ReportSuccess()\n service = {data_defs.S_DEPLOY_LABELS: ['traefik.frontend.rule=PathPrefix:/test/url']}\n result = step.get_application_path(service)\n self.assertEqual(result, '/test/url')\n service = {}\n result = step.get_application_path(service)\n self.assertIsNone(result)\n\n def test_get_service_labels(self):\n step = ReportSuccess()\n service = mock_test_data.get_pipeline_data()[data_defs.SERVICES][0]\n expected = {\n 'slackChannels': '#one,#two',\n 'publicNameSwedish': 'Testnings API',\n 'publicNameEnglish': 'Test API',\n 'descriptionSwedish': 'API för kurssystem',\n 'descriptionEnglish': 'API for course system',\n 'importance': 'medium',\n 'detectifyProfileTokens': 'abc123,zxc456',\n 'monitorUrl': '/_monitor'\n }\n result = step.get_service_labels({}, service)\n self.assertEqual(result, expected)\n","sub_path":"test/unit/test_report_success.py","file_name":"test_report_success.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"536555988","text":"import unittest\nfrom pypost.optimizer import SciPyBoxConstrained, SciPyBoxConstrainedAlgorithms\nfrom scipy.optimize import rosen, rosen_der\nimport pypost.common.SettingsManager as SettingsManager\nimport numpy as np\n\n\n# Also responsible for testing (abstract) 'boxConstrained' class\n\nclass testSciPyBoxConstrained(unittest.TestCase):\n def setUp(self):\n self.optimizerName = 'myOptimizer'\n self.settings = SettingsManager.getDefaultSettings()\n self.settings.setProperty(self.optimizerName + 'maxNumIterations', 100)\n self.settings.setProperty(self.optimizerName + 'method', SciPyBoxConstrainedAlgorithms.L_BFGS_B)\n self.optimizer = SciPyBoxConstrained(2, optimizationName=self.optimizerName)\n\n def testLowerBound(self):\n lower_bound = np.asarray([5, 5])\n params, value, iterations = self.optimizer.optimize(rosen, rosen_der,\n x0=np.asarray([8, 8]), lowerBound=lower_bound)\n self.assertTrue((params >= lower_bound).all())\n\n def testUpperBound(self):\n upper_bound = np.asarray([-5, -5])\n params, value, iterations = 
self.optimizer.optimize(rosen, rosen_der,\n x0=np.asarray([-8, -8]), upperBound=upper_bound)\n\n self.assertTrue((params <= upper_bound).all())","sub_path":"src/pypost/tests/optimizer/testSciPyBoxConstrained.py","file_name":"testSciPyBoxConstrained.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"374055733","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 9 10:49:09 2018\n\n@author: OrangeBoy\n\"\"\"\n\nimport os\n\nsuffixs = [\".fr-ca.resx\", \".zh-cn.resx\", \".zh-tw.resx\"]\n# def list_dir(path, res):\n# for i in os.listdir(path):\n# temp_dir = os.path.join(path,i)\n# if os.path.isdir(temp_dir):\n# temp = {'dir':temp_dir, 'child_dirs' : [] , 'files' : []}\n# res['child_dirs'].append(list_dir(temp_dir, temp))\n# else:\n# res['files'].append(i)\n# return res\n\n# def get_config_dirs():\n# res = {'dir':'root', 'child_dirs' : [] , 'files' : []}\n# return list_dir('C:\\\\Projects\\\\MVCWebApp\\\\CIK_MVC\\\\App_GlobalResources\\\\CustomerCare',res)\n\ndef create_resx(path):\n for i in os.listdir(path):\n temp_dir = os.path.join(path,i)\n if os.path.isdir(temp_dir):\n create_resx(temp_dir)\n else:\n prefix = i.split(\".\")[0]\n for suffix in suffixs:\n new_file = prefix + suffix\n file_dir = temp_dir.rsplit('\\\\', 1)[0] + '\\\\'\n if not os.path.exists(file_dir + new_file):\n f = open(file_dir + new_file,'x')\n print(file_dir + new_file)\n f.close()\n return\n\ncreate_resx('C:\\\\Projects\\\\MVCWebApp\\\\CIK_MVC\\\\App_GlobalResources\\\\CustomerCare\\\\')","sub_path":"scripts/autoCreateResx.py","file_name":"autoCreateResx.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"396225083","text":"import pandas as pd\nimport os\nfrom requests import get\n\n\nclass PreProcessing(object):\n def __init__(self, channels_df, user_df, chat_df,\n input_user_gender, genderize_api=None):\n\n self.channels_df = channels_df.rename(columns={'id': 'channel_id'})\n self.channels_df = self.channels_df.rename(\n columns={'name_normalized': 'channel_name',\n 'num_members': 'channel_num_members',\n 'purpose': 'channel_purpose',\n 'topic': 'channel_topic'})\n\n self.user_df = user_df\n self.user_df = self.user_df.rename(columns={'id': 'user'})\n\n self.chat_df = chat_df\n self.chat_df = self.chat_df[self.chat_df.text.apply(type) == str]\n\n self.chat_df = pd.merge(self.chat_df, self.channels_df[\n ['channel_id', 'channel_name', 'channel_num_members',\n 'channel_purpose', 'channel_topic']\n ], on='channel_id')\n\n self.get_activity_counts()\n\n self.input_user_genders = input_user_gender\n if genderize_api is None:\n try:\n src_dir = os.path.dirname(os.path.abspath(__file__))\n except NameError:\n src_dir = os.getcwd()\n self.name_csv = pd.read_csv(f'{src_dir}/../data/name_gender.csv')\n self.get_gender_from_csv()\n else:\n self.name_api_key = genderize_api\n self.get_gender_from_api()\n\n def get_gender_from_csv(self):\n\n self.user_df[['gender', 'p_gender']] = self.user_df.apply(\n lambda x: pd.Series(self.gender_lookup_csv(\n x['real_name'], x['user'])),\n axis=1)\n\n def get_gender_from_api(self):\n\n self.user_df[['gender', 'p_gender']] = self.user_df.apply(\n lambda x: pd.Series(self.gender_lookup_api(\n x['real_name'], x['user'])),\n axis=1)\n\n def gender_lookup_csv(self, real_name, user_id):\n\n gender = 'Unknown'\n prob = 1.\n if self.input_user_genders and user_id in 
self.input_user_genders.slack_id.values:\n gender, prob = self.input_user_genders[\n self.input_user_genders.slack_id == user_id\n ].gender_identity.iloc[0], 1.\n elif type(real_name) == str:\n name = real_name.split(' ')[0].capitalize()\n r = self.name_csv[self.name_csv.name == name]\n try:\n gender = r.gender.iloc[0]\n prob = r.probability.iloc[0]\n except:\n pass\n return gender, prob\n\n def gender_lookup_api(self, real_name, user_id):\n\n gender = 'Unknown'\n prob = 1.\n if self.input_user_genders and user_id in self.input_user_genders.slack_id.values:\n gender, prob = self.input_user_genders[\n self.input_user_genders.slack_id == user_id\n ].gender_identity.iloc[0], 1.\n elif type(real_name) == str:\n name = real_name.split(' ')[0].capitalize()\n g = get(\n f'https://api.genderize.io/?apikey='\n f'{self.name_api_key}&name={name}').json()\n try:\n gender = str(g[\"gender\"])\n if gender == 'male':\n gender = 'M'\n if gender == 'female':\n gender = 'F'\n prob = float(g[\"probability\"])\n except:\n pass\n return gender, prob\n\n def get_activity_counts(self):\n\n self.user_df['n_messages'] = self.user_df.user.apply(\n lambda x: len(self.chat_df[self.chat_df.user == x])\n )\n if len(self.chat_df.replies.dropna()) > 0:\n self.user_df['n_replies'] = self.user_df.user.apply(\n lambda x: self.chat_df.replies.dropna().sum().count(x)\n )\n else:\n self.user_df['n_replies'] = 0.\n if len(self.chat_df.reactions.dropna()) > 0:\n self.user_df['n_reactions'] = self.user_df.user.apply(\n lambda x: self.chat_df.reactions.dropna().sum().count(x)\n )\n else:\n self.user_df['n_reactions'] = 0.\n self.user_df = self.user_df[\n (self.user_df.n_messages > 0.) |\n (self.user_df.n_replies > 0.) |\n (self.user_df.n_reactions > 0.)\n ]\n\n def run(self):\n\n df = pd.merge(self.chat_df, self.user_df[\n ['user', 'gender', 'p_gender', 'tz', 'profile',\n 'is_bot']], on='user')\n df.drop_duplicates(subset='client_msg_id')\n self.user_df.drop_duplicates(subset='user')\n\n return self.user_df, df\n","sub_path":"processing/slack_backend/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"459211317","text":"import torch, time, sys\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\nfrom lib import *\n\nclass Trainer:\n def __init__(self, model, lr, epoch, save_fn, avg, std):\n self.epoch = epoch\n self.model = model\n self.lr = lr\n self.save_fn = save_fn\n self.Xavg, self.Xstd = Variable(torch.from_numpy(avg).cuda()), Variable(torch.from_numpy(std).cuda())\n\n print('Start Training #Epoch:%d'%(epoch))\n \n def fit(self, tr_loader, weight, name):\n st = time.time()\n #save dict\n save_dict = {}\n\n for e in range(1, self.epoch+1):\n #learning rate and optimizer\n lr = self.lr / (((e//(70*1))*2)+1) \n print( '\\n==> Training Epoch #%d lr=%4f'%(e, lr))\n\n # Training\n for batch_idx, _input in enumerate(tr_loader):\n self.model.train()\n data, target = Variable(_input[0].cuda()), [Variable(_input[1].cuda()), Variable(_input[2].cuda()), Variable(_input[3].cuda())]\n \n opt = optim.SGD(self.model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)\n opt.zero_grad()\n\n predict = self.model(data, self.Xavg, self.Xstd, name, False)\n loss = loss_func(predict, target, weight, name, False)\n total_loss = [l for l in loss if (l>0).all()]\n sum(total_loss).backward()\n opt.step()\n\n loss_adv = loss\n if ('prePN' in name) or 
('preIN' in name):\n if 'UnetAE' in name: params = self.model.encode.parameters()\n if 'DuoAE' in name: params = list(self.model.pitch_encode.parameters())+list(self.model.inst_encode.parameters())\n opt_adv = optim.SGD(params, lr=lr, momentum=0.9, weight_decay=1e-4)\n opt_adv.zero_grad() \n\n predict_adv = self.model(data, self.Xavg, self.Xstd, name, True)\n loss_adv = loss_func(predict_adv, target, weight, name, True)\n total_loss_adv = [l for l in loss_adv if (l>0).all()]\n sum(total_loss_adv).backward()\n opt_adv.step()\n \n sys.stdout.write('\\r')\n sys.stdout.write('| Epoch [%3d/%3d] Iter[%4d/%4d] Inst:%3f Pitch:%3f Inst-N:%3f Pitch-N:%3f Roll:%3f Time %d'\n %(e, self.epoch, batch_idx+1, len(tr_loader), loss[0].data, loss[1].data, loss_adv[2].data,loss_adv[3].data,loss[4].data, time.time() - st))\n sys.stdout.flush()\n print ('\\n')\n save_dict['state_dict'] = self.model.state_dict()\n torch.save(save_dict, self.save_fn+'e_%d'%(e))","sub_path":"v3/disentangled training/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"16085391","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport requests\nfrom bs4 import BeautifulSoup\ndef parse_url_to_html(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.content,\"html.parser\")\n body = soup.find_all(class_=\"x-wiki-content\")[0]\n html = str(body)\n with open(\"a.html\",\"wb\") as f:\n f.write(html)\n","sub_path":"basic/pac_exer.py","file_name":"pac_exer.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"182139736","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport csv\nimport h5py\nimport os\nfrom tqdm import tqdm\n\n\ndef main():\n\n root_dir = os.getcwd()\n\n albedo_table = load_albedo(root_dir, 'albedo.csv')\n\n # Uncomment the function that you want to run\n eight_categories(root_dir, albedo_table)\n # three_categories_random(root_dir, albedo_table)\n # three_categories_determine(root_dir, albedo_table)\n\n\ndef eight_categories(write_dir, albedo_table):\n num_obs = 64000\n surf_frac = sample_surf_dist(num_obs, 3)\n\n # Remove all generated fractions that have an absolute or relative melt pond fraction\n # higher than 60%\n s = 0\n while s < len(surf_frac):\n if surf_frac[s][1] > .65:\n surf_frac = np.delete(surf_frac, s, 0)\n elif surf_frac[s][1] / (surf_frac[s][1] + surf_frac[s][0]) > 0.65:\n surf_frac = np.delete(surf_frac, s, 0)\n else:\n s += 1\n\n num_obs = len(surf_frac)\n\n obs_refl = np.zeros((num_obs, 6)) # 6 MODIS BANDS USED\n\n for i in tqdm(range(num_obs)):\n\n fi, fp, fo = surf_frac[i]\n # Find the subcategory distributions\n sub_frac_ice, sub_frac_pond = calc_subfrac_8cat()\n\n for b in range(6): # 7 MODIS 8 WV\n # Find the three category reflectances using the subcategory distributions\n ice_refl, pond_refl, ocean_refl = parse_8cat(b, albedo_table, sub_frac_ice, sub_frac_pond)\n obs_refl[i, b] = ((ice_refl * fi) + (pond_refl * fp) + (ocean_refl * fo))\n\n tds_name = os.path.join(write_dir, 'modis_artificial_tds_8cat.hdf')\n write_hdf(tds_name, obs_refl, surf_frac)\n\n\ndef parse_8cat(b, albedo_table, sub_frac_ice, sub_frac_pond):\n # Sample possible subsurfaces to find ice reflectance\n ice_refl = 0\n ice_refl += (albedo_table[b, 0] * sub_frac_ice[0]) # Cold Snow\n ice_refl += (albedo_table[b, 1] * sub_frac_ice[1]) # Melting Snow\n ice_refl 
+= (albedo_table[b, 2] * sub_frac_ice[2]) # Deter. Melting Ice\n ice_refl += (albedo_table[b, 3] * sub_frac_ice[3]) # Dirty Ice\n\n # Sample possible subsurfaces to find pond reflectance\n pond_refl = 0\n pond_refl += (albedo_table[b, 4] * sub_frac_pond[0]) # Bright ponds\n pond_refl += (albedo_table[b, 5] * sub_frac_pond[1]) # Early Ponds\n pond_refl += (albedo_table[b, 6] * sub_frac_pond[2]) # Late Ponds\n\n # Ocean has no subsurfaces\n ocean_refl = albedo_table[b, 7]\n\n return ice_refl, pond_refl, ocean_refl\n\n\ndef calc_subfrac_8cat():\n # Subsurface distribution\n sub_frac_ice = sample_surf_dist(1, 4)[0]\n # Limit the amount of dirty or deteriorating ice.\n while sub_frac_ice[3] > 0.1 or sub_frac_ice[2] > 0.2:\n sub_frac_ice = sample_surf_dist(1, 4)[0]\n sub_frac_pond = sample_surf_dist(1, 3)[0]\n\n return sub_frac_ice, sub_frac_pond\n\n\ndef three_categories_random(write_dir, albedo_table):\n num_obs = 64000\n surf_frac = sample_surf_dist(num_obs, 3)\n\n # Remove all generated fractions that have an absolute or relative melt pond fraction\n # higher than 60%\n s = 0\n while s < len(surf_frac):\n if surf_frac[s][1] > .65:\n surf_frac = np.delete(surf_frac, s, 0)\n elif surf_frac[s][1] / (surf_frac[s][1] + surf_frac[s][0]) > 0.65:\n surf_frac = np.delete(surf_frac, s, 0)\n else:\n s += 1\n\n num_obs = len(surf_frac)\n\n obs_refl = np.zeros((num_obs, 8)) ##6 for MODIS\n\n for i in tqdm(range(num_obs)):\n\n fi, fp, fo = surf_frac[i]\n\n for b in range(6):\n # Find the three category reflectances using the subcategory distributions\n ice_refl, pond_refl, ocean_refl = parse_3cat(b, albedo_table)\n obs_refl[i, b] = ((ice_refl * fi) + (pond_refl * fp) + (ocean_refl * fo))\n\n tds_name = os.path.join(write_dir, 'modis_artificial_tds_3cat_random.hdf')\n write_hdf(tds_name, obs_refl, surf_frac)\n\n\ndef three_categories_determine(write_dir, albedo_table):\n '''\n Creates the training dataset with three surface categories\n (using all possible fraction combinations)\n :param write_dir:\n :return:\n '''\n surf_frac = exhaustive_3cat_dist()\n num_obs = len(surf_frac)\n\n obs_refl = np.zeros((num_obs, 6))\n\n for i in tqdm(range(num_obs)):\n fi, flp, fo = surf_frac[i]\n for b in range(6):\n ir, lpr, ocr = parse_3cat(b, albedo_table)\n obs_refl[i, b] = ((ir * fi) + (lpr * flp) + (ocr * fo))\n\n tds_name = os.path.join(write_dir, 'modis_artificial_tds_3cat_determine.hdf')\n write_hdf(tds_name, obs_refl, surf_frac)\n\n\n# All possibilities with 3 surfaces\ndef exhaustive_3cat_dist():\n\n surf_frac = []\n for i in range(101):\n for m in range(101-i):\n if (m) / float(m+i+0.1) > 0.65:\n continue\n o = 100 - (i + m)\n surf_frac.append([i, m, o])\n\n surf_frac = np.divide(surf_frac, 100)\n print(np.shape(surf_frac))\n\n return surf_frac\n\n\ndef parse_3cat(b, albedo_table):\n ice_refl = albedo_table[b, 0] # Cold Snow\n pond_refl = albedo_table[b, 5] # Early Ponds\n ocean_refl = albedo_table[b, 7]\n\n return ice_refl, pond_refl, ocean_refl\n\n\n\ndef calc_subfrac_v1():\n # Subsurface distribution\n sub_frac_ice = sample_surf_dist(1, 5)[0]\n while sub_frac_ice[4] > 0.3 or sub_frac_ice[3] > 0.2:\n sub_frac_ice = sample_surf_dist(1, 5)[0]\n sub_frac_pond = sample_surf_dist(1, 4)[0]\n\n return sub_frac_ice, sub_frac_pond\n\ndef parse_v1(b, albedo_table, sub_frac_ice, sub_frac_pond):\n # Sample possible subsurfaces to find ice reflectance\n ice_refl = 0\n ice_refl += (albedo_table[b, 0] * sub_frac_ice[0]) # Cold Snow\n ice_refl += (albedo_table[b, 1] * sub_frac_ice[1]) # Melting Snow\n ice_refl += 
(albedo_table[b, 2] * sub_frac_ice[2]) # Deter. Melting Ice\n ice_refl += (albedo_table[b, 3] * sub_frac_ice[3]) # Undetr Melting Ice\n ice_refl += (albedo_table[b, 7] * sub_frac_ice[4]) # Dirty Ice\n\n # Sample possible subsurfaces to find pond reflectance\n pond_refl = 0\n pond_refl += (albedo_table[b, 4] * sub_frac_pond[0]) # B-G ice\n pond_refl += (albedo_table[b, 5] * sub_frac_pond[1]) # EMP\n pond_refl += (albedo_table[b, 6] * sub_frac_pond[2]) # LMP\n # Add some more darkness to the pond options\n pond_refl += (albedo_table[b, 8] * sub_frac_pond[3]) * 2. # Ocean\n\n # Ocean has no subsurfaces\n ocean_refl = albedo_table[b, 8]\n\n return ice_refl, pond_refl, ocean_refl\n\n# All possibilities with 4 surfaces\ndef sample_surf_dist_v4():\n\n surf_frac = []\n for i in range(101):\n for lm in range(101-i):\n for dm in range(101-lm-i):\n if (lm+dm) / float(lm+dm+i+0.1) > 0.65:\n continue\n o = 100 - (i + lm + dm)\n surf_frac.append([i, lm, dm, o])\n\n surf_frac = np.divide(surf_frac, 100)\n print(np.shape(surf_frac))\n for i in range(2400,2410):\n print(surf_frac[i], sum(surf_frac[i]))\n\n return surf_frac\n\n\n\ndef parse_v4(b, albedo_table):\n # Sample possible subsurfaces to find ice reflectance\n ice_refl = (albedo_table[b, 1]) # Melting Snow\n\n # Sample possible subsurfaces to find pond reflectance\n lpond_refl = (albedo_table[b, 4]) # Light Ponds\n\n dpond_refl = (albedo_table[b, 7])\n\n # Ocean has no subsurfaces\n ocean_refl = albedo_table[b, 8]\n\n return ice_refl, lpond_refl, dpond_refl, ocean_refl\n\n\ndef sample_surf_dist(num_obs, num_surf):\n '''\n Establishes the sample surface distribution\n :param num_obs: Number of sample observations\n :return: List of randomly generated surface distributions\n '''\n surf_frac = np.zeros((num_obs, num_surf))\n for i in range(num_obs):\n rlist = [0 for s in range(num_surf + 1)]\n rlist[0] = 0\n rlist[-1] = 1000\n\n # Establish the sampled surface distribution\n for s in range(num_surf - 1):\n rlist[s+1] = random.randint(0, 1000)\n\n rlist.sort()\n\n for s in range(num_surf - 1):\n surf_frac[i, s] = (rlist[s + 2] - rlist[s + 1]) / 1000\n\n surf_frac[i, num_surf-1] = rlist[1] / 1000.\n\n return surf_frac\n\n\ndef load_albedo(root_dir, filename):\n\n # Read albedo data 7 MODIS 8 WV\n albedo_table = np.zeros((8, 9))\n csvfname = os.path.join(root_dir, filename)\n with open(csvfname) as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',')\n for row in csv_reader:\n b = int(row[0])-1\n for c in range(1, len(row)):\n albedo_table[b, c - 1] = float(row[c])\n\n print(albedo_table)\n return albedo_table\n\n\ndef write_hdf(fname, features, labels):\n tds_file = h5py.File(fname, 'w')\n tds_file.create_dataset(\"features\", data=features)\n tds_file.create_dataset(\"labels\", data=labels)\n tds_file.close()\n\n\nif __name__ == '__main__':\n main()","sub_path":"create_sim_tds.py","file_name":"create_sim_tds.py","file_ext":"py","file_size_in_byte":8855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"652084789","text":"import click\n\nGLOBAL_VARABLE = 100\n\n\n@click.command()\n@click.option('--count', default=1, help='Number of greetings.')\n@click.option('--name', prompt='Your name',\n help='The person to greet.')\n\n@click.option('--do_magic', is_flag=True,\n help='Enables magic option')\ndef hello(count, name, do_magic):\n \"\"\"Simple program that greets NAME for a total of COUNT times.\"\"\"\n if do_magic:\n print('magic option enabled!')\n\n for x in range(count):\n click.echo('Hello 
%s!' % name)\n\n\n\n\n\nif __name__ == '__main__':\n x= 5\n hello()\n\n print(GLOBAL_VARABLE)","sub_path":"my_stuff/click_parser.py","file_name":"click_parser.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"304570065","text":"#\n# COPYRIGHT:\n#\t The Leginon software is Copyright 2003\n#\t The Scripps Research Institute, La Jolla, CA\n#\t For terms of the license agreement\n#\t see http://ami.scripps.edu/software/leginon-license\n#\nimport calibrator\nimport calibrationclient\nimport event, leginondata\nimport node\nimport gui.wx.DoseCalibrator\nimport time\nfrom wx import CallAfter\n\nclass DoseCalibrator(calibrator.Calibrator):\n\t'''\n\tcalibrate the camera sensitivity and other dose measurements\n\t'''\n\tpanelclass = gui.wx.DoseCalibrator.Panel\n\tsettingsclass = leginondata.DoseCalibratorSettingsData\n\tdefaultsettings = calibrator.Calibrator.defaultsettings\n\tdefaultsettings.update({\n\t\t'beam diameter': 0.16,\n\t\t'scale factor': 0.88,\n\t})\n\tdef __init__(self, id, session, managerlocation, **kwargs):\n\t\tcalibrator.Calibrator.__init__(self, id, session, managerlocation, **kwargs)\n\t\tself.calclient = calibrationclient.DoseCalibrationClient(self)\n\t\tself.results = {}\n\t\tself.sens = None\n\t\tself.start()\n\n\tdef uiMeasureDoseRate(self):\n\t\tif self.initInstruments():\n\t\t\treturn\n\t\ttry:\n\t\t\tself.instrument.tem.MainScreenPosition = 'down'\n\t\t\ttime.sleep(2)\n\t\t\tself.logger.info('screen down')\n\t\texcept:\n\t\t\tself.logger.info('screen down failed (may be unsupported)')\n\t\tstatus = self.getCurrentAndMag()\n\t\tif status == 'error':\n\t\t\te = 'Unable to measure dose rate: unable to access instrument'\n\t\t\tself.logger.error(e)\n\t\t\treturn\n\n\t\tscreen_mag = self.results['screen magnification']\n\t\tbeam_current = self.results['beam current']\n\t\tbeam_diameter = self.settings['beam diameter']\n\t\tdoserate = self.calclient.dose_from_screen(screen_mag, beam_current, beam_diameter)\n\t\tself.results['dose rate'] = doserate\n\t\tCallAfter(self.panel.dialog.scrsettings._setDoseResults, self.results)\n\n\tdef getCurrentAndMag(self):\n\t\tif self.initInstruments():\n\t\t\treturn 'error'\n\t\ttry:\n\t\t\tscope = self.instrument.getData(leginondata.ScopeEMData)\n\t\texcept:\n\t\t\treturn 'error'\n\t\tmag = scope['main screen magnification']\n\t\tcurrent = scope['screen current']\n\t\tscale = self.settings['scale factor']\n\t\tself.results['screen magnification'] = mag\n\t\tself.results['beam current'] = current * scale\n\t\treturn 'ok'\n\n\tdef acquireImage(self):\n\t\tif self.initInstruments():\n\t\t\tself.panel.acquisitionDone()\n\t\t\treturn\n\t\ttry:\n\t\t\tself.instrument.tem.MainScreenPosition = 'up'\n\t\t\ttime.sleep(2)\n\t\t\tself.logger.info('screen up')\n\t\texcept:\n\t\t\tself.logger.info('screen up failed (may be unsupported)')\n\t\treturn calibrator.Calibrator.acquireImage(self)\n\n\tdef uiCalibrateCamera(self):\n\t\timdata = self.acquireImage()\n\t\tif 'dose rate' not in self.results or self.results['dose rate'] is None:\n\t\t\te = 'Unable to calibrate camera sensitivity: no dose measurement'\n\t\t\tself.logger.error(e)\n\t\t\treturn\n\t\ttry:\n\t\t\tsens = self.calclient.sensitivity_from_imagedata(imdata,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.results['dose rate'])\n\t\texcept ValueError:\n\t\t\te = 'Unable to calibrate camera sensitivity: invalid dose measurement'\n\t\t\tself.logger.error(e)\n\t\t\treturn\n\n\t\tself.sens = 
sens\n\t\tht = imdata['scope']['high tension']\n\t\tself.calclient.storeSensitivity(ht, sens)\n\t\tCallAfter(self.panel.dialog.scrsettings._setSensitivityResults, sens)\n\n\tdef onSetSensitivity(self,sens):\n\t\ttry:\n\t\t\tscope = self.instrument.getData(leginondata.ScopeEMData)\n\t\texcept:\n\t\t\treturn None\n\t\tht = scope['high tension']\n\t\tif sens:\n\t\t\tself.calclient.storeSensitivity(ht,sens)\n\t\t\tself.logger.info('Camera sensitivity saved as %.3f counts/e and %d kV' %(sens,ht/1000))\n\t\telse:\n\t\t\tself.logger.warning('Enter a pre-measured value before saving')\n\t\treturn\n\tdef abortCalibration(self):\n\t\traise NotImplementedError\n\n\tdef screenDown(self):\n\t\t# check if screen is down\n\t\tself.instrument.MainScreenPosition = 'down'\n\t\ttime.sleep(1)\n\n\tdef screenUp(self):\n\t\t# check if screen is down\n\t\tself.instrument.MainScreenPosition = 'up'\n\t\ttime.sleep(1)\n\n","sub_path":"leginon/dosecalibrator.py","file_name":"dosecalibrator.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"273721351","text":"import logging\n\nimport bar\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\nlogger.addHandler(handler)\n\nlogger.debug('msg')\nbar.log()\n","sub_path":"standard-library/logging/foo.py","file_name":"foo.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"401199580","text":"import socket\nimport functions\nimport json\nrec_sock = socket.socket()\nrec_sock.bind(('', 8888))\nrec_sock.listen(100)\n\nwhile True:\n conn, addr = rec_sock.accept()\n print(addr)\n data = conn.recv(2048)\n print(data)\n if not data:\n break\n request = json.loads(data)\n if request['request_type'] == 'checkName':\n if functions.checkName(request['checkName']['nickname']):\n conn.send(b'true')\n else:\n conn.send(b'false')\n elif request['request_type'] == 'registration':\n functions.registration(request['registration']['nickname'], request['registration']['email'])\n conn.send(b'registration confirmed')\n elif request['request_type'] == 'getNearest':\n a = 1\n\n\n\n\n\n\n\n\n\n\n\n answer = input()\n conn.send(answer.encode(\"utf-8\"))\n conn.close()\n","sub_path":"srever.py","file_name":"srever.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"351459348","text":"from collections import defaultdict\nimport glob\nimport logging\nimport os\nimport tempfile\n\nfrom PIL import Image, ImageColor\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom eulfedora.util import RequestFailed\nimport magic\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument, PDFNoOutlines\nfrom pdfminer.pdfpage import PDFPage\n\nfrom readux.books import digwf\nfrom readux.books.models import PageV1_0\nfrom readux.collection.models import Collection\nfrom readux.fedora import ManagementRepository\nfrom readux.utils import md5sum\n\nlogger = logging.getLogger(__name__)\n\n\nclass BasePageImport(BaseCommand):\n '''Local extension of :class:`django.core.management.base.BaseCommand` with\n common logic for importing covers and book pages'''\n\n 
#: flag to indicate dry run/noact mode; set by :meth:`setup` based on args\n dry_run = False\n #: verbosity level; set by :meth:`setup` based on command-line arguments\n verbosity = None\n #: normal verbosity level\n v_normal = 1\n #: :class:`readux.books.digwf.Client`, initialized in :meth:`setup`\n digwf_client = None\n #: :class:`readux.fedora.ManagementRepository`, initialized in :meth:`setup`\n repo = None\n #: defaultdict to track stats about what has been done, errors, etc\n stats = defaultdict(int)\n\n # optional override paths for image and ocr input (single volume only)\n image_path = None\n ocr_path = None\n pdf_path = None\n\n def setup(self, **options):\n '''common setup: initialze :attr:`digwf_client` and :attr:`repo` and\n set verbosity level based on user options.'''\n self.dry_run = options.get('dry_run', False)\n self.verbosity = int(options.get('verbosity', self.v_normal))\n\n if not hasattr(settings, 'DIGITIZATION_WORKFLOW_API'):\n raise CommandError('DIGITIZATION_WORKFLOW_API is not configured in Django settings')\n\n self.digwf_client = digwf.Client(settings.DIGITIZATION_WORKFLOW_API)\n self.repo = ManagementRepository()\n # double-check the repo connection here so we can report the error cleanly,\n # rather than trying to catch the first time fedora is hit\n try:\n self.repo.api.describeRepository()\n except Exception as err:\n raise CommandError('Error connecting to Fedora at %s: %s' % \\\n (settings.FEDORA_ROOT, err))\n\n def pids_by_collection(self, pid):\n coll = self.repo.get_object(pid, type=Collection)\n if not coll.exists:\n self.stdout.write('Collection %s does not exist or is not accessible' % \\\n pid)\n\n if not coll.has_requisite_content_models:\n self.stdout.write('Object %s does not seem to be a collection' % \\\n pid)\n\n # NOTE: this approach may not scale for large collections\n # if necessary, use a sparql query to count and possibly return the objects\n # or else sparql query query to count and generator for the objects\n # this sparql query does what we need:\n # select ?vol\n # WHERE {\n # ?book .\n # ?vol ?book\n #}\n volumes = []\n for book in coll.book_set:\n volumes.extend(book.volume_set)\n\n return volumes\n\n\n def is_usable_volume(self, vol):\n # if object does not exist or cannot be accessed in fedora, skip it\n if not vol.exists:\n self.stats['errors'] += 1\n if self.verbosity >= self.v_normal:\n self.stdout.write('%s does not exist' % vol.pid)\n return False\n # if object is not a volume, skip it\n elif not vol.has_requisite_content_models:\n self.stats['errors'] += 1\n if self.verbosity >= self.v_normal:\n self.stdout.write('%s is not a Volume object' % vol.pid)\n return False\n\n return True\n\n def find_page_images(self, vol):\n '''Look up an item in the DigWf by pid (noid) and find display\n images. Returns a tuple of the list of images found (empty if none\n were found) and the info returned by :class:`readux.books.digwf.Client`.\n\n Raises :class:`TooManyDigWFRecords` or :class:`NoDigWFRecords` if the\n digwf api returns too many or not enough records.\n '''\n images = []\n vol_info = None\n\n # if all path overrides are set, don't bother querying digwf\n if self.image_path and self.ocr_path and self.pdf_path:\n vol_info = digwf.Item()\n\n else:\n # lookup in digwf by pid (noid)\n info = self.digwf_client.get_items(pid=vol.noid)\n if info.count != 1:\n if self.verbosity >= self.v_normal:\n if info.count > 1:\n self.stdout.write(\"Error: Found more than one (%d) DigWF record for %s. 
This shouldn't happen!\" \\\n % (info.count, vol.pid))\n else:\n if self.verbosity >= self.v_normal:\n self.stdout.write(\"Error: No information found in DigWF for %s\" % vol.pid)\n\n # nothing to do\n return images, vol_info\n\n vol_info = info.items[0]\n logger.debug(\"image path for %s : %s\",\n vol.pid, vol_info.display_image_path)\n\n if not vol_info.display_image_path:\n self.stdout.write('Error: no display image path set for %s' % vol.pid)\n # no images can possibly be found\n return [], vol_info\n\n # override/populate volume info based on any override paths\n if self.image_path:\n vol_info.display_image_path = self.image_path\n if self.ocr_path:\n vol_info.ocr_file_path = self.ocr_path\n if self.pdf_path:\n vol_info.pdf = self.pdf_path\n\n image_path = vol_info.display_image_path\n # look for JPEG2000 images first (preferred format)\n images = glob.glob(os.path.join(image_path, '*.jp2'))\n # if not found in base display path, check for a JP2 subdir\n if not len(images):\n images = glob.glob(os.path.join(image_path, 'JP2', '*.jp2'))\n # if jp2s were not found in either location, look for tiffs\n if not len(images):\n images = glob.glob('%s/*.tif' % image_path)\n\n # tif variant - in some cases, extension is upper case\n if not len(images):\n images = glob.glob('%s/*.TIF' % image_path)\n\n # if neither jp2s nor tiffs were found, look for jpgs\n if not len(images):\n images = glob.glob('%s/*.jpg' % image_path)\n\n # make sure the files are sorted; images are expected to be named\n # so that they are ordered in page-sequence when sorted\n images.sort()\n\n if not len(images):\n self.stdout.write('Error on %s: no files matching *.jp2, *.tif, *.TIF, or *.jpg found for %s' % \\\n (vol.pid, image_path))\n\n # images could be empty list if no matches were found\n return images, vol_info\n\n #: how many pages in to look for a cover (0-based)\n cover_range = 7\n\n def identify_cover(self, images, pdf):\n '''Attempt to identify the image that should be used as the primary image\n for this volume. 
Use PDF outline information when avialable; otherwise,\n look through the first few images and select the first non-blank one.\n\n Returns a tuple of the image filename and the index where it\n was found.\n\n :param images: list of image file paths for this volume\n :param pdf: path to the pdf file for this volume\n '''\n coverindex = self.pdf_cover(pdf, images)\n # if a cover file index was identified via PDF outline, use that\n if coverindex is not None:\n return images[coverindex], coverindex\n\n coverfile = coverindex = None\n\n for index in range(0, self.cover_range):\n imgfile = images[index]\n # first non-blank page should be the cover\n if not self.is_blank_page(imgfile):\n coverfile = imgfile\n coverindex = index\n break\n\n return coverfile, coverindex\n\n def pdf_cover(self, pdf, images):\n '''Attempt to use embedded outline information in the PDF to determine\n which image to use as the cover or primary image for the volume.\n\n :param pdf: path to the pdf file for this volume\n :param images: list of image file paths for this volume\n '''\n with open(pdf, 'rb') as pdf_file:\n parser = PDFParser(pdf_file)\n document = PDFDocument(parser)\n try:\n outlines = document.get_outlines()\n logger.debug('PDF %s includes outline information, using for cover identification',\n pdf)\n except PDFNoOutlines:\n logger.debug('PDF %s does not include outline information', pdf)\n return None\n\n # generate a dictionary of page object id and zero-based page number\n pages = dict((page.pageid, pageno) for (pageno, page)\n in enumerate(PDFPage.create_pages(document)))\n\n possible_coverpages = []\n page_count = 0\n for (level, title, dest, a, se) in outlines:\n\n # NOTE: some LSDI PDFs trigger a maximum recursion error in\n # pdfminer; try to avoid this by bailing out after processing\n # a set number of outline items\n # caveat: outline entries are not necessarily returned in order\n page_count += 1\n if page_count > 15:\n break\n\n # title is the label of the outline element\n\n # dest is the target page object; apparently in some cases this can be None ?\n # if so, skip it\n if dest is None:\n continue\n\n # we can probably use either Cover or Title Page; there\n # may be multiple Covers (for back cover)\n if title.lower() in ['cover', 'title page']:\n # determine page number for the reference\n page_num = pages[dest[0].objid]\n\n # check if the page is blank, as seems to be happening in some\n # cases for what is labeled as the cover\n try:\n img = images[page_num]\n except IndexError:\n logger.error('Not enough images for requested page number %s',\n page_num)\n continue\n\n if self.is_blank_page(img):\n logger.debug('PDF outline places %s at page %s but it is blank', title, page_num)\n # do NOT include as a possible cover page\n else:\n # non-blank: include as possible cover page\n logger.debug('PDF outline places %s at page %s', title, page_num)\n possible_coverpages.append(page_num)\n\n if possible_coverpages:\n # for now, just return the lowest page number, which should be\n # the first cover or title page if cover is blank\n return sorted(possible_coverpages)[0]\n\n def is_blank_page(self, imgfile):\n '''Check whether or not a specified image is blank. 
Currently uses\n :mod:`Pillow` to determine if the image has a percentage of white over\n some threshold *or* if the image is 100% black (which seems to occur\n in some cases with two-tone jpeg2000 images).\n\n :param imgfile: path to the image to be checked\n :returns: boolean\n '''\n\n # in some cases, there are empty files; consider empty == blank\n if os.path.getsize(imgfile) == 0:\n logger.debug('%s is an empty file; considering blank', imgfile)\n return True\n\n img = Image.open(imgfile, mode='r')\n try:\n colors = img.getcolors()\n except Exception as err:\n logger.error('Error loading image %s: %s', imgfile, err)\n # for now, going to return true/blank if the image can't be read\n # (but this is an assumption)\n return True\n\n # returns a list of (count, color)\n # getcolors returns None if maxcolors (default=256) is exceeded\n if colors is None:\n colors = img.getcolors(1000000) # set maxcolors ridiculously high\n # NOTE: colors still could be none at this point\n # For now, assuming if we can't get colors that the image is *not* blank\n if colors is None:\n logger.warn('%s has too many colors for retrieval; assuming non-blank', imgfile)\n return False\n\n # color white, in various formats, for comparing against colors pulled from the image\n whites = [\n 255, # not sure how this matches anything, but must be in some images\n ImageColor.colormap['white'],\n ImageColor.getrgb('white')\n ]\n\n blacks = [\n 0, # shows up as color value in some black/white images\n ImageColor.colormap['black'],\n ImageColor.getrgb('black')\n ]\n\n # percent of the page that needs to be white for it to be\n # considered blank\n blank_page_threshold = 100.0\n\n total, white_total, black_total = 0, 0, 0\n for count, color in colors:\n total += count\n if color in whites:\n white_total = count\n if color in blacks:\n black_total = count\n percent_white = (float(white_total) / float(total)) * 100\n percent_black = (float(black_total) / float(total)) * 100\n logger.debug('%s is %.1f%% percent white and %.1f%% black', imgfile,\n percent_white, percent_black)\n\n # if percent white is over configured threshold OR if image is\n # completely black, consider it to be blank\n if percent_white >= blank_page_threshold or percent_black == 100.0:\n return True\n else:\n return False\n\n def convert_to_jp2(self, imgfile):\n '''Convert an image file to JPEG2000 (if it isn't already a JP2).\n\n Returns tuple of image file path and boolean indicating if the\n path refers to a tempfile that should be deleted after processing\n is done.\n '''\n img = Image.open(imgfile, mode='r')\n # if already jpeg200, do nothing\n if img.format == 'JPEG2000':\n return imgfile, False\n\n # 1-bit tiffs need to be converted before they can be saved as jp2\n if img.format == 'TIFF' and img.mode == '1':\n img = img.convert(mode='L')\n # generate tempfile to save new jp2\n tmp = tempfile.NamedTemporaryFile(prefix='readux-img-', suffix='.jp2',\n delete=False)\n img.save(tmp, format='jpeg2000')\n\n return tmp.name, True\n\n\n def ingest_page(self, imgfile, vol, vol_info, cover=False,\n pageindex=1, update=False, page=None):\n 'Create and ingest a page object *or* update an existing page image'\n\n # create the page image object and associate with volume\n # calculate text & position file names\n imgbase = os.path.basename(imgfile)\n imgbasename, sep, suffix = imgbase.partition('.')\n txtfile = os.path.join(vol_info.ocr_file_path, imgbasename) + '.txt'\n posfile = os.path.join(vol_info.ocr_file_path, imgbasename) + '.pos'\n\n # make sure text 
and position files exist; if they don't, bail out\n if not os.path.exists(txtfile):\n if self.verbosity >= self.v_normal:\n self.stdout.write('Error: text file %s does not exist; skipping\\n' % txtfile)\n return\n if not os.path.exists(posfile):\n if self.verbosity >= self.v_normal:\n self.stdout.write('Error: position %s file does not exist; skipping \\n' % posfile)\n return\n # if the image file is zero-size (which apparently happens sometimes?), skip\n if os.path.getsize(imgfile) == 0:\n if self.verbosity >= self.v_normal:\n self.stdout.write('Error: image file %s is zero-size; skipping \\n' % imgfile)\n return\n\n\n # If image is not already jpeg200, convert it before ingest\n imgfile, jp2_tmpfile = self.convert_to_jp2(imgfile)\n # NOTE: jp2 support in Pillow requires additional libraries,\n # TODO: document openjpeg installation!\n\n if self.verbosity > self.v_normal:\n self.stdout.write('Ingesting page %s' % imgfile)\n self.stdout.write(' text: %s\\n' % txtfile)\n\n if page is None:\n page = self.repo.get_object(type=PageV1_0)\n # object label based on volume label (ocm# + volume info)\n page.label = '%s page %d' % (vol.label, pageindex)\n # set the relation to the volume object\n page.volume = vol\n # set a dc:title based on volume title\n page.dc.content.title = '%s page %d' % (vol.dc.content.title, pageindex)\n\n # set page order\n page.page_order = pageindex\n logger.debug('page %d rels-ext:%s',\n pageindex, page.rels_ext.content.serialize(pretty=True))\n\n if not self.dry_run:\n # calculate checksums and mimetypes for ingest\n m = magic.Magic(mime=True)\n\n dsfiles = {\n page.image: imgfile,\n page.text: txtfile,\n page.position: posfile\n }\n open_files = []\n\n for ds, filepath in dsfiles.iteritems():\n # calculate checksum\n checksum = md5sum(filepath)\n # if this an update and the checksums match, don't modify the datastream\n if update and checksum == ds.checksum:\n continue\n ds.checksum = md5sum(filepath)\n logger.debug('checksum for %s is %s',\n filepath, ds.checksum)\n ds.checksum_type = 'MD5'\n\n # make sure image mimetype gets set correctly (should be image/jp2)\n # most reliable, general way to do this *should* be to\n # set mimetype based on mime magic\n mimetype = m.from_file(filepath)\n mimetype, separator, options = mimetype.partition(';')\n # If JPEG2000 is recognized as generic mimetype, override it\n if mimetype == 'application/octet-stream' and ds.id == PageV1_0.image.id:\n mimetype = 'image/jp2'\n\n # set datastream content\n openfile = open(filepath)\n open_files.append(openfile)\n ds.content = openfile\n\n # NOTE: removed code from readux v1 for optional file-uri based ingest\n\n try:\n # if this is not an update OR if the existing object has been\n # modified, ingest/update in Fedora\n ingested = False\n if not update or any([page.image.isModified(),\n page.text.isModified(), page.position.isModified()]):\n\n ingested = page.save('ingesting page image %d for %s' \\\n % (pageindex, page.volume.pid))\n verb = 'updated' if update else 'ingested'\n logger.debug('page %s %s', page.pid, verb)\n self.stats['pages'] += 1\n\n elif update:\n if self.verbosity >= self.v_normal:\n self.stdout.write('No updates needed for %s' % page.pid)\n\n # if a temporary file was created, remove it\n if jp2_tmpfile:\n logger.debug('removing temporary JPEG2000 file %s', imgfile)\n os.remove(imgfile)\n\n except RequestFailed as rf:\n self.stats['errors'] += 1\n self.stdout.write('Failed to ingest page image: %s\\n' % rf)\n ingested = False\n self.stats['page_errors'] += 1\n\n 
finally:\n # close any local files that were opened for ingest\n for of in open_files:\n of.close()\n\n # if ingesting a cover and ingest succeeded, update volume\n # object with cover relation *unless* this is an update\n if cover and ingested and not update:\n try:\n # set current page as primary image for this volume\n vol.primary_image = page\n vol.rels_ext.save('adding relation to cover page object')\n self.stats['updated'] += 1\n if self.verbosity > self.v_normal:\n self.stdout.write('Updated Volume %s with primary image relation to %s' % \\\n (vol.pid, page.pid))\n\n except RequestFailed as rf:\n self.stats['errors'] += 1\n self.stdout.write('Failed to update volume %s with relation to cover %s : %s' \\\n % (vol.pid, page.pid, rf))\n","sub_path":"readux/books/management/page_import.py","file_name":"page_import.py","file_ext":"py","file_size_in_byte":21649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"81272053","text":"\"\"\"\nLook at step sizes, conditioned on stuff.\n\n\"\"\"\n__author__ = \"Jack Goffinet\"\n__date__ = \"November 2018\"\n\nimport numpy as np\nimport umap\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LinearRegression\nplt.switch_backend('agg')\n\n\n\ndef get_embeddings_times(loader, model, alg='umap'):\n\t# First get latent representations.\n\tlatent = model.get_latent(loader, n=10**9)\n\t# Then collect times.\n\tassert(len(latent) == len(loader.dataset))\n\ttimes = np.zeros(len(latent))\n\ti = 0\n\tfor temp in loader:\n\t\tbatch_times = temp['time'].detach().numpy()\n\t\ta = np.min(batch_times)\n\t\tb = np.max(batch_times)\n\t\tif b > 26:\n\t\t\tprint('here', a, b)\n\t\t\tquit()\n\t\ttimes[i:i+len(batch_times)] = batch_times\n\t\ti += len(batch_times)\n\tperm = np.random.permutation(len(latent))\n\tlatent = latent[perm]\n\ttimes = times[perm]\n\t# Fit UMAP on a random subset, get embedding.\n\tif alg == 'umap':\n\t\ttransform = umap.UMAP(n_components=2, n_neighbors=20, min_dist=0.1, metric='euclidean')\n\telif alg == 'pca':\n\t\ttransform = PCA(n_components=10)\n\telse:\n\t\tprint('unidentified algorithm: ', alg)\n\t\tquit()\n\tprint(\"fitting gif\")\n\ttransform.fit(latent[:9000])\n\tprint(\"done\")\n\tembeddings = transform.transform(latent)\n\treturn embeddings, times\n\n\n\ndef make_step_size_plot(loader, model):\n\txmin, xmax, ymin, ymax, tmin, tmax = -7, 15, -7, 15, 15, 26\n\tembeddings, times = get_embeddings_times(loader, model, alg='umap')\n\tpca_embeddings, _ = get_embeddings_times(loader, model, alg='pca')\n\tp = np.argsort(times)\n\tembeddings = embeddings[p]\n\tpca_embeddings = pca_embeddings[p]\n\ttemp_embed = []\n\tfor i, embedding in enumerate(embeddings):\n\t\tif embedding[1] > 6.:\n\t\t\ttemp_embed.append(np.copy(pca_embeddings[i]))\n\tembeddings = np.array(temp_embed)\n\tdistances = np.sqrt(np.sum(np.power(np.diff(embeddings, axis=0), 2), axis=1))\n\tX = np.array(range(len(distances))).reshape(-1,1)\n\tplt.scatter(range(len(distances)), distances, c='k', s=1, alpha=0.08)\n\treg = LinearRegression().fit(X, distances)\n\tplt.plot([[0],[len(distances)]],reg.predict([[0],[len(distances)]]))\n\tplt.savefig('temp.pdf')\n\tplt.close('all')\n","sub_path":"plotting/step_sizes.py","file_name":"step_sizes.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"444143399","text":"# FR-EN translator using multiple 
methods\n\nimport sys\nimport os\n\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import urlopen\ntry:\n import simplejson as json\nexcept (ImportError):\n import json\nfrom urllib.parse import quote\nfrom bs4 import BeautifulSoup\nimport string\n# from wiktionaryparser import WiktionaryParser\nimport requests\n\nDICT_LANGUAGE = {\"de\": \"German\",\n \"en\": \"English\",\n \"fr\": \"French\",\n \"es\": \"Spanish\",\n \"pt\": \"Portuguese\",\n \"it\": \"Italian\",\n \"ru\": \"Russian\",\n \"ja\": \"Japanese\",\n \"zh\": \"Chinese\",\n \"pl\": \"Polish\",\n \"nl\": \"Dutch\",\n \"sv\": \"Swedish\",\n \"da\": \"Danish\",\n \"fi\": \"Finnish\",\n \"el\": \"Greek\",\n \"cs\": \"Czech\",\n \"ro\": \"Romanian\",\n \"hu\": \"Hungarian\",\n \"sk\": \"Slovak\",\n \"bg\": \"Bulgarian\",\n \"sl\": \"Slovene\",\n \"lt\": \"Lithuanian\",\n \"lv\": \"Latvian\",\n \"et\": \"Estonian\",\n \"mt\": \"Maltese\"\n }\n\nTYPES = {\"noun\": [\"noun, masculine\", \"noun, neuter\", \"noun, feminine\", \"noun\", 'noun,\\xa0masculine', \"noun,\\xa0neuter\",\n \"noun,\\xa0feminine\"],\n \"adjective\": [\"adjective\"],\n \"verb\": [\"verb\"]}\n\n\ndef linguee(word, from_l, to_l, type=None):\n \"\"\"\n crawl through linguee for a definition\n\n :param word: str\n :param from_l: str\n :param to_l: str\n :param type:\n :return:\n \"\"\"\n word = word.lower()\n from_l = DICT_LANGUAGE[from_l].lower() if len(from_l) == 2 else from_l.lower()\n to_l = DICT_LANGUAGE[to_l].lower() if len(to_l) == 2 else to_l.lower()\n\n short_from = from_l if len(from_l) == 2 else list(DICT_LANGUAGE.keys())[\n list(DICT_LANGUAGE.values()).index(from_l.capitalize())]\n short_to = to_l if len(to_l) == 2 else list(DICT_LANGUAGE.keys())[\n list(DICT_LANGUAGE.values()).index(to_l.capitalize())]\n\n linguee_link = \"http://www.linguee.com/{}-{}/search?source=auto&query={}\".format(from_l, to_l, quote(word))\n page = urlopen(linguee_link)\n soup = BeautifulSoup(page, \"lxml\")\n\n definitions = list()\n\n def1 = soup.find_all('a', class_=\"dictLink featured\")\n for element in def1:\n if bool(element.find_parent(class_='lemma_content')) * bool(element.find_parent(attrs={\n \"data-source-lang\": short_from.upper()})):\n if bool(type) and any(bool(element.parent.find(attrs={\"title\": t})) for t in TYPES[type]):\n definitions.append(element.get_text())\n elif not bool(type):\n definitions.append(element.get_text())\n\n def2 = soup.find_all('a', class_=\"dictLink\")\n for element in def2:\n if bool(element.find_parent(class_='lemma_content')) * bool(element.find_parent(\n attrs={\"data-source-lang\": short_from.upper()})) * bool(element.find_parent(\n class_='translation_group')) * bool(element.find_parent(class_='exact')):\n if bool(type) and any(bool(element.parent.find(attrs={\"title\": t})) for t in TYPES[type]):\n definitions.append(element.get_text())\n elif not bool(type):\n definitions.append(element.get_text())\n\n return definitions\n\n\nif __name__ == \"__main__\":\n lang_from = \"de\"\n lang_to = \"en\"\n key = \"Messwert\"\n\n definitions1 = linguee(key, lang_from, lang_to, \"noun\")\n definitions2 = list()\n for definition in definitions1:\n print(\"{},{},{}\".format(definition, lang_to, lang_from))\n definition_ = linguee(definition, lang_to, lang_from, \"noun\")\n definitions2.append(definition_)\n\n print(definitions2)\n","sub_path":"linguee2.py","file_name":"linguee2.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} 
+{"seq_id":"339517273","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 16:41:06 2018\n\n@author: kazuki.onodera\n\"\"\"\n\nimport gc, os\nfrom tqdm import tqdm\nimport pandas as pd\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count, Pool\n#from glob import glob\nimport count\nimport utils_cat\nimport utils\nutils.start(__file__)\n#==============================================================================\n\nHEAD = 160000\n\nSEED = 71\n\nparam = {\n 'objective': 'binary',\n 'metric': 'auc',\n 'learning_rate': 0.01,\n 'max_depth': 6,\n 'num_leaves': 63,\n 'max_bin': 255,\n \n 'min_child_weight': 10,\n 'min_data_in_leaf': 150,\n 'reg_lambda': 0.5, # L2 regularization term on weights.\n 'reg_alpha': 0.5, # L1 regularization term on weights.\n \n 'colsample_bytree': 0.7,\n 'subsample': 0.5,\n 'nthread': 16,\n# 'nthread': cpu_count(),\n 'bagging_freq': 1,\n 'verbose':-1,\n 'seed': SEED\n }\n\n\n\nuse_files = ['train_f0', 'train_f101']\n\n#REMOVE_FEATURES = ['f023', 'f024']\n\n# =============================================================================\n# load\n# =============================================================================\n\nfiles = utils.get_use_files(use_files, True)\n\n#tmp = []\n#for f in files:\n# sw = False # skip switch\n# for r in REMOVE_FEATURES:\n# if r in f:\n# sw = True\n# break\n# if not sw:\n# tmp.append(f)\n#files = tmp\n\nprint('features:', len(files))\n\n\n\nX = pd.concat([\n pd.read_feather(f).head(HEAD) for f in tqdm(files, mininterval=60)\n ], axis=1)\ny = utils.read_pickles('../data/label').head(HEAD).TARGET\n\n\nif X.columns.duplicated().sum()>0:\n raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')\nprint('no dup :) ')\nprint(f'X.shape {X.shape}')\n\n#X = X.rank(method='dense')\ngc.collect()\n\nCAT = list( set(X.columns)&set(utils_cat.ALL))\n\n# =============================================================================\n# imp\n# =============================================================================\ndtrain = lgb.Dataset(X, y, categorical_feature=CAT )\n#model = lgb.train(param, dtrain, len(ret['auc-mean']))\nmodel = lgb.train(param, dtrain, 2000)\nimp = ex.getImp(model).sort_values(['gain', 'feature'], ascending=[False, True])\n\n\nimp.to_csv(f'LOG/imp_{__file__}.csv', index=False)\n\n#def multi_touch(arg):\n# os.system(f'touch \"../feature_unused/{arg}.f\"')\n#\n#\n#col = imp[imp['split']==0][imp['feature'].str.startswith('f1')]['feature'].tolist()\n#pool = Pool(cpu_count())\n#pool.map(multi_touch, col)\n#pool.close()\n\n\n#==============================================================================\nutils.end(__file__)\n#utils.stop_instance()\n\n\n\n\n","sub_path":"Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/181_imp.py","file_name":"181_imp.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"643507113","text":"\"\"\"Test ONVIF diagnostics.\"\"\"\nfrom homeassistant.core import HomeAssistant\n\nfrom . 
import (\n FIRMWARE_VERSION,\n MAC,\n MANUFACTURER,\n MODEL,\n SERIAL_NUMBER,\n setup_onvif_integration,\n)\n\nfrom tests.components.diagnostics import get_diagnostics_for_config_entry\nfrom tests.typing import ClientSessionGenerator\n\n\nasync def test_diagnostics(\n hass: HomeAssistant, hass_client: ClientSessionGenerator\n) -> None:\n \"\"\"Test generating diagnostics for a config entry.\"\"\"\n\n entry, _, _ = await setup_onvif_integration(hass)\n\n diag = await get_diagnostics_for_config_entry(hass, hass_client, entry)\n\n assert diag == {\n \"config\": {\n \"entry_id\": \"1\",\n \"version\": 1,\n \"domain\": \"onvif\",\n \"title\": \"Mock Title\",\n \"data\": {\n \"name\": \"TestCamera\",\n \"host\": \"**REDACTED**\",\n \"port\": 80,\n \"username\": \"**REDACTED**\",\n \"password\": \"**REDACTED**\",\n \"snapshot_auth\": \"digest\",\n },\n \"options\": {\"extra_arguments\": \"-pred 1\", \"rtsp_transport\": \"tcp\"},\n \"pref_disable_new_entities\": False,\n \"pref_disable_polling\": False,\n \"source\": \"user\",\n \"unique_id\": \"aa:bb:cc:dd:ee\",\n \"disabled_by\": None,\n },\n \"device\": {\n \"info\": {\n \"manufacturer\": MANUFACTURER,\n \"model\": MODEL,\n \"fw_version\": FIRMWARE_VERSION,\n \"serial_number\": SERIAL_NUMBER,\n \"mac\": MAC,\n },\n \"capabilities\": {\n \"snapshot\": False,\n \"events\": False,\n \"ptz\": True,\n \"imaging\": True,\n },\n \"profiles\": [\n {\n \"index\": 0,\n \"token\": \"dummy\",\n \"name\": \"profile1\",\n \"video\": {\n \"encoding\": \"any\",\n \"resolution\": {\"width\": 640, \"height\": 480},\n },\n \"ptz\": None,\n \"video_source_token\": None,\n }\n ],\n },\n \"events\": {\n \"pullpoint_manager_state\": {\n \"__type\": \"\",\n \"repr\": \"\",\n },\n \"webhook_manager_state\": {\n \"__type\": \"\",\n \"repr\": \"\",\n },\n },\n }\n","sub_path":"tests/components/onvif/test_diagnostics.py","file_name":"test_diagnostics.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"149458822","text":"user_input = ''\nwhile user_input != 'q':\n try:\n weight = int(input('Enter weight (in pounds): '))\n if weight < 0:\n raise ValueError('Invalid weight.')\n\n height = int(input('Enter height (in inches): '))\n if height < 0:\n raise ValueError('Invalid height.')\n\n bmi = (float(weight) / float(height * height)) * 703\n print('BMI:', bmi)\n print('(CDC: 18.6-24.9 normal)\\n')\n # Source www.cdc.gov\n\n except ValueError as excpt:\n print(excpt)\n print('Could not calculate health info.\\n')\n\n except ZeroDivisionError as excpt:\n print(excpt)\n print('Could not calculate health info. 
\\n')\n \n user_input = input(\"Enter any key ('q' to quit): \")\n","sub_path":"DavidBChap10/10.3.2.py","file_name":"10.3.2.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"136660820","text":"import sys\nimport os\n\naliensh = \"/home/odjuvsla/Workspace/alice/alien/api/bin/aliensh\" # path to aliensh\ngridPath = \"/alice/sim/LHC10a18/\" # general path to data (in alien) (without run number)\nstdfileName = \"AliESDs.root\" # Filename to download\n\nrunNb = sys.argv[1] \nnumberOfEvents = int(sys.argv[2])\n\npath = gridPath + runNb\n\ntmpFilename = \"tmp_\" + runNb +\".txt\"\nscriptFilename =\"script_\" + runNb + \".txt\"\n\nif os.system(\"ls \" + runNb) != 0:\n command = \"mkdir \" + runNb\n os.system(command)\n\ncommand = aliensh + \" -c 'ls \" + path + \"' > \" + tmpFilename\n\n\nos.system(command)\n\ntmpfile = open(tmpFilename, \"r\")\n\nevNb = -1\n\nscriptFile = open(scriptFilename, \"w\")\n\ndirname = \"\"\n\nn = 0\nwhile 1:\n evNb = tmpfile.readline()\n if evNb == \"\" :\n break\n dirname = runNb + \"/\" + evNb.strip()\n if os.system(\"ls \" + dirname) != 0:\n command = \"mkdir \" + dirname\n os.system(command)\n command = \"cp \" + path + \"/\" + evNb.strip() + \"/\" + stdfileName+ \" file:\" + dirname + \"/\" + stdfileName+ \" \\n\"\n scriptFile.write(command)\n n = n+1\n if n > numberOfEvents:\n break\ntmpfile.close()\nscriptFile.close() \n\ncommand = aliensh + \" file:\" + scriptFilename\nos.system(command)\n\n","sub_path":"tools/getESDs.py","file_name":"getESDs.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"542992349","text":"# -*- coding: utf-8 -*-\n# $Id: setup.py 246084 2011-11-01 22:45:18Z glenfant $\n\"\"\"Packaging and distributing aws.zope2zcmldoc\"\"\"\n\nfrom setuptools import setup, find_packages\nimport os\n\n\ndef read(*names):\n here = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(here, *names)\n return open(path, 'r').read().strip()\n\nsetup(name='aws.zope2zcmldoc',\n version=read('aws', 'zope2zcmldoc', 'version.txt'),\n description=\"ZCML documentation browser for Zope 2\",\n long_description=(read('README.txt') + \"\\n\\n\" +\n read('docs', 'HISTORY.txt')),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Zope2\",\n \"Intended Audience :: Developers\",\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Topic :: Software Development :: Documentation\"\n ],\n keywords='zope2 zcml documentation',\n author='Gilles lenfant',\n author_email='gilles.lenfant@alterway.fr',\n url='http://pypi.python.org/pypi/aws.zope2zcmldoc',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['aws'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n # -*- Extra requirements: -*-\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","sub_path":"pypi_install_script/aws.zope2zcmldoc-1.1.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"203257875","text":"# This program can be used to convert temperatures from Celsius to Fahrenheit and vice 
versa\r\n# Formulae : C = (F - 32) / 1.8\r\n# C = Temperature in Celsius\r\n# F = Temperature in Fahrenheit\r\n\r\ndef TempConvert():\r\n print (\"\\n\")\r\n print (\"Temprature Converter Application\")\r\n print (\"Designed & Developed by : Vishal Farakate\")\r\n\r\n opt = input (\"\"\"\r\n Select your option : \r\n 1. Celcius to Farenhite\r\n 2. Farenhite to Celcius\r\n \r\n \"\"\")\r\n\r\n if int(opt) == 1:\r\n C = input(\"Enter Temprature in Celcius (degree) : \")\r\n F = ((int(C))*1.8)+32\r\n print (\"Temprature in Farenhite is\",F,\"degree\")\r\n\r\n elif int(opt) == 2:\r\n F = input(\"Enter Temprature in Farenhite (degree) : \")\r\n C = ((int(F))-32)/1.8\r\n print (\"Temprature in Celcius is\",C,\"degree\")\r\n\r\n else:\r\n print (\"Invalid Input\")\r\n\r\nTempConvert()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Temprature_Converter.py","file_name":"Temprature_Converter.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"87273329","text":"import sys\ndef moon_weight():\n print('Please enter your current Earth weight')\n earth_weight = int(sys.stdin.readline())\n print('Please enter the amount your weight might increase each year')\n weightyear = float(sys.stdin.readline())\n print('Please enter the number of years')\n year = int(sys.stdin.readline())\n moon_multiplier = 0.165\n for x in range(0, year):\n moon_weight = (earth_weight+(weightyear*x))*moon_multiplier \n print('year %s = %s' % (x, moon_weight) )\n \nmoon_weight()\n","sub_path":"ch7ex3a.py","file_name":"ch7ex3a.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"569726775","text":"#modulos\nimport pygame\nfrom pygame.locals import *\n#constantes\nancho=600\nalto=400\nblanco=(255,255,255)\n\n\n#clases\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n#funciones\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\ndef main():\n pygame.init()\n#creamos la ventana\n pantalla=pygame.display.set_mode((ancho,alto))\n pygame.display.set_caption(\"El Pingüino Loco\")\n#cargamos imagen de fondo\n fondo=pygame.image.load(\"fondo.jpg\").convert()\n hormiga=pygame.image.load(\"tux.png\").convert_alpha()\n#indicamos posicion de hormiga\n tux_y=320\n tux_x=0\n#indicamos donde va\n pantalla.blit(fondo,(0,0))\n pantalla.blit(hormiga,(tux_x,tux_y))\n#mostramos cambios en pantalla\n pygame.display.flip()\n#bucle principal del juego\n salir=False\n while salir!=True:\n for eventos in pygame.event.get():\n if eventos.type==pygame.QUIT:\n salir=True\n if eventos.type==pygame.KEYDOWN:\n if eventos.key==pygame.K_RIGHT:\n if tux_x<530:\n tux_x=tux_x+10\n pantalla.blit(fondo,(0,0))\n pantalla.blit(hormiga,(tux_x,tux_y))\n pygame.display.flip()\n if eventos.key==pygame.K_LEFT:\n if tux_x>0:\n tux_x=tux_x-10\n pantalla.blit(fondo,(0,0))\n pantalla.blit(hormiga,(tux_x,tux_y))\n pygame.display.flip()\n if eventos.key==pygame.K_UP:\n if tux_y>0:\n tux_y=tux_y-10\n pantalla.blit(fondo,(0,0))\n pantalla.blit(hormiga,(tux_x,tux_y))\n pygame.display.flip()\n if 
eventos.key==pygame.K_DOWN:\n if tux_y<320:\n tux_y=tux_y+10\n pantalla.blit(fondo,(0,0))\n pantalla.blit(hormiga,(tux_x,tux_y))\n pygame.display.flip()\n\n\n \n pygame.quit()\n\nif __name__=='__main__':\n pygame.init()\n main()\n\n","sub_path":"pygame1.py","file_name":"pygame1.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"442003655","text":"#!/usr/bin/python3\n\n# @Project = step_LeetCode\n# @File : 1019_Next_Greater_Node_In_Linked_List\n# @Author : TCY\n# @Time : 2019/4/25 15:38\n# @Email : tangcaiyuan@hust.edu.cn\n# @Software: PyCharm\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution(object):\n def nextLargerNodes(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: List[int]\n \"\"\"\n \"\"\"维护逆序栈\"\"\"\n shu = []\n # 链表不方便维护先变list\n while head:\n shu.append(head.val)\n head = head.next\n # 逆序栈对应元素的位置\n sta = []\n res = [0 for _ in range(len(shu))]\n # 新值入栈,若小于栈顶,则结果指针跳过;若大于栈顶,则将该值对应的结果置为当前值。\n for i in range(len(shu)):\n while (len(sta)>0) and (shu[i] > shu[sta[-1]]):\n res[sta[-1]] = shu[i]\n sta.pop()\n sta.append(i)\n return res","sub_path":"Weekly_Contest/Weekly_Contest_130/1019_Next_Greater_Node_In_Linked_List.py","file_name":"1019_Next_Greater_Node_In_Linked_List.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"12908816","text":"import pandas as pd\nimport numpy as np\n\nraw = pd.read_csv(r\"cobe/data.csv\")\nkobe = raw[pd.notnull(raw['shot_made_flag'])]\n\ndrops = ['shot_id', 'team_id', 'team_name', 'shot_zone_area', 'shot_zone_range', 'shot_zone_basic',\n 'matchup', 'lon', 'lat', 'seconds_remaining', 'minutes_remaining',\n 'shot_distance', 'loc_x', 'loc_y', 'game_event_id', 'game_id', 'game_date']\nfor drop in drops:\n raw = raw.drop(drop, 1)\n\ncategorical_vars = ['action_type', 'combined_shot_type', 'shot_type', 'opponent', 'period', 'season']\nfor var in categorical_vars:\n # 将新生成的位示图数据加入raw\n # 1:连接方式,保持列数不变的列连接\n raw = pd.concat([raw, pd.get_dummies(raw[var], prefix=var)], 1)\n raw = raw.drop(var, 1)\n","sub_path":"1.the-life-of-kobe/__init2__.py","file_name":"__init2__.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"39840277","text":"# coding:utf-8\n\"\"\"\nOpenCV 对电脑摄像头开启,成像,关闭操作\n\"\"\"\n\nimport cv2\n\ncamera = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = camera.read()\n cv2.imshow('FRAME', frame)\n\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"cam_operates.py","file_name":"cam_operates.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"398013810","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\nimport csv\nimport statistics\nimport collections\nimport datetime\n\n\nclass Tableau:\n def __init__(self, entetes, data):\n self.entetes = entetes\n self.data = data\n\n @classmethod\n def from_csv(cls, fichier, contient_entetes=True, premiere_ligne=0):\n \"\"\"Charge l'ensemble d'un csv dans un Tableau.\n premiere_ligne 1ère ligne à prendre en compte dans le Tableau \n (entête ou donnée pure)\n contient_entetes ... 
\n \"\"\"\n try:\n with open(fichier, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n data = [ligne for ligne in reader]\n except UnicodeDecodeError:\n with open(fichier, \"r\", encoding=\"iso-8859-15\") as f:\n reader = csv.reader(f)\n data = [ligne for ligne in reader]\n if contient_entetes:\n return cls(data[premiere_ligne], data[premiere_ligne+1:])\n else:\n return cls([\"\"*len(data[premiere_ligne])], data[premiere_ligne:])\n\n def to_csv(self, fichier):\n \"Sauvegarde un tableau dans fichier\"\n with open(fichier, \"w\") as f:\n writer = csv.writer(f, fieldnames=self.entetes)\n writer.writerows([ligne for ligne in self.data])\n \n ###\n # Manipuler le Tableau\n ###\n def filtrer_colonne(self, colonne, predicat):\n \"Filtrer le Tableau selon une colonne. Supprime si predicat(val) is True\"\n self.data = [ligne for ligne in self.data if predicat(ligne[colonne])]\n\n def filtrer(self, predicat):\n \"Filtrer le tableau en utilisant des critères sur une ligne\"\n self.data = [ligne for ligne in data if predicat(ligne)]\n\n def trier_par(self, colonne, reverse=False):\n \"Trier le tableau par une colonne\"\n self.data = sorted(self.data, key=lambda col: col[colonne], reverse=reverse)\n\n def traiter_colonne(self, colonne, fonction):\n \"\"\"Appliquer le traitement 'fonction' à toute la colonne 'colonne'.\n 'fonction' a pour argument le contenu et renvoie le résultat.\"\"\"\n for i in range(len(self.data)):\n self.data[i][colonne] = fonction(self.data[i][colonne])\n \n def add_colonne(self, entete, position=None, default=None):\n \"\"\"Ajouter une nouvelle colonne.\n Position : 0-> début, 1-> deuxième, etc…\"\"\"\n if position is None:\n position = len(self.entetes)\n self.entetes[position:position] = [entete]\n for i in range(len(self.data)):\n self.data[i][position:position] = [default]\n\n def del_colonne(self, colonne):\n \"Supprimer une colonne\"\n del self.entetes[colonne]\n for i in range(len(self.data)):\n del self.data[i][colonne]\n\n def move_colonne(self, de, a):\n if de==a:\n return\n self.add_colonne(self.entetes[de], position=a)\n if a \")\r\n# \r\n# if input == \"remove\":\r\n# graph.remove_edge(int(raw_input(\"cmd:>>>Enter edge ID: \")))\r\n# visualise.clear_figure()\r\n# visualise.draw_graph(graph)\r\n# path = graph.shortest_path(pathStart)\r\n# if path != -1:\r\n# visualise.draw_edges(path)\r\n# else: \r\n# print \"No route exists to any of the exits.\"\r\n# elif input == \"print\":\r\n# graph.print_graph()\r\n# elif input == \"graph\":\r\n# visualise.draw_graph(graph)\r\n# elif input == \"path\":\r\n# path = graph.shortest_path(pathStart)\r\n# if path != -1:\r\n# visualise.draw_edges(path)\r\n# else: \r\n# print \"No route exists to any of the exits.\"\r\n# elif input == \"begin\":\r\n# pathStart = raw_input(\"cmd:>>>Enter new start node: \")\r\n# elif input == \"reset\":\r\n# graph.reset_graph()\r\n# visualise.clear_figure()\r\n# visualise.draw_graph(graph)\r\n# elif input == \"clear\":\r\n# visualise.clear_figure()\r\n# elif input == \"exit\":\r\n# pass\r\n# else: \r\n# print \"Error: unknown command.\"\r\n# \r\n#visualise.close()\r\n\r\n","sub_path":"assignment2OO.pyw","file_name":"assignment2OO.pyw","file_ext":"pyw","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"89531604","text":"import datetime\nfrom model.account import Account\nfrom model.transaction import Transaction\n\n\ndef parse_client_header(record):\n record_type = record[0:2]\n account_no_hex = record[2:6]\n 
account_no_dec = int(account_no_hex, 16)\n transaction_records = int(record[6:])\n\n account = Account(account_no_dec, transaction_records)\n\n return account\n\ndef parse_client_transaction(record):\n record_type = record[0:2]\n transaction_date_str = record[2:17]\n\n transaction_date = datetime.datetime(\n int(transaction_date_str[4:8]), # year\n int(transaction_date_str[2:4]), # month\n int(transaction_date_str[0:2]), # day\n int(transaction_date_str[9:11]), # hour\n int(transaction_date_str[11:13]), # minute\n int(transaction_date_str[13:15]) # seconds\n )\n\n transaction_type = record[17]\n transaction_amount = float(record[18:37])\n third_party_account = int(record[37:])\n\n transaction = Transaction(transaction_date, transaction_type, transaction_amount, third_party_account)\n\n return transaction\n\n\n#\n# -- Test\n#\n\naccount1 = parse_client_header('CH1ABC0002')\n\ntransaction1 = parse_client_transaction('CT19032019:080414D000000000023.5500001431759372813465')\naccount1.add_transaction(transaction1)\n\ntransaction2 = parse_client_transaction('CT19032019:132508D000000000147.1400001948847238383813')\naccount1.add_transaction(transaction2)\n\nprint(account1)\n\n\n","sub_path":"FastTrack/BankingProject/16-packages/controller/transaction_parser.py","file_name":"transaction_parser.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"71156524","text":"__author__ = 'mike'\nimport sqlite3\nfrom utils import throw_exception\n\nclass Database:\n #Variables\n db = sqlite3.Connection\n cursor = sqlite3.Cursor\n wordsCached = False\n sourcesCached = False\n docacheWordsAns = False\n filling = False\n source_ID = {}\n ID_source = {}\n sourceID = 1\n word_ID = {}\n ID_word = {}\n wordIDs = []\n #Functions\n def connect(self, filename):\n try:\n self.db = sqlite3.connect(filename)\n self.cursor = self.db.cursor()\n except sqlite3.Error as e:\n throw_exception(e)\n return\n\n def set_source(self, source): #TODO: Минимизаровать кэш слов, в зависимости от выбраного текста\n if type(source) == str:\n if source == \"all\":\n sourceID = -1\n else:\n sourceID = self.get_source_id(source)\n if type(source) == int:\n sourceID = source\n self.sourceID = sourceID\n self.build_cache(\"words\")\n return\n\n def add_source(self, sourcename):\n if not self.exists(\"sources\", \"sourcename\", sourcename):\n id = self.last_id(\"sources\") + 1\n sql = \"INSERT INTO sources VALUES(\" + str(id) \\\n + \", \\'\" + sourcename + \"\\')\"\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except sqlite3.Error as e:\n throw_exception(e)\n self.build_cache(\"sources\")\n else:\n print(\"This source already exists.\")\n return\n\n def add_word(self, word):\n sourceID = self.sourceID\n id = self.last_id(\"words\") + 1\n upper = 0\n dot = 0\n if str(word[0]).isupper():\n upper = 1\n if word[-1] in ['.', '?', '!', ';']:\n dot = 1\n sql = \"INSERT INTO words VALUES(\"\\\n + str(id) + \", \"\\\n + str(sourceID) + \", '\"\\\n + word + \"', \"\\\n + str(upper) + \", \"\\\n + str(dot) + \")\"\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except sqlite3.Error as e:\n throw_exception(e)\n self.wordsCached = False\n return\n\n def add_chain(self, parent, chain):\n upper = 0\n dot = 0\n if str(chain[0]).isupper():\n upper = 1\n if chain[-1] in ['.', '?', '!', ';']:\n dot = 1\n parentID = 0\n id = self.last_id(\"chains\") + 1\n if type(parent) == str:\n parentID = self.get_word_id(parent)\n elif type(parent) == int:\n parentID 
= parent\n\n sql = \"INSERT INTO chains VALUES(\"\\\n + str(id) + \", \"\\\n + str(parentID) + \", \"\\\n + str(self.sourceID) + \", \"\\\n + \"\\'\" + chain + \"\\', \"\\\n + str(upper) + \", \"\\\n + str(dot) + \")\"\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except sqlite3.Error as e:\n throw_exception(e)\n return\n\n def get_chains(self, parent, upper = False, dot = False):\n return\n\n def get_parents(self, chain, upper = False, dot = False):\n parents = []\n u = int(0 if upper == False else 1)\n d = int(0 if dot == False else 1)\n sql = \"SELECT ID_words FROM chains \"\\\n + \"WHERE ID_sources = \" + str(self.sourceID)\\\n +\" AND uppercase = \" + str(u)\\\n +\" AND dot = \" + str(d)\n try:\n for row in self.cursor.execute(sql):\n parents.append(row)\n except sqlite3.Error as e:\n throw_exception(e)\n return parents\n\n def get_words(self, id = \"-1\", upper = False, dot = False):\n u = int(0 if upper == False else 1)\n d = int(0 if dot == False else 1)\n words = []\n sql = \"SELECT * FROM words \"\n if(id != \"-1\"):\n sql += \"WHERE ID = \" + str(id)\n sql += \" AND uppercase = \" + str(u)\n sql += \" AND dot = \" + str(d)\n else:\n sql += \"WHERE uppercase = \" + str(u)\n sql += \" AND dot = \" + str(d)\n sql += \" AND ID_sources = \" + str(self.sourceID)\n try:\n for row in self.cursor.execute(sql):\n words.append(row[2])\n except sqlite3.Error as e:\n throw_exception(e)\n return words\n\n def get_source_id(self, source):\n if not self.sourcesCached:\n self.build_cache(\"sources\")\n res = 0\n try:\n res = self.source_ID[source]\n except IndexError as e:\n print(\"No source \\\"\"+source+\"\\\" in database. Falling back to 1:\\\"\" + str(self.ID_source[1]) + \"\\\"\\n\")\n throw_exception(e)\n return 1\n return self.source_ID[source]\n\n def get_word_id(self, word):\n wordID = -1\n if not self.filling:\n if self.wordsCached:\n return self.word_ID[word]\n else:\n self.build_cache(\"words\")\n return self.word_ID[word]\n else:\n sql = \"SELECT * FROM words WHERE word = \\'\" + word + \"\\'\"\n try:\n for row in self.cursor.execute(sql):\n wordID = row[0]\n except sqlite3.Error as e:\n throw_exception(e)\n return wordID\n\n def build_cache(self, table = \"all\"):\n ##########################\n ## making sources cache ##\n if table == \"sources\" or table == \"all\":\n self.ID_source.clear()\n self.source_ID.clear()\n sql = \"SELECT * FROM sources\"\n try:\n for row in self.cursor.execute(sql):\n self.source_ID[row[1]] = row[0]\n self.ID_source[row[0]] = row[1]\n except sqlite3.Error as e:\n throw_exception(e)\n self.sourcesCached = True\n ##########################\n ## making words cache ####\n if table == \"words\" or table == \"all\":\n self.ID_word.clear()\n self.word_ID.clear()\n self.wordIDs.clear()\n sql = \"SELECT * FROM words WHERE ID_sources = \" + str(self.sourceID)\n try:\n for row in self.cursor.execute(sql):\n self.word_ID[row[2]] = row[0]\n self.ID_word[row[0]] = row[2]\n self.wordIDs.append(row[0])\n except sqlite3.Error as e:\n throw_exception(e)\n self.wordsCached = True\n return\n\n def last_id(self, table):\n id = 0\n sql = \"SELECT MAX(ID) FROM \" + table\n try:\n for row in self.cursor.execute(sql):\n id = row[0]\n except sqlite3.Error as e:\n throw_exception(e)\n return int(id or 0) #id gets NoneType None if there are no rows in table\n\n def exists(self, table, column, value):\n word = False\n if table != \"sources\":\n word = True\n sql = \"SELECT * FROM \" + table + \" WHERE \" + column + \" = \\'\" + value + \"\\'\"\n if (self.sourceID > 0) and 
word :\n sql += \" AND ID_sources = \" + str(self.sourceID)\n\n try:\n for row in self.cursor.execute(sql):\n return True\n except sqlite3.Error as e:\n throw_exception(e)\n return False\n\n\n\n\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"77303499","text":"#Customize this file and copy it to local_settings.py\n\n#Database - SERVER\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': './hh_db.sqlite', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n\n#Don't send error reports... debug on page\nDEBUG = True\nSEND_BROKEN_LINK_EMAILS = False\n\nSITE_URL = 'http://127.0.0.1:8000' #Used for emails and the like\n\n#These must exist in SITE_DICT - see settings.py. Used to determine SITE_NAME, SITE_ID, SITE_URL, templates, etc.\nSITE_DOMAIN = 'hackerhouses.org'\n\nDEFAULT_FROM_EMAIL = 'human@hackerhouses.com'\nSERVER_EMAIL = 'admin@hackerhouses.com'\nEMAIL_HOST = 'bluwiki.com' \nEMAIL_USE_TLS = False\nEMAIL_PORT = 25\n\n\nINTERNAL_IPS = ('127.0.0.1','192.168.0.143','192.168.0.179', '67.180.35.30') \n\n#######\n#Cache#\n#######\n#CACHE_BACKEND = 'file:///var/tmp/django_cache?timeout=18000&cull_percentage=4'\nCACHE_BACKEND = 'locmem:///?max_entries=1000'\n","sub_path":"trunk/hackerhouses/local_settings.example.py","file_name":"local_settings.example.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"437772879","text":"class DataLoad:\n CHAR_SIZE = 24 * 3\n\n def ReadNumData():\n result = []\n rf = open('TFnum.mydat','rb')\n\n for n in range(1857):\n tmp = [0 for m in range(CHAR_SIZE * 8)]\n for i in range(CHAR_SIZE):\n foo = 128\n byte = ord(rf.read(1))\n for j in range(8):\n tmp[(i * 8) + j] = byte / foo\n byte %= foo\n foo /= 2\n result.append(tmp)\n rf.close()\n return result\n\n def ReadLabelData():\n result = []\n rf = open('TFnumL.mydat','rb')\n\n tmp = [0 for i in range(1857)]\n for n in range(1857):\n tmp[n] = ord(rf.read(1))\n result.append(tmp)\n rf.close()\n\n return result\n","sub_path":"NeuralNet/LearningData/DataLoad.py","file_name":"DataLoad.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"427226076","text":"# -*- coding:utf8 -*-\n\nimport random\nimport tensorflow as tf\nimport numpy as np\nimport sys,os\n\nfrom utils.LogHandler import LogHandler\nfrom utils.utils import load_train_valid_labels, batch_iter, valid_iter\n\nclass PALE_LIN(object):\n\n\tdef __init__(self, learning_rate, batch_size, n_input, device, files, log_file):\n\t\tif os.path.exists('log/'+log_file+'.log'):\n\t\t\tos.remove('log/'+log_file+'.log')\n\t\tself.logger = LogHandler(log_file)\n\n\t\tself.device = device\n\n\t\t# Parameters\n\t\tself.learning_rate = learning_rate\n\t\tself.batch_size = batch_size\n\t\tself.valid_prop = .9\n\t\tself.valid_sample_size = 9\n\n\t\tself.cur_epoch = 1\n\n\t\t# Network Parameters\n\t\tself.n_input = n_input # size of node embeddings\n\n\t\t# Set Train Data\n\t\tif not isinstance(files, list) and 
len(files)<3:\n\t\t\tself.logger.info('The alogrihtm needs files like [First Graph File, Second Graph File, Label File]')\n\t\t\treturn\n\n\t\t# tf Graph input\n\t\tself.lookup_f = dict()\n\t\tself.lookup_g = dict()\n\t\tself.look_back_f = list()\n\t\tself.look_back_g = list()\n\t\tself._read_train_dat(files[0], files[1], files[2]) # douban, weibo, label files\n\t\tself.valid_sample_size = min(min(self.valid_sample_size, len(self.look_back_f)-1), len(self.look_back_g)-1)\n\n\t\t# TF Graph Building\n\t\tself.sess = tf.Session()\n\t\tcur_seed = random.getrandbits(32)\n\t\tinitializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=cur_seed)\n\t\twith tf.device(self.device):\n\t\t\twith tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n\t\t\t\tself.mlp_weights()\n\t\t\t\tself.build_train_graph()\n\t\t\t\tself.build_valid_graph()\n\t\t\tself.sess.run(tf.global_variables_initializer())\n\n\tdef _read_labels(self, label_file):\n\t\tlabels = list()\n\t\twith open(label_file, 'r') as lb_handler:\n\t\t\tfor ln in lb_handler:\n\t\t\t\tln = ln.strip()\n\t\t\t\tif not ln:\n\t\t\t\t\tbreak\n\t\t\t\tlabels.append(ln.split())\n\t\treturn labels\n\n\tdef _read_embeddings(self, embed_file, lookup, look_back):\n\t\tembedding = list()\n\t\twith open(embed_file, 'r') as emb_handler:\n\t\t\tidx = 0\n\t\t\tfor ln in emb_handler:\n\t\t\t\tln = ln.strip()\n\t\t\t\tif ln:\n\t\t\t\t\telems = ln.split()\n\t\t\t\t\tif len(elems)==2:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tembedding.append(map(float, elems[1:]))\n\t\t\t\t\tlookup[elems[0]] = idx\n\t\t\t\t\tlook_back.append(elems[0])\n\t\t\t\t\tidx += 1\n\t\treturn np.array(embedding), lookup, look_back\n\n\tdef _read_train_dat(self, embed1_file, embed2_file, label_file):\n\t\tself.L = load_train_valid_labels(label_file, self.valid_prop)\n\t\tself.X, self.lookup_f, self.look_back_f = self._read_embeddings(embed1_file, self.lookup_f, self.look_back_f)\n\t\tself.Y, self.lookup_g, self.look_back_g = self._read_embeddings(embed2_file, self.lookup_g, self.look_back_g)\n\n\tdef mlp_weights(self):\n\t\t# Store layers weight & bias\n\t\tself.weights = dict()\n\t\tself.biases = dict()\n\t\tself.weights['out'] = tf.Variable(tf.random_normal([self.n_input, self.n_input]))\n\t\tself.biases['b_out'] = tf.Variable(tf.zeros([self.n_input]))\n\n\tdef build_lin_code_graph(self, inputs):\n\n\t\t# Output fully connected layer with a neuron\n\t\tcode = tf.matmul(tf.reshape(inputs,[-1,self.n_input]), self.weights['out']) + self.biases['b_out']\n\n\t\treturn code\n\n\tdef build_train_graph(self):\n\n\t\tself.cur_batch_size = tf.placeholder('float32', name='batch_size')\n\n\t\tself.pos_f_inputs = tf.placeholder('float32', [None, self.n_input])\n\t\tself.pos_g_inputs = tf.placeholder('float32', [None, self.n_input])\n\n\t\tself.PF = self.build_lin_code_graph(self.pos_f_inputs) # batch_size*n_input\n\n\t\t# train loss\n\t\tself.loss = tf.reduce_mean(.5*tf.square(self.PF-self.pos_g_inputs))\n\n\t\toptimizer = tf.train.AdamOptimizer(self.learning_rate)\n\t\tself.train_op = optimizer.minimize(self.loss)\n\n\tdef build_valid_graph(self):\n\n\t\t# validation\n\t\tself.valid_f_inputs = tf.placeholder('float32', [None, self.valid_sample_size, self.n_input])\n\t\tself.valid_g_inputs = tf.placeholder('float32', [None, self.valid_sample_size, self.n_input])\n\n\t\tvalid_f = tf.reshape(\n\t\t\t\tself.build_lin_code_graph(self.valid_f_inputs)\n\t\t\t\t, [-1, self.valid_sample_size, self.n_input]\n\t\t\t\t) # batch_size*neg_ratio*n_input\n\t\tself.dot_dist = 
tf.reduce_sum(tf.pow(valid_f-self.valid_g_inputs,2.),axis=2)\n\t\t# self.hamming_dist = tf.reduce_sum(\n\t\t# \t\t\t\t\t\ttf.clip_by_value(tf.sign(tf.multiply(tf.sign(valid_f),tf.sign(valid_g))),.0,1.)\n\t\t# \t\t\t\t\t\t\t, axis=2\n\t\t# \t\t\t\t\t\t)\n\n\tdef train_one_epoch(self):\n\t\tsum_loss = 0.0\n\n\t\t# train process\n\t\t# with tf.device(self.device):\n\t\tbatches = batch_iter(self.L, self.batch_size, 0\\\n\t\t\t\t\t\t\t\t\t\t, self.lookup_f, self.lookup_g, 'f', 'g')\n\t\tbatch_id = 0\n\t\tfor batch in batches:\n\t\t\tpos_f,pos_g,neg_f,neg_g = batch\n\t\t\tif not len(pos_f)==len(pos_g):\n\t\t\t\tself.logger.info('The input label file goes wrong as the file format.')\n\t\t\t\tcontinue\n\t\t\tbatch_size = len(pos_f)\n\t\t\tfeed_dict = {\n\t\t\t\tself.pos_f_inputs:self.X[pos_f,:],\n\t\t\t\tself.pos_g_inputs:self.Y[pos_g,:],\n\t\t\t\tself.cur_batch_size:batch_size\n\t\t\t}\n\t\t\t_, cur_loss = self.sess.run([self.train_op, self.loss],feed_dict)\n\n\t\t\tsum_loss += cur_loss\n\t\t\t# self.logger.info('Finish processing batch {} and cur_loss={}'\n\t\t # .format(batch_id, cur_loss))\n\t\t\tbatch_id += 1\n\t\t# valid process\n\t\tvalid_f, valid_g = valid_iter(self.L, self.valid_sample_size, self.lookup_f, self.lookup_g, 'f', 'g')\n\t\t# print valid_f,valid_g\n\t\tif not len(valid_f)==len(valid_g):\n\t\t\tself.logger.info('The input label file goes wrong as the file format.')\n\t\t\treturn\n\t\tvalid_size = len(valid_f)\n\t\tfeed_dict = {\n\t\t\tself.valid_f_inputs:self.X[valid_f,:],\n\t\t\tself.valid_g_inputs:self.Y[valid_g,:]\n\t\t}\n\t\tvalid_dist = self.sess.run(self.dot_dist,feed_dict)\n\t\t# valid_dist = self.sess.run(self.hamming_dist,feed_dict)\n\t\tmrr = .0\n\t\tfor i in range(valid_size):\n\t\t\tfst_dist = valid_dist[i][0]\n\t\t\tpos = 1\n\t\t\tfor k in range(1,len(valid_dist[i])):\n\t\t\t\tif fst_dist>=valid_dist[i][k]:\n\t\t\t\t\tpos+=1\n\t\t\t# print pos\n\t\t\t# self.logger.info('dist:{},pos:{}'.format(fst_dist,pos))\n\t\t\t# print valid_dist[i]\n\t\t\tmrr += 1./pos\n\t\tself.logger.info('Epoch={}, sum of loss={!s}, mrr={}'\n\t\t\t\t\t\t\t.format(self.cur_epoch, sum_loss/batch_id, mrr/valid_size))\n\t\t# print 'mrr:',mrr/valid_size\n\t\t# self.logger.info('Epoch={}, sum of loss={!s}, valid_loss={}'\n\t\t# .format(self.cur_epoch, sum_loss/batch_id, valid_loss))\n\t\tself.cur_epoch += 1\n\n\tdef _write_in_file(self, filename, vec, tag):\n\t\twith open(filename, 'aw') as res_handler:\n\t\t\tif len(vec.shape)>1:\n\t\t\t\tcolumn_size = vec.shape[1]\n\t\t\telse:\n\t\t\t\tcolumn_size = 1\n\t\t\treshape_vec = vec.reshape(-1)\n\t\t\tvec_size = len(reshape_vec)\n\t\t\tres_handler.write(tag+'\\n')\n\t\t\tfor i in range(0,vec_size,column_size):\n\t\t\t\tres_handler.write('{}\\n'.format(' '.join([str(reshape_vec[i+k]) for k in range(column_size)])))\n\n\tdef save_models(self, filename):\n\t\tif os.path.exists(filename):\n\t\t\tos.remove(filename)\n\t\tfor k,v in self.weights.iteritems():\n\t\t\tself._write_in_file(filename, v.eval(self.sess), k)\n\t\tfor k,v in self.biases.iteritems():\n\t\t\tself._write_in_file(filename, v.eval(self.sess), k)\n","sub_path":"src/lib_dcnh/pale_lin.py","file_name":"pale_lin.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"425728287","text":"import cv2\nimport numpy as np\n\nimg=cv2.imread(r'C:\\Users\\99577\\Desktop\\meng.jpg')\n#img=cv2.resize(img,(176,123))\n#cv2.imwrite(r'C:\\Users\\99577\\Desktop\\meng.jpg',img)\ncv2.imshow('img',img)\n\ndef 
conv(kern,name):\n img2=cv2.filter2D(img,-1,kern)\n cv2.imshow(name,img2)\n cv2.imwrite(r'C:\\Users\\99577\\Desktop\\{}.jpg'.format(name),img2)\n \n\n\nkern=np.ones((3,3))/9\nconv(kern,'mean')\n\nkern=np.array([\n [1/16,2/16,1/16],\n [2/16,4/16,2/16],\n [1/16,2/16,1/16],\n ])\nconv(kern,'gaosi')\n\nkern=np.array([\n [-1,-1,-1],\n [-1,9,-1],\n [-1,-1,-1],\n ])\nconv(kern,'ruihua')\n\nkern=np.array([\n [-1,0,1],\n [-2,0,2],\n [-1,0,1],\n ])\nconv(kern,'shuiping')\n\nkern=np.array([\n [1,2,1],\n [0,0,0],\n [-1,-2,-1]\n ])\nconv(kern,'chuizhi')\n\nkern=np.array([\n [-1,-1,-1],\n [-1,8,-1],\n [-1,-1,-1]\n ])\nconv(kern,'bianyuan')\n\nimg2=cv2.filter2D(img,-1,kern)\ncv2.imshow('img2',img2)\n","sub_path":"python/spyder/ml/icon_recognition/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"97214302","text":"import boto3\nimport aws_cdk.aws_dynamodb as dynamodb\nimport aws_cdk.aws_ec2 as ec2\nimport aws_cdk.aws_ecr as ecr\nimport aws_cdk.aws_ecs as ecs\nimport aws_cdk.aws_elasticloadbalancingv2 as elb\nimport aws_cdk.aws_iam as iam\nimport aws_cdk.aws_logs as logs\nimport aws_cdk.aws_route53 as route53\nimport aws_cdk.aws_route53_targets as route53_targets\nfrom aws_cdk import core as cdk\n\n\nclass MasterDeployStack(cdk.Stack):\n def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n self.master_port = 27900\n self.master_healthcheck_port = 8080\n\n self.vpc, self.cluster = self.gather_shared_resources()\n\n self.table = dynamodb.Table.from_table_name(self, \"server\", \"server\")\n\n self.task = self.create_master_task()\n self.container = self.create_task_container()\n self.nlb = self.create_network_load_balancer()\n self.create_service_and_nlb()\n self.create_route53_record()\n\n def create_table(self):\n return dynamodb.Table(\n self,\n \"server-table\",\n partition_key=dynamodb.Attribute(\n name=\"address\", type=dynamodb.AttributeType.STRING\n ),\n sort_key=dynamodb.Attribute(\n name=\"game\", type=dynamodb.AttributeType.STRING\n ),\n billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,\n )\n\n def create_master_task(self):\n \"\"\"\n Create master task\n \"\"\"\n task = ecs.FargateTaskDefinition(self, \"task\", memory_limit_mib=512, cpu=256)\n\n task.add_to_task_role_policy(self.create_dynamodb_access_policy())\n task.add_to_task_role_policy(self.create_xray_access_policy())\n\n return task\n\n def create_dynamodb_access_policy(self):\n return iam.PolicyStatement(\n resources=[self.table.table_arn],\n actions=[\n \"dynamodb:BatchGetItem\",\n \"dynamodb:GetRecords\",\n \"dynamodb:GetShardIterator\",\n \"dynamodb:Query\",\n \"dynamodb:GetItem\",\n \"dynamodb:Scan\",\n \"dynamodb:BatchWriteItem\",\n \"dynamodb:PutItem\",\n \"dynamodb:UpdateItem\",\n \"dynamodb:DeleteItem\",\n \"dynamodb:DescribeTable\",\n ],\n )\n\n def create_xray_access_policy(self):\n return iam.PolicyStatement(\n resources=[\"*\"],\n actions=[\n \"xray:GetGroup\",\n \"xray:GetGroups\",\n \"xray:GetSampling*\",\n \"xray:GetTime*\",\n \"xray:GetService*\",\n \"xray:PutTelemetryRecords\",\n \"xray:PutTraceSegments\",\n ],\n )\n\n def define_container_image(self):\n master_ecr = ecr.Repository.from_repository_name(\n self, \"ECR\", \"quakeservices_master\"\n )\n\n return ecs.ContainerImage.from_ecr_repository(master_ecr, tag=\"latest\")\n\n def create_task_container(self):\n \"\"\"\n Create container\n \"\"\"\n ecs_healthcheck = ecs.HealthCheck(\n 
command=[\"CMD\", \"curl\", \"-f\", \"http://localhost:8080\"]\n )\n\n log_settings = ecs.LogDrivers.aws_logs(\n stream_prefix=\"master\",\n log_retention=logs.RetentionDays.TWO_WEEKS,\n )\n\n container = self.task.add_container(\n \"master\",\n health_check=ecs_healthcheck,\n start_timeout=cdk.Duration.seconds(15),\n stop_timeout=cdk.Duration.seconds(15),\n image=self.define_container_image(),\n logging=log_settings,\n memory_reservation_mib=256,\n )\n\n container.add_port_mappings(\n ecs.PortMapping(container_port=self.master_port, protocol=ecs.Protocol.UDP)\n )\n container.add_port_mappings(\n ecs.PortMapping(\n container_port=self.master_healthcheck_port, protocol=ecs.Protocol.TCP\n )\n )\n\n return container\n\n def create_service(self):\n \"\"\"\n Create service\n \"\"\"\n return ecs.FargateService(\n self, \"service\", cluster=self.cluster, task_definition=self.task\n )\n\n def create_network_load_balancer(self):\n \"\"\"\n Create Network Load Balancer\n \"\"\"\n return elb.NetworkLoadBalancer(\n self,\n \"nlb\",\n vpc=self.vpc,\n internet_facing=True,\n cross_zone_enabled=True,\n load_balancer_name=\"master\",\n )\n\n def create_listener(self):\n return self.nlb.add_listener(\n \"UDPListener\", port=self.master_port, protocol=elb.Protocol.UDP\n )\n\n def create_service_and_nlb(self):\n service = self.create_service()\n listener = self.create_listener()\n\n nlb_healthcheck = elb.HealthCheck(\n port=str(self.master_healthcheck_port), protocol=elb.Protocol.HTTP\n )\n\n listener.add_targets(\n \"ECS\",\n port=self.master_port,\n targets=[\n service.load_balancer_target(\n container_name=\"master\",\n container_port=self.master_port,\n protocol=ecs.Protocol.UDP,\n )\n ],\n proxy_protocol_v2=True,\n health_check=nlb_healthcheck,\n )\n\n # self.add_udp_overrides(listener, target_group)\n\n def add_udp_overrides(self, listener, target_group):\n \"\"\"\n At the time of writing Protocol would be set to TCP without these overrides\n \"\"\"\n # Required overrides as Protocol never gets set correctly\n cfn_listener = listener.node.default_child\n cfn_listener.add_override(\"Properties.Protocol\", \"UDP\")\n\n # Required overrides as Protocol never gets set correctly\n cfn_target_group = target_group.node.default_child\n cfn_target_group.add_override(\"Properties.Protocol\", \"UDP\")\n\n def create_route53_record(self):\n \"\"\"\n Create Route53 entries\n \"\"\"\n zone = route53.HostedZone.from_lookup(\n self, \"quake_services\", domain_name=\"quake.services\"\n )\n\n target = route53.AddressRecordTarget.from_alias(\n route53_targets.LoadBalancerTarget(self.nlb)\n )\n\n route53.ARecord(self, \"alias\", zone=zone, record_name=\"master\", target=target)\n\n def gather_shared_resources(self):\n client = boto3.client(\"ssm\", region_name=\"ap-southeast-2\")\n r = client.get_parameter(Name=\"/common/shared_vpc_id\")\n if \"Parameter\" in r.keys():\n vpc_id = r[\"Parameter\"][\"Value\"]\n\n vpc = ec2.Vpc.from_lookup(self, \"SharedVPC\", vpc_id=vpc_id)\n\n cluster = ecs.Cluster.from_cluster_attributes(\n self, \"ECS\", cluster_name=\"SharedECSCluster\", vpc=vpc, security_groups=[]\n )\n\n return vpc, cluster\n","sub_path":"master_deploy/master_deploy_stack.py","file_name":"master_deploy_stack.py","file_ext":"py","file_size_in_byte":6936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"261938259","text":"from ..pyimagesearch.SlidingWindow import sliding_window, pyramid\nimport argparse\nimport time\nimport cv2\n\nap = 
argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\n(winW, winH) = (128, 128)\nfor resized in pyramid(image, scale=1.5):\n for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(winW, winH)):\n if window.shape[0] != winH or window.shape[1] != winW:\n continue\n clone = resized.copy()\n cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)\n cv2.imshow(\"Window\", clone)\n cv2.waitKey(1)\n time.sleep(0.025)","sub_path":"DataPreprocessing/pyimagesearch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"543744451","text":"import config\nfrom redis import Redis\nfrom redis import ConnectionPool, exceptions\n\nEXPIRES_TIME = None # Redis过期时间,不设置则默认不过期\n\nclass MyRedis:\n\n IS_RUN_REDIS = 0 # 0 1 2 0:未检测 1:已运行 2:未运行\n\n @staticmethod\n def redis_connection_pool():\n return ConnectionPool(**config.REDIS_DB_URL)\n\n def redis_connect(self):\n r = Redis(connection_pool=self.redis_connection_pool())\n if self.IS_RUN_REDIS == 2:\n print('redis未连接!')\n return None\n if self.IS_RUN_REDIS == 0:\n try:\n key, value = 'testRedisConnect', '测试Redis链接'\n r.set(key, value)\n r.delete(key)\n except exceptions.ConnectionError:\n self.IS_RUN_REDIS = 2\n print('测试 redis 链接失败!')\n return None\n self.IS_RUN_REDIS = 1\n return r\n\n def get_redis_data(self, key):\n conn = self.redis_connect()\n if conn:\n data = conn.get(key)\n if not data:\n return None\n return str(data, 'utf-8')\n return None\n\n def set_redis_data(self, key, value):\n conn = self.redis_connect()\n data = value\n if conn:\n conn.set(\n name=key,\n value=data,\n ex=EXPIRES_TIME # 第三个参数表示Redis过期时间,不设置则默认不过期\n )\n\n def incr_redis(self, key='access_sum'):\n conn = self.redis_connect()\n if conn:\n conn.incr(key)\n\n def get_hash_data(self, name, key=None):\n conn = self.redis_connect()\n if conn:\n d = conn.hget(name, key)\n if not d:\n return None\n return str(d, 'utf-8')\n\n def set_hash_data(self, obj):\n conn = self.redis_connect()\n if conn:\n conn.hset()\n\n\nmy_redis = MyRedis()","sub_path":"app/utils/myRedis.py","file_name":"myRedis.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"174585579","text":"\"\"\"\n给定一个整数数组,你需要寻找一个连续的子数组,如果对这个子数组进行升序排序,那么整个数组都会变为升序排序。\n\n你找到的子数组应是最短的,请输出它的长度。\n\n示例 1:\n\n输入: [2, 6, 4, 8, 10, 9, 15]\n输出: 5\n解释: 你只需要对 [6, 4, 8, 10, 9] 进行升序排序,那么整个表都会变为升序排序。\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n 当前数组与排序后的数组具有不同数值的索引的最大值与最小值得差值,再加上一即为答案\n \"\"\"\n def findUnsortedSubarray(self, nums):\n sort_nums = sorted(nums)\n res = []\n for i, (x, y) in enumerate(zip(sort_nums, nums)):\n if x != y:\n res.append(i)\n # 这里要加上len(res)的原因是要排除nums本身是有序数组的情况\n return len(res) and max(res) - min(res) + 1\n","sub_path":"数组/581-最短无序连续子数组.py","file_name":"581-最短无序连续子数组.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"497426597","text":"from GeoPoint import GeoPoint\nfrom ConditionsFetcher import MetaweatherFetcher\nfrom Chart import Chart\nfrom Converter import Converter\nfrom typing import Tuple, List\n\nimport logging\nimport simplekml\n\nRAINCLOUD_ALTITUDE = 800\nHELLMANN_EXPONENT = 0.34\n\nclass ConditionPredictor:\n\n def line_points(self, startPoint: \"GeoPoint\", 
windDir: float, deltaD: float, N: int) -> List[\"GeoPoint\"]:\n\n point_set = [startPoint]\n \n for i in range(N):\n point_set.append(point_set[-1].distance_bearing_away(distance = deltaD, bearing = windDir))\n\n return point_set[1:]\n\n \n def line_point_arcs(self, startPoint: \"GeoPoint\", windConds: Tuple[float, float], deltaTsec = 10*60, N = 6, dirPlusMinus = 12.0, Nsamples = 4):\n\n aprx_cloud_speed = windConds[0] * (RAINCLOUD_ALTITUDE/10) ** HELLMANN_EXPONENT\n delta_dist = deltaTsec * aprx_cloud_speed\n\n ang_spacing = 2 * dirPlusMinus / Nsamples\n min_ang = (windConds[1] - dirPlusMinus) % 360\n\n point_set = [[startPoint]]\n\n for j in range(Nsamples):\n angle = (min_ang + j*ang_spacing) % 360\n point_set.append(self.line_points(startPoint=startPoint, windDir=angle, deltaD=delta_dist, N=N))\n\n return point_set\n\n def point_set_to_kml(self, point_set):\n\n kml = simplekml.Kml()\n\n for i in point_set:\n if type(i) == list:\n for gp in i:\n kml.newpoint(name=\"[%0.4f,%0.4f]\" % (gp.lat, gp.lon), coords=[(gp.lon, gp.lat)])\n else:\n kml.newpoint(name=\"[%0.4f,%0.4f]\" % (i.lat, i.lon), coords=[(i.lon, i.lat)])\n \n kml.save(\"point_set.kml\")\n \n def circle_points(self, centerPoint: \"GeoPoint\", radiusM: float, Npoints: int) -> List[\"GeoPoint\"]:\n\n point_set = []\n ang_spacing = 360 / Npoints\n\n for n in range(Npoints):\n point_set.append(centerPoint.distance_bearing_away(distance = radiusM, bearing = n * ang_spacing))\n \n return point_set\n\n def check_point_value(self, point: \"GeoPoint\", chartObj: \"Chart\", convObj: \"Converter\") -> float:\n\n precise_x = convObj.lonToX_precise(point.lon)\n precise_y = convObj.latToY_precise(point.lat)\n\n return round(abs(chartObj.f(precise_x, precise_y)[0][0]), 15)\n\n def check_values_around_circle(self, centerPoint: \"GeoPoint\", radiusM: float, Npoints: int, chartObj: \"Chart\", convObj: \"Converter\"):\n\n point_set = self.circle_points(centerPoint = centerPoint, radiusM = radiusM, Npoints = Npoints)\n\n for point in point_set:\n print(point, \"%0.1f m, heading %0.1f value =\" % (centerPoint.distance(point), centerPoint.initial_heading(point)), self.check_point_value(point, chartObj = chartObj, convObj = convObj))\n","sub_path":"ConditionPredictor.py","file_name":"ConditionPredictor.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"270538492","text":"# _*_ coding: utf-8 _*_\n\nimport json\n\n\nf_props = open('../properties.json')\n_properties = json.load(f_props)\nf_props.close()\n\nf_prov = open('../provinces.json')\n_provinces = json.load(f_prov)\nf_prov.close()\n","sub_path":"spotippos/spotippos/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"281555003","text":"\"\"\"CP1404/CP5632 Practical - Client code to use the Car class.\"\"\"\n# Note that the import has a folder (module) in it.\n\nfrom car import Car\n\n\ndef main():\n \"\"\"Demo test code to show how to use car class.\"\"\"\n my_car = Car(180)\n my_car.drive(30)\n my_car.get_name_of_vehicle()\n my_car.print_results()\n\n my_limo = Car(120)\n my_limo.drive(115)\n my_limo.get_name_of_vehicle()\n my_limo.print_results()\n\n\nmain()\n","sub_path":"Pracitcal 6/used_cars.py","file_name":"used_cars.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} 
+{"seq_id":"177668763","text":"from PIL import Image\n\n### Opening and loading the image from the desired directory\nimage = Image.open(\"Sources\\Map\\Map2.png\")\nmap = image.load()\n\n### Storing the image height and width for further use\nmap_width = image.size[0]\nmap_height = image.size[1]\n\nnum_of_values = 1\n\nclass Map(object):\n\n def __init__(self):\n values_txt = open('Sources\\Map\\Map_Values.txt', 'r')\n\n for count, string in enumerate(values_txt):\n pass\n\n num_of_values = count + 1\n values_txt = open('Sources\\Map\\Map_Values.txt', 'r')\n\n # Plain\n self.terrains = []\n self.signs = []\n self.reds = []\n self.greens = []\n self.blues = []\n self.alpha = 255\n\n # read the values from file and store them in lists\n for x in range(0, 9): ##### When adding new you have to increase the range of the for loop in map.assign_values() #####\n self.terrains.append(values_txt.readline())\n self.signs.append(values_txt.readline())\n self.reds.append(values_txt.readline())\n self.greens.append(values_txt.readline())\n self.blues.append(values_txt.readline())\n\n values_txt.close()\n\n # Normalize the lists for use\n self.terrains = [x.strip() for x in self.terrains]\n self.signs = [x.strip() for x in self.signs]\n self.reds = [x.strip() for x in self.reds]\n self.reds = [int(x) for x in self.reds]\n self.greens = [x.strip() for x in self.greens]\n self.greens = [int(x) for x in self.greens]\n self.blues = [x.strip() for x in self.blues]\n self.blues = [int(x) for x in self.blues]\n\n @staticmethod\n def convert_y(y):\n return map_height - y - 1\n\n # Draw the representation of the map into the console\n def display(self, x, y):\n\n y = self.convert_y(y)\n\n # Iterating through the map image\n for row in range(0, map_height): # Rows\n\n for column in range(0, map_width): # Columns\n\n if row == y and column == x:\n print('5', end=' ')\n elif map[column, row] == (self.reds[0], self.greens[0], self.blues[0], self.alpha):\n print(self.signs[0], end=' ')\n elif map[column, row] == (self.reds[1], self.greens[1], self.blues[1], self.alpha):\n print(self.signs[1], end=' ')\n elif map[column, row] == (self.reds[2], self.greens[2], self.blues[2], self.alpha):\n print(self.signs[2], end=' ')\n elif map[column, row] == (self.reds[3], self.greens[3], self.blues[3], self.alpha):\n print(self.signs[3], end=' ')\n elif map[column, row] == (self.reds[4], self.greens[4], self.blues[4], self.alpha):\n print(self.signs[4], end=' ')\n elif map[column, row] == (self.reds[5], self.greens[5], self.blues[5], self.alpha):\n print(self.signs[5], end=' ')\n elif map[column, row] == (self.reds[6], self.greens[6], self.blues[6], self.alpha):\n print(self.signs[6], end=' ')\n elif map[column, row] == (self.reds[7], self.greens[7], self.blues[7], self.alpha):\n print(self.signs[7], end=' ')\n elif map[column, row] == (self.reds[8], self.greens[8], self.blues[8], self.alpha):\n print(self.signs[8], end=' ')\n else:\n print('#', end=' ')\n\n else:\n print('')\n\n\n def particular_objects(self):\n pass\n\n # Print the legend for the map into the console\n def print_legend(self):\n print('Legend:')\n print(' 5 -- You')\n print(' ' + self.signs[0] + ' -- ' + self.terrains[0])\n print(' ' + self.signs[1] + ' -- ' + self.terrains[1])\n print(' ' + self.signs[2] + ' -- ' + self.terrains[2])\n print(' ' + self.signs[3] + ' -- ' + self.terrains[3])\n print(' ' + self.signs[4] + ' -- ' + self.terrains[4])\n print(' ' + self.signs[5] + ' -- ' + self.terrains[5])\n print(' ' + self.signs[6] + ' -- ' + self.terrains[6])\n 
print(' ' + self.signs[7] + ' -- ' + self.terrains[7])\n print(' ' + self.signs[8] + ' -- ' + self.terrains[8])","sub_path":"Map.py","file_name":"Map.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"420086754","text":"\"\"\"API views for account's balance.\"\"\"\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom payment_cards.models import Account\n\n\nclass BalanceViewSet(ViewSet):\n \"\"\"Balances endpoint.\"\"\"\n\n @detail_route(methods=['GET'])\n def ledger(self, request, pk=None):\n \"\"\"\n A method to retrieve an ledger balance for an account.\n\n :param request: HTTP request object\n :type request: rest_framework.request.Request\n :param pk: Account PK (id)\n :type pk: int\n :return: {\n \"id\": int,\n \"ledger_balance\": Decimal,\n \"currency\": str,\n }\n :rtype: rest_framework.response.Response\n \"\"\"\n data = get_object_or_404(\n Account.objects.values('id', 'ledger_balance', 'currency'),\n pk=pk\n )\n return Response(data)\n\n @detail_route(methods=['GET'])\n def available(self, request, pk=None):\n \"\"\"\n A method to retrieve an available balance for an account.\n\n :param request: HTTP request object\n :type request: rest_framework.request.Request\n :param pk: Account PK (id)\n :type pk: int\n :return: {\n \"id\": int,\n \"available_balance\": Decimal,\n \"currency\": str,\n }\n :rtype: rest_framework.response.Response\n \"\"\"\n data = get_object_or_404(\n Account.objects.values('id', 'available_balance', 'currency'),\n pk=pk\n )\n return Response(data)\n","sub_path":"payment_cards/views/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"302359036","text":"import os\nimport utils\nimport logging\nimport kaptan\nfrom yaml.parser import ParserError, ScannerError\nfrom schema import Schema, Use, And, SchemaError\n\n\ndef load_conf():\n \"\"\"\n Reads environment looking for token and returns config.\n \"\"\"\n token = os.environ.get('PACKAGECLOUD_TOKEN')\n if token is None:\n utils.abort('PACKAGECLOUD_TOKEN environment variable is not set. '\n 'Set this token to use packagecloud-curator.')\n http_scheme = 'https'\n api_domain = 'packagecloud.io'\n api_version = 'v1'\n return {\n 'url_base': '{}://{}:@{}/api/{}'.format(\n http_scheme, token, api_domain, api_version),\n 'token': token\n }\n\n\ndef validate_spec(spec):\n \"\"\"\n Lightly validates a delete spec\n \"\"\"\n schema = Schema({\n 'ext': And(str, len, Use(str.lower),\n lambda s: s in ('deb', 'rpm', 'gem'),\n error=\"ext must be one of deb, rpm or gem\"),\n 'distro': And(str, len, Use(str.lower),\n error=\"missing or invalid distro\"),\n 'older_than': And(int, lambda i: i >= 0,\n error=\"older_than must be an integer >= 0\")\n })\n for pkg_def in spec.itervalues():\n try:\n schema.validate(pkg_def)\n except SchemaError as ex:\n utils.abort(\"Invalid spec file: {}. 
\"\n \"See README for spec file details.\".format(ex.message))\n return\n\n\ndef load_delete_spec(spec_path):\n \"\"\"\n Load delete spec from yaml file.\n \"\"\"\n delete_spec = kaptan.Kaptan(handler='yaml')\n\n if not os.path.isfile(spec_path):\n utils.abort(\"Spec file not found: {}\".format(spec_path))\n\n logging.debug('Loading spec file: {}'.format(spec_path))\n try:\n delete_spec.import_config(spec_path)\n if not isinstance(delete_spec.configuration_data, dict):\n raise ValueError\n except (ValueError, ParserError, ScannerError):\n utils.abort(\"{} is not a valid yaml file.\".format(spec_path))\n\n validate_spec(delete_spec.configuration_data)\n return delete_spec.configuration_data\n","sub_path":"packagecloudcurator/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"334883722","text":"import json\nimport data\n\ndef convert_from_py_to_json(py_file):\n with open('data.json', 'w', encoding='utf-8') as fp:\n json.dump(py_file, fp, ensure_ascii=False)\n\nif __name__ == \"__main__\":\n convert_from_py_to_json(data.teachers)\n print('JSON file saved, {} elements.'.format(len(data.teachers)))","sub_path":"py_to_json.py","file_name":"py_to_json.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"272363582","text":"\n# -*- coding: utf-8 -*-\n\nu'''Functions L{parseUTMUPS5}, L{toUtmUps8}, L{UtmUps} and\nL{utmupsZoneBand5} to handle both I{Universal Transverse Mercator\n(U{UTM})}\nand I{Universal Polar Stereographic\n(U{UPS})}\ncoordinates.\n\nA pure Python implementation, partially transcribed from C++ class U{UTMUPS\n}\nby I{Charles Karney}.\n'''\n\nfrom datum import Datums\nfrom dms import RangeError\nfrom ellipsoidalBase import _to4lldn, _to3zBhp, \\\n _UPS_ZONE, _UPS_ZONE_STR, \\\n _UTMUPS_ZONE_MIN, _UTMUPS_ZONE_MAX\nfrom lazily import _ALL_LAZY\nfrom utily import OK\nfrom ups import parseUPS5, toUps8, Ups, UPSError, upsZoneBand5\nfrom utm import parseUTM5, toUtm8, Utm, UTMError, utmZoneBand5\n\n# all public contants, classes and functions\n__all__ = _ALL_LAZY.utmups\n__version__ = '19.04.26'\n\n_MGRS_TILE = 100e3 # block size (C{meter})\n\n_UPS_N_MAX = 27 * _MGRS_TILE\n_UPS_N_MIN = 13 * _MGRS_TILE\n_UPS_S_MAX = 32 * _MGRS_TILE\n_UPS_S_MIN = 8 * _MGRS_TILE\n\n_UTM_C_MAX = 9 * _MGRS_TILE\n_UTM_C_MIN = 1 * _MGRS_TILE\n_UTM_N_MAX = 95 * _MGRS_TILE\n_UTM_N_MIN = 0 * _MGRS_TILE\n_UTM_S_MAX = 100 * _MGRS_TILE\n_UTM_S_MIN = 10 * _MGRS_TILE\n\n_UTM_N_SHIFT = _UTM_S_MAX - _UTM_N_MIN # South minus North UTM northing\n\n\nclass _UpsMinMax(object):\n # UPS ranges for South, North pole\n eMax = _UPS_S_MAX, _UPS_N_MAX\n eMin = _UPS_S_MIN, _UPS_N_MIN\n nMax = _UPS_S_MAX, _UPS_N_MAX\n nMin = _UPS_S_MIN, _UPS_N_MIN\n\n\nclass _UtmMinMax(object):\n # UTM ranges for South-, Northern hemisphere\n eMax = _UTM_C_MAX, _UTM_C_MAX\n eMin = _UTM_C_MIN, _UTM_C_MIN\n nMax = (_UTM_N_MAX + _UTM_N_SHIFT), _UTM_N_MAX\n nMin = _UTM_S_MIN, (_UTM_S_MIN - _UTM_N_SHIFT)\n\n\nclass UTMUPSError(ValueError):\n '''UTM/UPS parse, validate or other error.\n '''\n pass\n\n\ndef parseUTMUPS5(strUTMUPS, datum=Datums.WGS84, Utm=Utm, Ups=Ups, name=''):\n '''Parse a string representing a UTM or UPS coordinate, consisting\n of I{\"zone[band] hemisphere/pole easting northing\"}.\n\n @param strUTMUPS: A UTM or UPS coordinate (C{str}).\n @keyword datum: Optional datum to use (L{Datum}).\n @keyword Utm: Optional 
(sub-)class to return the UTM coordinate\n (L{Utm}) or C{None}.\n @keyword Ups: Optional (sub-)class to return the UPS coordinate\n (L{Ups}) or C{None}.\n @keyword name: Optional name (C{str}).\n\n @return: The UTM or UPS coordinate (L{Utm} or L{Ups}) or 5-tuple\n (C{zone, hemisphere/pole, easting, northing, Band}) if\n I{Utm} respectively I{Ups} or both are C{None} as (C{int,\n 'N'|'S', meter, meter, str}) where C{zone} is C{1..60}\n for UTM or C{0} for UPS and C{Band} is C{\"\"} or\n C{'C'|'D'..'W'|'X'} for UTM or C{'A'|'B'|'Y'|'Z'} for\n UPS.\n\n @raise UTMUPSError: Invalid I{strUTMUPS}.\n\n @see: Functions L{parseUTM5} and L{parseUPS5}.\n '''\n try:\n try:\n u = parseUTM5(strUTMUPS, datum=datum, Utm=Utm, name=name)\n except UTMError:\n u = parseUPS5(strUTMUPS, datum=datum, Ups=Ups, name=name)\n except (UTMError, UPSError):\n raise UTMUPSError('%s invalid: %r' % ('strUTMUPS', strUTMUPS))\n return u\n\n\ndef toUtmUps8(latlon, lon=None, datum=None, Utm=Utm, Ups=Ups, pole='', name=''):\n '''Convert a lat-/longitude point to a UTM or UPS coordinate.\n\n @param latlon: Latitude (C{degrees}) or an (ellipsoidal)\n geodetic C{LatLon} point.\n @keyword lon: Optional longitude (C{degrees}) or C{None}.\n @keyword datum: Optional datum to use this UTM coordinate,\n overriding I{latlon}'s datum (C{Datum}).\n @keyword Utm: Optional (sub-)class to return the UTM coordinate\n (L{Utm}) or C{None}.\n @keyword Ups: Optional (sub-)class to return the UPS coordinate\n (L{Ups}) or C{None}.\n @keyword pole: Optional top/center of UPS (stereographic)\n projection (C{str}, C{'N[orth]'} or C{'S[outh]'}).\n @keyword name: Optional name (C{str}).\n\n @return: The UTM or UPS coordinate (L{Utm} respectively L{Ups})\n or an 8-tuple (C{zone, hemisphere/pole, easting, northing,\n Band, datum, convergence, scale}) if I{Utm} respectively\n I{Ups} is C{None} or I{cmoff} is C{False} as (C{int,\n 'N'|'S', meter, meter, str, degrees, scalar}) where C{zone}\n is C{1..60} for UTM or C{0} for UPS and C{Band} is C{\"\"}\n or C{'C'|'D'..'W'|'X'} for UTM or C{'A'|'B'|'Y'|'Z'} for\n UPS.\n\n @raise RangeError: If I{lat} outside the valid UTM or UPS bands\n or if I{lat} or I{lon} outside the valid range\n and I{rangerrrors} set to C{True}.\n\n @raise TypeError: If I{latlon} is not ellipsoidal or I{lon}\n value is missing.\n\n @raise UTMUPSError: UTM or UPS validation failed.\n\n @raise ValueError: Invalid I{lat} or I{lon}.\n\n @see: Functions L{toUtm8} and L{toUps8}.\n '''\n lat, lon, d, name = _to4lldn(latlon, lon, datum, name)\n z, B, p, lat, lon = utmupsZoneBand5(lat, lon)\n\n if z == _UPS_ZONE:\n u = toUps8(lat, lon, datum=d, Ups=Ups, pole=pole or p, falsed=True, name=name)\n else:\n u = toUtm8(lat, lon, datum=d, Utm=Utm, cmoff=True, name=name)\n return u\n\n\ndef UtmUps(zone, hemipole, easting, northing, band='', datum=Datums.WGS84,\n falsed=True, name=''):\n '''Class-like function to create a UTM/UPS coordinate.\n\n @keyword zone: The UTM (longitudinal) zone with/-out Band\n letter for UTM or for UPS zone C{\"00\"} or\n C{0} (C{str} or C{int}).\n @keyword hemipole: UTM hemisphere or UPS top/center of projection\n (C{str}, C{'N[orth]'} or C{'S[outh]'}).\n @param easting: Easting, see I{falsed} (C{meter}).\n @param northing: Northing, see I{falsed} (C{meter}).\n @keyword band: Optional, UTM (latitudinal) Band letter\n C{'C'|'D'..'W'|'X'} or UPS (polar) Band letter\n C{'A'|'B'|'Y'|'Z'} (C{str}).\n @keyword datum: Optional, the coordinate's datum (L{Datum}).\n @keyword falsed: Both I{easting} and I{northing} are falsed 
(C{bool}).\n @keyword name: Optional name (C{str}).\n\n @return: New UTM or UPS instance (L{Utm} or L{Ups}).\n\n @raise UTMUPSError: UTM or UPS validation failed.\n\n @see: Classes L{Utm} and L{Ups} and Karney's U{UTMUPS\n }.\n '''\n z, B, hp = _to3zBhp(zone, band=band, hemipole=hemipole)\n U = Ups if z in (_UPS_ZONE, _UPS_ZONE_STR) else Utm\n return U(z, hp, easting, northing, band=B, datum=datum, falsed=falsed, name=name)\n\n\ndef utmupsValidate(coord, falsed=False, MGRS=False):\n '''Check a UTM or UPS coordinate.\n\n @param coord: The UTM or UPS coordinate (L{Utm}, L{Ups} or C{5+Tuple}).\n @keyword falsed: C{5+Tuple} easting and northing are falsed (C{bool}).\n @keyword MGRS: Increase easting and northing ranges (C{bool}).\n\n @return: C{None} if validation passed.\n\n @raise UTMUPSError: Validation failed.\n\n @see: Function L{utmupsValidateOK}.\n '''\n\n def _en(en, lo, hi, ename):\n try:\n if lo <= float(en) <= hi:\n return\n except (TypeError, ValueError):\n pass\n t = '%s range [%.0f, %.0f]' % (U, lo, hi)\n raise UTMUPSError('%s outside %s: %g' % (ename, t, en))\n\n if isinstance(coord, (Ups, Utm)):\n zone = coord.zone\n hemi = coord.hemisphere\n e, n = coord.easting, coord.northing\n band = coord.band\n enMM = coord.falsed\n elif isinstance(coord, tuple) and len(coord) > 4:\n zone, hemi, e, n, band = coord[:5]\n enMM = falsed\n else:\n raise UTMUPSError('%s invalid: %r' % ('coord', coord))\n\n z, B, h = _to3zBhp(zone, band, hemipole=hemi)\n\n if z == _UPS_ZONE: # UPS\n import ups as u # PYCHOK expected\n U, M = 'UPS', _UpsMinMax\n else: # UTM\n import utm as u # PYCHOK expected\n U, M = 'UTM', _UtmMinMax\n\n if MGRS:\n U, s = 'MGRS', _MGRS_TILE\n else:\n s = 0\n\n U = '%s %s%s %s' % (U, z,B, h)\n\n i = 'SN'.find(h)\n if i < 0 or z < _UTMUPS_ZONE_MIN \\\n or z > _UTMUPS_ZONE_MAX \\\n or B not in u._Bands:\n raise UTMUPSError('%s %s, %s or %s invalid: %r' % (U,\n 'zone', 'hemisphere', 'band', (zone, hemi, band)))\n\n if enMM:\n _en(e, M.eMin[i] - s, M.eMax[i] + s, 'easting') # PYCHOK .eMax .eMin\n _en(n, M.nMin[i] - s, M.nMax[i] + s, 'northing') # PYCHOK .nMax .nMin\n\n\ndef utmupsValidateOK(coord, falsed=False, ok=OK):\n '''Check a UTM or UPS coordinate.\n\n @param coord: The UTM or UPS coordinate (L{Utm}, L{Ups} or C{5+Tuple}).\n @keyword falsed: C{5+Tuple} easting and northing are falsed (C{bool}).\n @keyword ok: Result to return if validation passed (I{OK}).\n\n @return: I{ok} if validation passed, the L{UTMUPSError} otherwise.\n\n @see: Function L{utmupsValidate}.\n '''\n try:\n utmupsValidate(coord, falsed=falsed)\n return ok\n except UTMUPSError as x:\n return x\n\n\ndef utmupsZoneBand5(lat, lon, cmoff=False):\n '''Return the UTM/UPS zone number, Band letter, hemisphere/pole\n and clipped lat- and longitude for a given location.\n\n @param lat: Latitude in degrees (C{scalar} or C{str}).\n @param lon: Longitude in degrees (C{scalar} or C{str}).\n @keyword cmoff: Offset longitude from the zone's central\n meridian, for UTM only (C{bool}).\n\n @return: 5-Tuple (C{zone, Band, hemisphere/pole, lat, lon}) as\n (C{int, str, 'N'|'S', degrees90, degrees180}) where\n C{zone} is C{1..60} for UTM or C{0} for UPS and\n C{Band} is C{\"\"} or C{'C'|'D'..'W'|'X'} for UTM or\n C{'A'|'B'|'Y'|'Z'} for UPS.\n\n @raise RangeError: If I{lat} outside the valid UTM or UPS bands\n or if I{lat} or I{lon} outside the valid range\n and I{rangerrrors} set to C{True}.\n\n @raise ValueError: Invalid I{lat} or I{lon}.\n\n @see: Functions L{utmZoneBand5} and L{upsZoneBand5}.\n '''\n try:\n return 
utmZoneBand5(lat, lon, cmoff=cmoff)\n except RangeError:\n return upsZoneBand5(lat, lon)\n\n# **) MIT License\n#\n# Copyright (C) 2016-2019 -- mrJean1 at Gmail dot com\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n","sub_path":"pygeodesy/utmups.py","file_name":"utmups.py","file_ext":"py","file_size_in_byte":11980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"423041868","text":"# 输入两个单调递增的链表,输出两个链表合成后的链表,当然我们需要合成后的链表满足单调不减规则。\n\n\n# 新建一个链表按大小存储\nclass Solution:\n def Merge(self, pHead1, pHead2):\n if not pHead1: return pHead2\n if not pHead2: return pHead1\n\n head, Node1, Node2 = ListNode(0), pHead1, pHead2\n Node = head\n while Node1 and Node2:\n if Node1.val < Node2.val:\n Node.next = Node1\n Node1 = Node1.next\n Node = Node.next\n else:\n Node.next = Node2\n Node2 = Node2.next\n Node = Node.next\n if Node1:\n Node.next = Node1\n if Node2:\n Node.next = Node2\n return head.next\n\n\n# 递归\nclass Solution:\n def Merge(self, pHead1, pHead2):\n if not pHead1: return pHead2\n if not pHead2: return pHead1\n if pHead1.val < pHead2.val:\n merge_head = pHead1\n merge_head.next = self.Merge(pHead1.next, pHead2)\n else:\n merge_head = pHead2\n merge_head.next = self.Merge(pHead1, pHead2.next)\n return merge_head","sub_path":"JZ16. 合并两个排序的链表.py","file_name":"JZ16. 合并两个排序的链表.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"479849960","text":"\"\"\"Run the bot.\n\nChannel messages, join/part/quit messages and the like are saved to\nfiles under irclogs and printed to stdout. 
Debugging messages are\nprinted to stderr and saved in botlog.txt.\n\"\"\"\n\nimport atexit\nimport collections\nimport glob\nimport logging\nimport os\nimport time\n\nimport curio\nfrom curio import socket, subprocess\n\nimport bot\n\n\nlogger = logging.getLogger(__name__)\n\nLOG_LEN = 1000\nlogs = {} # {channel: deque, ...}\n\n\ndef _format_msg(msg):\n return f\"[%s] %s\\n\" % (time.strftime('%d %b %H:%M:%S'), msg)\n\n\ndef _log_filename(channel):\n return os.path.join('irclogs', channel + '.txt')\n\n\nasync def log_msg(channel, msg):\n try:\n log = logs[channel]\n except KeyError:\n log = collections.deque(maxlen=LOG_LEN)\n try:\n async with curio.aopen(_log_filename(channel), 'r') as f:\n async for line in f:\n log.append(line)\n except FileNotFoundError:\n # We are running for the first time and nothing is logged\n # yet.\n pass\n logs[channel] = log\n\n print(f\"({channel})\", msg)\n log.append(_format_msg(msg))\n\n\n@atexit.register\ndef save_logs():\n logger.info(\"saving logs\")\n try:\n os.mkdir('irclogs')\n except FileExistsError:\n pass\n\n for channel, lines in logs.items():\n lines.append(_format_msg(\"* Shutting down.\"))\n with open(_log_filename(channel), 'w') as f:\n f.writelines(lines)\n\n\nasync def termbin(iterable):\n \"\"\"Paste the content of iterable to termbin and return URL.\n\n The iterable can be asynchronous or synchronous.\n \"\"\"\n try:\n logger.info(\"sending %d lines to termbin\", len(iterable))\n except TypeError:\n # probably a file object or some other iterator\n logger.info(\"sending content of %r to termbin\", iterable)\n\n async with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n await sock.connect(('termbin.com', 9999))\n\n if hasattr(type(iterable), '__aiter__'):\n async for string in iterable:\n # replace is not the best possible way, but at least\n # better than failing to termbin anything\n await sock.sendall(string.encode('utf-8', errors='replace'))\n else:\n for string in iterable:\n await sock.sendall(string.encode('utf-8', errors='replace'))\n\n byteurl = await sock.recv(1024)\n return byteurl.decode('ascii').strip()\n\n\n@bot.command(\"!log\")\nasync def termbin_log(event, channel=None):\n \"\"\"Termbin the log of the channel.\"\"\"\n if channel is None:\n channel_given = False\n channel = event.target\n else:\n channel_given = True\n\n lines = logs.get(channel, [])\n if lines:\n await event.reply(await termbin(lines))\n else:\n # termbin says \"Use netcat.\" if we send it nothing\n msg = f\"Nothing is logged from {channel} yet!\"\n if not channel_given:\n msg += (\" You can use '!log CHANNEL' to get logs from a \"\n \"specific channel.\")\n await event.reply(msg)\n\n\n@bot.command(\"!src\")\nasync def link_source(event):\n \"\"\"Send a link to my code :D\"\"\"\n linkbytes = await subprocess.check_output([\n 'git', 'config', '--get', 'remote.origin.url'])\n link = linkbytes.decode('utf-8').strip()\n await event.reply(f\"I'm from {link}.\")\n\n\n@bot.command(\"!wtf\")\nasync def do_wtf(event, acronym):\n \"\"\"Translate an acronym to English.\"\"\"\n acronym = acronym.upper()\n async with curio.aopen('wtf-words.txt', 'r') as f:\n async for line in f:\n if line.upper().startswith(acronym + ' '):\n initialisim, definition = line.split(' ', 1)\n definition = definition.lstrip()\n await event.reply(f'{initialisim}: {definition}')\n return\n await event.reply(f\"I have no idea what {acronym} means :(\")\n\n\nbot.add_help_command(\"!help\")\n\n\n@bot.join\n@bot.part\n@bot.quit\nasync def info_handler(event):\n logmsg = \"* {} 
{}s\".format(\n event.sender['nick'], event.msg_type.lower())\n await log_msg(event.target, logmsg)\n\n\n@bot.kick\nasync def kick_handler(event):\n logmsg = \"{} {}s {} (reason: {})\".format(\n event.sender['nick'], event.msg_type.lower(),\n event.target, event.reason)\n await log_msg(event.channel, logmsg)\n\n\n@bot.privmsg\nasync def privmsg_handler(event):\n await log_msg(event.target, \"<%s> %s\" % (\n event.sender['nick'], event.message))\n\n\ndef greeting():\n lines = [\n \"**************************\",\n \"** Welcome to curiomuz! **\",\n \"**************************\",\n \"\\n\",\n \" __ \",\n \" _ / / \",\n \" )/ / \",\n \" / /_ \",\n \" | | \\ \",\n \" |_/ \",\n \"\\n\\n\\n\",\n ]\n for line in lines:\n print(line.center(70).rstrip())\n\n\nasync def main():\n greeting()\n\n logging.basicConfig(\n filename='botlog.txt', datefmt='%d %b %H:%M:%S', level=logging.DEBUG,\n format=\"[%(asctime)s] %(name)s %(levelname)s: %(message)s\")\n # unfortunately it's not possible to log to file and stderr with\n # just basicConfig :(\n logging.getLogger().addHandler(logging.StreamHandler())\n\n bananabot = bot.IrcBot('curiomuz', ['#8banana'])\n await bananabot.connect('chat.freenode.net')\n await bananabot.mainloop()\n\n\nif __name__ == '__main__':\n curio.run(main())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"81"} +{"seq_id":"450917682","text":"a, b, k = map(int,input().split())\n# for i in range(k):\n# if a >= 1:\n# a -= 1\n# elif b >= 1:\n# b -= 1\n# else:\n# continue\n\ntmp = min(k, a)\na -= tmp\nk -= tmp\nb = max(0, b - k)\n\nprint(a, b)","sub_path":"Python_codes/p02818/s178807829.py","file_name":"s178807829.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"606660472","text":"import pandas as pd\nimport numpy as np\n\ndef clean_bp(file_path, sheet_name, usecols=list(range(55)), skiprows=2,\n index_col=0, skipfooter=10, drop_total=False):\n bp_df = pd.read_excel(str(file_path), sheet_name=sheet_name, \n usecols=usecols, skiprows=skiprows, index_col=index_col,\n skipfooter=skipfooter).dropna().T.reset_index()\n columns = bp_df.columns[0]\n bp_df = bp_df.melt(id_vars=columns)\n bp_df.columns = ['date', 'country', bp_df.columns[1]]\n if drop_total:\n bp_df = bp_df.loc[~bp_df.country.str.lower().str.contains('total'), :]\n bp_df.date = pd.to_datetime(bp_df.date, format='%Y')\n bp_df = bp_df.pivot_table(index=['country', 'date'], values=bp_df.columns[2])\n return bp_df","sub_path":"python/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"362942200","text":"from receiverSnd import SoundReceiver\nfrom receiverEKG import EKGReceiver\nfrom receiverEMG import EMGReceiver\nfrom threading import Thread, Condition, Event\nimport pyaudio\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport socket\n\nsoundBuffer = []\nekgBuffer = []\nmusicGraphicsCondition = Condition()\nbufferSize = 9\nnumOfEkgForChunk = 10 # 10 wartosci ekg przypada na jednego chunka\naudioReady = Event()\ngraphicsReady = Event()\n\nnumOfYPoints = 50\nchartsRangeOfTime = 10\nchartXStep = chartsRangeOfTime/float(numOfYPoints)\nchartsXs = np.arange(0, chartsRangeOfTime, chartXStep)\nchartsYs = [0.0] * numOfYPoints\nplotIter = 0\ncounter = 0 # zlicza, czy 
numOfYPoints zostalo przekroczone i trzeba zaktualizowac czas na osi x\npointWaitTime = chartXStep\ndataForTwoSec = []\nstopAllThreads = False\n\nclass ConsumerThread(Thread):\n def run(self):\n global soundBuffer, ekgBuffer\n\n # buforowanie\n SoundReceiver.condition.acquire()\n while len(SoundReceiver.all_recvd_data) < bufferSize:\n print(\"bufforowanie..\")\n SoundReceiver.condition.wait()\n\n EKGReceiver.condition.acquire()\n while len(EKGReceiver.parsed_data) < numOfEkgForChunk * bufferSize:\n print(\"bufforowanie.. ekg\")\n EKGReceiver.condition.wait()\n\n #print (\"dlugosc\", len(SoundReceiver.all_recvd_data))\n #print ([i for i in SoundReceiver.all_recvd_data])\n soundBuffer.append(SoundReceiver.all_recvd_data.pop()) # jeden taki pop() ma okolo 2s dlugosci\n\n for j in range(0, bufferSize):\n\n ekg_chunk = []\n for i in range(0, numOfEkgForChunk):\n ekg_chunk.append(EKGReceiver.parsed_data.pop())\n\n ekgBuffer.append(ekg_chunk)\n\n # koniec buforowania, rozpoczecie odtwarzania\n\n MusicThread().start()\n GraphicsThread().start()\n\n EKGReceiver.condition.release()\n SoundReceiver.condition.release()\n\n # dalsze pobieranie danych\n\n while not stopAllThreads:\n SoundReceiver.condition.acquire()\n if not SoundReceiver.all_recvd_data:\n print(\"oczekuje na dane dzwiekowe..\")\n SoundReceiver.condition.wait()\n\n EKGReceiver.condition.acquire()\n if len(EKGReceiver.parsed_data) < numOfEkgForChunk:\n print(\"oczekuje na dane ekg..\")\n EKGReceiver.condition.wait()\n\n if not stopAllThreads:\n musicGraphicsCondition.acquire()\n\n soundBuffer.append(SoundReceiver.all_recvd_data.pop())\n\n ekg_chunk = []\n for i in range(0, numOfEkgForChunk):\n ekg_chunk.append(EKGReceiver.parsed_data.pop())\n\n ekgBuffer.append(ekg_chunk)\n\n musicGraphicsCondition.notifyAll()\n musicGraphicsCondition.release()\n\n EKGReceiver.condition.release()\n SoundReceiver.condition.release()\n\n # po to, aby moc odblokowac pozostale watki i je zamknac\n musicGraphicsCondition.acquire()\n musicGraphicsCondition.notifyAll()\n musicGraphicsCondition.release()\n\n print (\"Zakonczono watek consumera 1/4\")\n\nclass MusicThread(Thread):\n\n play = pyaudio.PyAudio()\n # niewiedziec czemu, trzeba bylo podzielic 44100Hz na 2, zeby zachowac tempo z utworu wejsciowego\n stream = play.open(format=2, channels=2, rate=22050, output=True)\n\n def run(self):\n\n while not stopAllThreads:\n musicGraphicsCondition.acquire()\n if not soundBuffer or not ekgBuffer:\n musicGraphicsCondition.wait()\n musicGraphicsCondition.release()\n\n if not stopAllThreads:\n audioReady.set()\n graphicsReady.wait()\n audioReady.clear()\n\n #beg = time.time()\n MusicThread.stream.write(''.join(sum(soundBuffer.pop(), [])))\n #print (time.time() - beg)\n\n MusicThread.stream.stop_stream()\n MusicThread.stream.close()\n MusicThread.play.terminate()\n # azeby nie blokowac grafiki\n audioReady.set()\n print (\"Zakonczono watek dzwiekowy. 
2/4\")\n\nclass GraphicsThread(Thread):\n\n def run(self):\n #global counter, plotIter, chartsXs, chartsYs\n global dataForTwoSec\n GraphicsHelper().start()\n\n while not stopAllThreads:\n\n musicGraphicsCondition.acquire()\n if not soundBuffer or not ekgBuffer:\n musicGraphicsCondition.wait()\n musicGraphicsCondition.release()\n\n if not stopAllThreads:\n dataForTwoSec = ekgBuffer.pop()\n\n graphicsNeedHelpReady.set()\n graphicsHelperReady.wait()\n graphicsNeedHelpReady.clear()\n\n graphicsReady.set()\n audioReady.wait()\n graphicsReady.clear()\n\n # azeby nie zablokowac watku helpera i dzwieku\n graphicsNeedHelpReady.set()\n graphicsReady.set()\n print (\"Zakonczono watek graficzny. 3/4\")\n #dataForTwoSec = ekgBuffer.pop()\n #print (ekgBuffer.pop())\n\ngraphicsHelperReady = Event()\ngraphicsNeedHelpReady = Event()\n\nclass GraphicsHelper(Thread):\n\n def run(self):\n global counter, plotIter, chartsXs, chartsYs\n global dataForTwoSec\n\n while not stopAllThreads:\n\n if len(dataForTwoSec) > 0:\n\n tmp_dataForTwoSec = dataForTwoSec\n dataForTwoSec = []\n graphicsHelperReady.set()\n\n for i in tmp_dataForTwoSec:\n newChartsYs = chartsYs\n newChartsYs.insert(0, i)\n newChartsYs.pop()\n plt.cla()\n plt.plot(chartsXs, newChartsYs) # to new jest po to, ze nie moge zmienic chartsYs kiedy jest uzywane przez plot\n chartsYs = newChartsYs\n plt.xlabel('time (s)')\n plt.ylabel('ekg value')\n plt.title('EKG chart')\n plt.ylim(0.8, 1.2)\n plt.draw()\n plt.pause(pointWaitTime-0.05)\n #time.sleep(pointWaitTime - 0.05)\n counter += 1\n\n if counter >= numOfYPoints:\n plotIter += 1\n counter = 0\n plt.cla()\n newChartsXs = np.arange(chartsRangeOfTime * plotIter, chartsRangeOfTime * (plotIter + 1), chartXStep)\n plt.xlim(chartsRangeOfTime * plotIter, chartsRangeOfTime * (plotIter + 1))\n plt.plot(newChartsXs, chartsYs)\n plt.xlabel('time (s)')\n plt.ylabel('ekg value')\n plt.title('EKG chart')\n plt.ylim(0.8, 1.2)\n chartsXs = newChartsXs\n plt.draw()\n else:\n graphicsNeedHelpReady.wait()\n graphicsHelperReady.clear()\n print (\"Zakonczono watek wykresowy. 4/4\")\n plt.cla()\n plt.clf()\n plt.close('all')\n\n\nSENDER_TCP_IP = '127.0.0.1'\nSENDER_TCP_PORT = 7000\n\nBUFFER_SIZE = 100\n\ndef stopReceiverListener():\n global stopAllThreads, sound_recv, ekg_recv\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((SENDER_TCP_IP, SENDER_TCP_PORT))\n s.listen(1)\n conn, addr = s.accept()\n while not stopAllThreads:\n yesornot = conn.recv(BUFFER_SIZE)\n if yesornot == 'y':\n stopAllThreads = True\n\n sound_recv.stopThread()\n ekg_recv.stopThread()\n\n if sound_recv.is_alive():\n sound_recv.join()\n\n if ekg_recv.is_alive():\n ekg_recv.join()\n\n print (\"Receiver otrzymal komunikat o zaprzestaniu wysylania danych przez czujniki. 
Zamknieto watki recv EKG i recv Sound.\")\n # plt.cla()\n # plt.clf()\n # plt.close('all')\n\nsound_recv = SoundReceiver()\nekg_recv = EKGReceiver()\nconsum = ConsumerThread()\n\n# START PROGRAMU\nif __name__ == '__main__':\n\n # plt.plot(chartsXs, chartsYs)\n # plt.show(block=False)\n # plt.close('all')\n #\n # emg = EMGReceiver()\n # emg.start()\n # emg.join()\n\n sound_recv.start()\n ekg_recv.start()\n consum.start()\n Thread(target=stopReceiverListener).start()\n\n plt.plot(chartsXs, chartsYs)\n plt.xlabel('time (s)')\n plt.ylabel('ekg value')\n plt.title('EKG chart')\n plt.ylim(0.8, 1.2)\n plt.show()\n\n if consum.is_alive():\n consum.join()\n\n print (\"Zakonczono wszystkie watki receivera.\")\n\n emg = EMGReceiver()\n emg.start()\n emg.join()\n\n print (\"Zaostala przeprowdzona analiza emg.\")\n\n\n\n\n\n\n\n","sub_path":"main_receiver.py","file_name":"main_receiver.py","file_ext":"py","file_size_in_byte":8690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"566324661","text":"def smallest_multiple(number):\n\n dividers = []\n\n for i in range(1, number + 1):\n dividers.append(i)\n\n smallest = number\n hits = 0\n\n while True:\n for i in range(0, len(dividers)):\n if smallest % dividers[i] == 0:\n hits += 1\n if hits == number:\n return smallest\n else:\n hits = 0\n smallest += 10\n break\n\nprint(smallest_multiple(20))\n","sub_path":"problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"266267381","text":"# -*- coding: utf-8 -*-\n#\n# discription: 作成済のHMMモデルから、隠れ状態を推定する\n# input: CSV形式の時系列数値データ\n# output:\n# - 隠れ状態の数列(csv)\n# - 元の数列と、隠れ状態の数列を重ねたプロット(png) ※output_png=Trueの場合のみ\n# arguments:\n# argvs[1]: データファイルのパス\n# argvs[2]: モデリング対象のカラム名\n# argvs[3]: アウトプットの出力先ディレクトリのパス\n# argvs[4]: 作成済のHMMモデルのpickleファイル\n# note:\n# データファイルの条件:\n# - pandasデータフレームとして読み込めること\n# - カラム名は必須、インデックス有無は問わない\n# - 1レコードが1時点を表す、時系列データを想定\n# - 対象カラムは、数値データであること\n# - 欠損があった場合は、欠損レコードを削除して動作する\n\n# --基本モジュール--\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport pickle\nimport datetime\nimport re # ワイルドカード等の正規表現で使用\n\n# モデリング関連\nimport hmm_utils\n\n# ログ用\nimport traceback\nfrom logging import getLogger, StreamHandler, FileHandler, INFO, WARN\ncmd = \"run_hmm_predict\"\npid = str(os.getpid())\nlogfile = \"/tmp/hmm_tools_\"+pid+\".log\"\nlogger = getLogger(cmd)\nFhandler = FileHandler(logfile)\nFhandler.setLevel(INFO)\nlogger.addHandler(Fhandler)\nShandler = StreamHandler()\nShandler.setLevel(WARN)\nlogger.addHandler(Shandler)\nlogger.setLevel(INFO)\n\nnp.random.seed(0)\n\n# 変数定義\n# モデル\nmodel = \"G\" # Gaussian\n# model = \"GM\" # Gaussian-Mix\n# model = \"M\" # Multinomial # 未サポート\n\n# 固定パラメータ\noutput_png = False\ndiv = 0.1 # データのスケール調整\n\nif output_png is True:\n # 状態フィッティングプロット範囲\n plot_start = 0 # 描写開始位置\n plot_end = 96 # 描写終了位置\n\n\n# 処理開始\nif __name__ == '__main__':\n # 引数取得\n argvs = sys.argv\n arg_str = ' '.join(map(str, argvs))\n\n # ログ関数定義\n def error_exit(code, msg):\n d = datetime.datetime.today()\n logger.error(d.strftime(\"%Y-%m-%d %H:%M:%S\")+\" ERROR \"+cmd+\" - \"\n + str(msg)+\" command: \"+arg_str)\n logfile2 = \"/var/log/hmm_tools_\"+d.strftime(\"%Y%m%d%H%M%S\")+\"_\" \\\n + pid+\".log\"\n os.rename(logfile, logfile2)\n sys.exit(code)\n\n def warn_print(msg):\n d = datetime.datetime.today()\n logger.warn(d.strftime(\"%Y-%m-%d %H:%M:%S\")+\" WARN \"+cmd+\" - \"\n + str(msg)+\" command: 
\"+arg_str)\n\n def debug_print(msg):\n d = datetime.datetime.today()\n logger.info(d.strftime(\"%Y-%m-%d %H:%M:%S\")+\" INFO \"+cmd+\" - \"\n + str(msg)+\" command: \"+arg_str)\n\n debug_print(\"start process.\")\n # 引数チェック\n if len(argvs) <= 4:\n error_exit(1, \"number of args is less than expected. [main]\")\n\n try:\n in_file = str(argvs[1])\n tgt_col = str(argvs[2])\n out_file = str(argvs[3])\n pickle_file = str(argvs[4])\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])+\" [str]\")\n\n # パス関連\n try:\n out_dir = os.path.dirname(out_file)\n if len(out_dir) == 0:\n out_dir = \".\"\n elif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [os.path.dirname/exists/makedirs]\")\n\n if output_png is True:\n # 状態分布プロット格納フォルダ\n save_hmmdist_dir = out_dir+\"/distplot/\"\n # 状態フィッティングプロット格納フォルダ\n save_hmmfitting_dir = out_dir+\"/fitplot/\"\n if not os.path.exists(save_hmmdist_dir):\n os.makedirs(save_hmmdist_dir)\n if not os.path.exists(save_hmmfitting_dir):\n os.makedirs(save_hmmfitting_dir)\n\n # main処理\n debug_print(\"start reading input file.\")\n try:\n in_data = pd.read_csv(in_file)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])+\" [pd.read_csv]\")\n debug_print(\"end reading input file.\")\n\n if tgt_col not in in_data.columns:\n error_exit(1, tgt_col+\" NOT in \"+in_file+\". [main]\")\n\n # NAが含まれる場合の処理用\n NA_index_lst = in_data[in_data[tgt_col] != in_data[tgt_col]].index\n\n # データのスケール調整\n try:\n tmp_data = np.c_[np.array(in_data[tgt_col].dropna())]/div\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [np.c_/np.array/dropna]\")\n if len(tmp_data) == 0:\n error_exit(1, tgt_col+\" in \"+in_file+\" is empty. [main]\")\n\n # モデルの読み込み\n debug_print(\"start loading pickle file.\")\n try:\n with open(pickle_file, 'r') as f:\n hmm_obj = pickle.load(f)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [open/pickle.load]\")\n if model == \"G\":\n try:\n debug_print(\"hmm_obj.startprob_:\"\n + str([str(x) for x in hmm_obj.startprob_])\n + \", hmm_obj.transmat_:\"\n + str([str(x) for x in hmm_obj.transmat_])\n + \", hmm_obj.means_:\"\n + str([str(x) for x in hmm_obj.means_])\n + \", hmm_obj.covars_:\"\n + str([str(x) for x in hmm_obj.covars_]))\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [hmm_obj.XXX]\")\n debug_print(\"end loading pickle file.\")\n\n # m-hmm時の変換。要修正:変換のルールを学習データと同じにする\n if model == \"M\":\n try:\n tmp_data = hmm_utils.replace_data(tmp_data)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [hmm_utils.replace_data]\")\n\n # 隠れ状態の推定\n debug_print(\"start hmm state estimation.\")\n try:\n tmp_ans_state = hmm_obj.predict(tmp_data)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [hmm_obj.predict]\")\n debug_print(\"end hmm state estimation.\")\n\n # NAが含まれる場合の処理\n try:\n if len(NA_index_lst) >= 1:\n tmp_ans_state_new = []\n cnt = 0\n for tmp_index in range(0, len(in_data)):\n if tmp_index in NA_index_lst:\n tmp_ans_state_new.append(np.nan)\n else:\n tmp_ans_state_new.append(tmp_ans_state[cnt])\n cnt = cnt+1\n tmp_ans_state = tmp_ans_state_new\n except:\n error_exit(2, \"function error. 
trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [append/other proc]\")\n\n # 状態推移列のDF化\n debug_print(\"start output hmmstate.\")\n try:\n hmmstate_DF = pd.DataFrame(tmp_ans_state)\n hmmstate_DF.columns = [tgt_col+\"_state\"]\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [pd.DataFrame]\")\n\n # ��態番号逆転の補正\n debug_print(\"start correction of reverse phenomenon.\")\n try:\n # 元データと状態系列を結合\n merged_data = pd.concat([in_data, hmmstate_DF], axis=1)\n # 状態番号を取得\n state_lst = range(len(hmm_obj.startprob_))\n state_min = np.min(state_lst)\n state_max = np.max(state_lst)\n\n # 各状態の最大・最小値を取得\n max_vals = []\n min_vals = []\n for state in state_lst:\n tmp_merged = merged_data[merged_data[tgt_col+\"_state\"] == state]\n max_vals.append(tmp_merged[tgt_col].max())\n min_vals.append(tmp_merged[tgt_col].min())\n\n # リストから最小と最大を除く\n state_lst.remove(state_min)\n state_lst.remove(state_max)\n\n # 変数初期化\n reverse_before = pd.DataFrame()\n roop_cnt = 0\n while True:\n roop_cnt += 1\n\n # 逆転発生個所を探す。\n # 対象データの前後との差分項目を追加\n merged_data[tgt_col+\"_diff\"] = \\\n merged_data[tgt_col] - merged_data[tgt_col].shift(1)\n merged_data[tgt_col+\"_diff-1\"] = \\\n merged_data[tgt_col] - merged_data[tgt_col].shift(-1)\n\n # 対象データの状態番号の前後との差分項目を追加\n merged_data[tgt_col+\"_s_diff\"] = \\\n merged_data[tgt_col+\"_state\"] \\\n - merged_data[tgt_col+\"_state\"].shift(1)\n merged_data[tgt_col+\"_s_diff-1\"] = \\\n merged_data[tgt_col+\"_state\"] \\\n - merged_data[tgt_col+\"_state\"].shift(-1)\n\n # 前の時点に比べて、対象データの状態番号が下がっている、かつ、\n # 対象データが増えている時点のデータを抽出\n tmp_down_data = merged_data[merged_data[tgt_col+\"_s_diff\"] <= -1]\n down_data1 = tmp_down_data[tmp_down_data[tgt_col+\"_diff\"] > 0]\n\n # 前の時点に比べて、対象データの状態番号が上がっている、かつ、\n # 対象データが減っている時点のデータを抽出\n tmp_up_data = merged_data[merged_data[tgt_col+\"_s_diff\"] >= 1]\n up_data1 = tmp_up_data[tmp_up_data[tgt_col+\"_diff\"] < 0]\n\n # 次の時点と比べて、対象データの状態番号が下がっている、かつ、\n # 対象データが増えている時点のデータを抽出\n tmp_down_data = merged_data[merged_data[tgt_col+\"_s_diff-1\"] >= 1]\n down_data2 = tmp_down_data[tmp_down_data[tgt_col+\"_diff-1\"] < 0]\n\n # 次の時点に比べて、対象データの状態番号が上がっている、かつ、\n # 対象データが減っている時点のデータを抽出\n tmp_up_data = merged_data[merged_data[tgt_col+\"_s_diff-1\"] <= -1]\n up_data2 = tmp_up_data[tmp_up_data[tgt_col+\"_diff-1\"] > 0]\n\n # 逆転発生個所\n reverse = pd.concat([down_data2, down_data1, up_data2, up_data1])\n\n # 逆転が発生していない、または、これ以上の補正が出来ない場合は、ループ終了\n if (len(reverse) == 0):\n break\n elif (len(reverse) == len(reverse_before)):\n if (reverse.index == reverse_before.index).all():\n break\n\n # 念のため、補正回数が、全レコード数に達した場合も、ループ終了\n if roop_cnt >= len(in_data):\n warn_print(\"Correction of reverse phenomenon did not end.\")\n break\n\n # 逆転個所を保存\n reverse_before = reverse.copy()\n\n # 補正処理\n # 状態0(state_min)の最小値より、小さい値をとる状態があれば、状態0とする\n tmp_min = min_vals[state_min]\n if not np.isnan(tmp_min): # NAの場合は補正不要\n tmp_reverse = reverse[reverse[tgt_col] < tmp_min]\n tmp_index = tmp_reverse.index\n if len(tmp_index) != 0:\n merged_data.loc[tmp_index, tgt_col+\"_state\"] = state_min\n continue\n\n # 状態2(state_min)の最大値より、大きい値をとる状態があれば、状態2とする\n tmp_max = max_vals[state_max]\n if not np.isnan(tmp_max): # NAの場合は補正不要\n tmp_reverse = reverse[reverse[tgt_col] > tmp_max]\n tmp_index = tmp_reverse.index\n if len(tmp_index) != 0:\n merged_data.loc[tmp_index, tgt_col+\"_state\"] = state_max\n continue\n\n # 状態番号が最小でも最大でもない場合\n for state in state_lst:\n # 状態nの最小値よりも、小さい値をとる状態で、\n # かつ、状態番号がnよりも大きい場合は、状態をnとする\n tmp_min = min_vals[state]\n if not 
np.isnan(tmp_min): # NAの場合は補正不要\n tmp_reverse = reverse[(reverse[tgt_col] < tmp_min) &\n (reverse[tgt_col+\"_state\"] > state)]\n tmp_index = tmp_reverse.index\n if len(tmp_index) != 0:\n merged_data.loc[tmp_index, tgt_col+\"_state\"] = state\n\n # 状態nの最大値よりも、大きい値をとる状態で、\n # かつ、状態番号がnよりも小さい場合は、状態をnとする\n tmp_max = max_vals[state]\n if not np.isnan(tmp_max): # NAの場合は補正不要\n tmp_reverse = reverse[(reverse[tgt_col] > tmp_max) &\n (reverse[tgt_col+\"_state\"] < state)]\n tmp_index = tmp_reverse.index\n if len(tmp_index) != 0:\n merged_data.loc[tmp_index, tgt_col+\"_state\"] = state\n\n # 状態推移列の取得\n hmmstate_DF = pd.DataFrame(merged_data[tgt_col+\"_state\"])\n hmmstate_DF.columns = [tgt_col+\"_state\"]\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [main]\")\n debug_print(\"end correction of reverse phenomenon.\")\n\n # 状態系列のint化(NAが含まれていない場合のみ)\n try:\n if not hmmstate_DF.isnull().values.any():\n hmmstate_DF = hmmstate_DF.astype(int)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [main]\")\n\n # 状態推移列の出力\n try:\n hmmstate_DF.to_csv(out_file, index=False, header=True)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [to_csv]\")\n debug_print(\"end output hmmstate.\")\n\n if output_png is True:\n debug_print(\"start output png file.\")\n plot_data = np.c_[np.array(in_data[tgt_col])]\n\n # ファイル名の区切り記号\n spl_code = '\\.csv'\n regex = re.compile(spl_code)\n\n # ファイル名取得\n tmp_fn = os.path.basename(out_file)\n file_name = regex.split(tmp_fn)[0]\n\n # 状態分布プロット\n tmp_output_fn = \"hmmdis_\"+file_name+\".png\"\n if model == \"G\":\n try:\n hmm_utils.plot_G_HMMdist(tmp_data, hmm_obj,\n save_hmmdist_dir+tmp_output_fn,\n cmd_arg=arg_str)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [hmm_utils.plot_G_HMMdist]\")\n elif model == \"GM\":\n try:\n hmm_utils.plot_GMM_HMMdist(tmp_data, hmm_obj,\n save_hmmdist_dir+tmp_output_fn,\n cmd_arg=arg_str)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [hmm_utils.plot_GMM_HMMdist]\")\n elif model == \"M\":\n try:\n hmm_utils.plot_M_HMMdist(tmp_data, hmm_obj,\n save_hmmdist_dir+tmp_output_fn,\n cmd_arg=arg_str)\n except:\n error_exit(2, \"function error. trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [hmm_utils.plot_M_HMMdist]\")\n # 状態フィッティングプロット\n tmp_output_fn = \"hmmfit_\"+file_name+\".png\"\n try:\n hmm_utils.plot_HMMfit(plot_data, tmp_ans_state, plot_start,\n min(plot_end, len(plot_data)),\n save_hmmfitting_dir+tmp_output_fn)\n except:\n error_exit(2, \"function error. 
trace: \"\n + traceback.format_exc(sys.exc_info()[2])\n + \" [hmm_utils.plot_HMMfit]\")\n debug_print(\"end output png file.\")\n\n debug_print(\"end process.\")\n\n os.remove(logfile)\n\n sys.exit(0)\n","sub_path":"run_hmm_predict.py","file_name":"run_hmm_predict.py","file_ext":"py","file_size_in_byte":17160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"470917338","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 08 17:43:51 2017\r\n\r\n@author: samgale\r\n\r\nannotation data:\r\nhttp://download.alleninstitute.org/informatics-archive/current-release/mouse_ccf/annotation/ccf_2016/\r\n\r\nexpression data example:\r\nhttp://api.brain-map.org/grid_data/download/73636098?include=density\r\nhttp://api.brain-map.org/grid_data/download/73636098?include=intensity\r\nhttp://api.brain-map.org/grid_data/download/73636098?include=energy\r\n\r\n\"\"\"\r\n\r\nimport math, os, urllib, zipfile\r\nfrom xml.dom import minidom\r\nimport nrrd\r\nimport cv2\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef loadRawData(filePath,dtype=np.float32):\r\n fid = open(filePath,'rb')\r\n shape = (67,41,58)\r\n data = np.fromfile(fid,dtype=dtype,count=np.prod(shape)).reshape(shape,order='F').transpose((1,2,0))\r\n fid.close()\r\n return data\r\n\r\ndef getAnnotationIndex(annotationData,annotationStuctures,structureAcronym):\r\n for structure in annotationStructures.getElementsByTagName('structure'):\r\n if structure.childNodes[7].childNodes[0].nodeValue[1:-1]==structureAcronym:\r\n structureID = [int(sub.childNodes[0].nodeValue) for sub in structure.getElementsByTagName('id')]\r\n break\r\n inRegion = np.in1d(annotationData,structureID).reshape(annotationData.shape)\r\n inRegion[:,inRegion.shape[1]//2:] = False\r\n return inRegion\r\n \r\ndef saveFigImg(fig,imageShape,filePath):\r\n imgH,imgW = imageShape\r\n figW,figH = fig.get_size_inches()\r\n fig.patch.set_alpha(0)\r\n fig.set_size_inches(((figW/imgW)*imgW,(figW/imgW)*imgH))\r\n fig.set_dpi((figW/imgW)*fig.get_dpi())\r\n a = fig.gca()\r\n a.set_frame_on(False)\r\n a.set_xticks([])\r\n a.set_yticks([])\r\n plt.axis('off')\r\n plt.xlim(0,imgW)\r\n plt.ylim(imgH,0)\r\n fig.savefig(filePath,transparent=True,bbox_inches='tight',pad_inches=0)\r\n plt.close(fig)\r\n\r\n\r\nbaseDir = r'C:\\Users\\SVC_CCG\\Desktop\\Data\\Atlas'\r\n# baseDir = r'/Users/Gale/Documents/AllenAtlas'\r\n\r\n\r\n# download xml files with experiment info\r\n# using experiments with differential expression between thalamus and gray matter\r\nnumRows = 1000\r\nfor i,startRow in enumerate(range(0,10000,numRows)):\r\n url = 'http://mouse.brain-map.org/api/v2/data/query.xml?criteria=model::Structure,rma::criteria,structure_sets[id$eq2],rma::options[only$eq%27id%27],pipe::list[xstructures$eq%27id%27],service::differential_rows[set$eq%27P56%27][domain1$eq%278%27][domain1_threshold$eq%270,50%27][domain2$eq%27549%27][domain2_threshold$eq%271,50%27][start_row$eq'+str(startRow)+'][num_rows$eq'+str(numRows)+']'\r\n filePath = os.path.join(baseDir,'Genes','thalamus_genes_'+str(i)+'.xml')\r\n # py2\r\n urllib.urlretrieve(url,filePath)\r\n # py3\r\n # urllib.request.urlretrieve(url,filePath)\r\n\r\n\r\n# convert xml info to dataframe\r\nparams = ('id','gene-id','gene-symbol','gene-name','plane-of-section')\r\ndataDict = {key:[] for key in params}\r\nfor i in range(10):\r\n xml = minidom.parse(os.path.join(baseDir,'Genes','thalamus_genes_'+str(i)+'.xml'))\r\n objs = 
xml.getElementsByTagName('object')\r\n for obj in objs:\r\n for key in params:\r\n dataDict[key].append(obj.getElementsByTagName(key)[0].childNodes[0].nodeValue)\r\ndataframe = pd.DataFrame.from_dict(dataDict)\r\ndataframe.to_hdf(os.path.join(baseDir,'Genes','dataframe.hdf5'),'table')\r\n\r\n\r\n# define ROIs (using 25 micron voxel CCF template)\r\nannotationStructures = minidom.parse(os.path.join(baseDir,'annotationStructures.xml'))\r\nannotationData = loadRawData(os.path.join(baseDir,'gridAnnotation.raw'),dtype=np.uint32)\r\n# annotationData = nrrd.read(os.path.join(baseDir,'annotation_100.nrrd'))[0].transpose((1,2,0))\r\n\r\ninLP = getAnnotationIndex(annotationData,annotationStructures,'LP')\r\ninLP[:,inLP.shape[1]//2:,:] = False\r\ninPosteriorLP = inLP.copy()\r\ninPosteriorLP[:,:,:308] = False\r\ninAnteriorLateralLP = inLP.copy()\r\ninAnteriorLateralLP[:,:,308:] = False\r\ninAnteriorMedialLP = inAnteriorLateralLP.copy()\r\ninAnteriorLateralLP[:,174:,:] = False\r\ninAnteriorMedialLP[:,:174,:] = False\r\n\r\ninLGd = getAnnotationIndex(annotationData,annotationStructures,'LGd')\r\ninLGd[:,inLP.shape[1]//2:,:] = False\r\ninLD = getAnnotationIndex(annotationData,annotationStructures,'LD')\r\ninLD[:,inLP.shape[1]//2:,:] = False\r\n\r\n\r\n# get experession energy for each experiment and roi\r\ntempZip = os.path.join(baseDir,'Genes','tempZip.zip')\r\ntempExtract = os.path.join(baseDir,'Genes','tempExtract')\r\n\r\nrois = (inLP,inPosteriorLP,inAnteriorLateralLP,inAnteriorMedialLP,inLGd,inLD)\r\nroiNames = ('LP','PosteriorLP','AnteriorLateralLP','AnteriorMedialLP','LGd','LD')\r\nroiEnergy = {r:np.full(dataframe.shape[0],np.nan) for r in roiNames}\r\n\r\nfor n,expID in enumerate(dataframe.id):\r\n print('analyzing experiment '+str(n+1)+' of '+str(dataframe.shape[0])) \r\n \r\n # download and extract raw energy image\r\n url = 'http://api.brain-map.org/grid_data/download/'+expID+'?include=energy'\r\n # py2\r\n # urllib.urlretrieve(url,tempZip)\r\n # py3\r\n urllib.request.urlretrieve(url,tempZip)\r\n zf = zipfile.ZipFile(tempZip)\r\n zf.extract('energy.raw',tempExtract)\r\n data = loadRawData(os.path.join(tempExtract,'energy.raw'))\r\n \r\n # get mean expression energy for each roi\r\n data[data<0] = np.nan\r\n for roiName,roi in zip(roiNames,rois):\r\n roiEnergy[roiName][n] = np.nanmean(data[roi])\r\n\r\n# add expression energy to dataframe \r\nfor region,energy in roiEnergy.items():\r\n dataframe[region] = energy\r\ndataframe.to_hdf(os.path.join(baseDir,'Genes','dataframe.hdf5'),'table')\r\n\r\n\r\n#\r\na = dataframe.AnteriorLateralLP\r\nb = dataframe.AnteriorMedialLP\r\nr = a/b\r\nr[np.logical_and(a<0.5,b<0.5)] = np.nan\r\nr[r<1] = -1/r[r<1]+1\r\nr[r>=1] -= 1\r\nax = plt.subplot(1,1,1)\r\nax.hist(r[~np.isnan(r)],np.arange(math.floor(r.min()),math.ceil(r.max()),0.5))\r\nax.set_yscale('log')\r\nax.set_ylim([0.9,10000])\r\n\r\n\r\n#\r\ngenesOfInterest = ('Calb1','Calb2','Cbln2','Chrna4','Enc1','Fam81a','Gabra4','Gad1','Gad2','Gbx2','Gfra1','Glra3','Hap1','Homer2','Htr2c','Nrgn','Pamr1','Ralyl','Rgs4','Rorb','Slc6a1','Slc29a1','Tnnt1','Zic1')\r\n\r\npadding = 1\r\nlpRange = [[r.min()-padding,r.max()+padding] for r in np.where(inLP)]\r\nlpIndex = np.s_[lpRange[0][0]:lpRange[0][1]+1,lpRange[1][0]:lpRange[1][1]+1,lpRange[2][0]:lpRange[2][1]+1]\r\n\r\ntempXml = os.path.join(baseDir,'Genes','tempXml.xml')\r\ntempZip = os.path.join(baseDir,'Genes','tempZip.zip')\r\ntempExtract = os.path.join(baseDir,'Genes','tempExtract')\r\nfor gene in genesOfInterest:\r\n url = 
'http://api.brain-map.org/api/v2/data/SectionDataSet/query.xml?criteria=products%5Bid$eq1%5D,genes%5Bacronym$eq%27'+gene+'%27%5D&include=genes'\r\n # py2\r\n urllib.urlretrieve(url,tempXml)\r\n # py3\r\n # urllib.request.urlretrieve(url,tempXml)\r\n xml = minidom.parse(tempXml)\r\n experiments = xml.getElementsByTagName('section-data-set')\r\n horzProj = np.full((len(experiments),)+tuple(r[1]-r[0]+1 for r in lpRange[2:0:-1]),np.nan)\r\n for ind,exp in enumerate(experiments): \r\n expID = exp.getElementsByTagName('id')[0].childNodes[0].nodeValue\r\n url = 'http://api.brain-map.org/grid_data/download/'+expID+'?include=energy'\r\n # py2\r\n urllib.urlretrieve(url,tempZip)\r\n # py3\r\n # urllib.request.urlretrieve(url,tempZip)\r\n try:\r\n zf = zipfile.ZipFile(tempZip)\r\n except:\r\n continue\r\n zf.extract('energy.raw',tempExtract)\r\n data = loadRawData(os.path.join(tempExtract,'energy.raw'))\r\n \r\n# shape = annotationData.shape\r\n# z = np.zeros((shape[0],shape[1],data.shape[2]),dtype=data.dtype)\r\n# for i in range(data.shape[2]):\r\n# z[:,:,i] = cv2.resize(data[:,:,i],shape[1::-1],interpolation=cv2.INTER_LINEAR)\r\n# d = np.zeros(shape,dtype=data.dtype)\r\n# for i in range(z.shape[0]):\r\n# d[i] = cv2.resize(z[i],shape[2:0:-1],interpolation=cv2.INTER_LINEAR)\r\n \r\n data[~inLP] = 0\r\n horzProj[ind] = data[lpIndex].max(axis=0).T\r\n horzProj[ind] /= horzProj[ind].max()\r\n \r\n horzProj[horzProj<0] = np.nan\r\n horzProj = np.nanmean(horzProj,axis=0)\r\n horzProj *= 255/np.nanmax(horzProj)\r\n horzProj = horzProj.round().astype(np.uint8)\r\n\r\n _,contours,_ = cv2.findContours(inLP[lpIndex].astype(np.uint8).max(axis=0).T.copy(order='C'),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n x,y = np.squeeze(contours).T\r\n \r\n fig = plt.figure(facecolor='w',tight_layout=True)\r\n ax = plt.subplot(1,1,1)\r\n ax.imshow(horzProj,cmap='gray',interpolation='none')\r\n ax.plot(np.append(x,x[0]),np.append(y,y[0]),'w',linewidth=4)\r\n saveFigImg(fig,horzProj.shape[0:2],os.path.join(baseDir,'Genes','genesOfInterest',gene+'.png'))\r\n\r\n\r\n","sub_path":"AllenGeneAnalysis.py","file_name":"AllenGeneAnalysis.py","file_ext":"py","file_size_in_byte":8709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"394944775","text":"from fnal_column_analysis_tools.analysis_objects.JaggedCandidateArray import JaggedCandidateArray\nfrom fnal_column_analysis_tools.striped.StripedColumnTransformer import PhysicalColumnGroup\nimport awkward\n\ndef jaggedFromColumnGroup(cgroup):\n if isinstance(cgroup,PhysicalColumnGroup):\n return JaggedCandidateArray.candidatesfromcounts(counts = cgroup.counts(),\n p4 = cgroup.p4Column(),\n **cgroup.otherColumns())\n else:\n return awkward.JaggedArray.fromcounts(cgroup.counts(),\n awkward.Table(cgroup.columns()))\n \n","sub_path":"fnal_column_analysis_tools/striped/ColumnGroup2JaggedTable.py","file_name":"ColumnGroup2JaggedTable.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"621000832","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/site-packages/dbmanagr/daemon.py\n# Compiled at: 2015-10-11 07:17:06\nimport os, sys, BaseHTTPServer, json, urllib2, logging, time, traceback\nfrom dbmanagr.jsonable import Jsonable, as_json\nfrom dbmanagr.utils import mute_stderr\nlogger = 
logging.getLogger(__name__)\n\nclass Encoder(json.JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj, Jsonable):\n return obj.as_json()\n return as_json(obj)\n\n\nclass DaemonHTTPServer(BaseHTTPServer.HTTPServer):\n\n def __init__(self, *args, **kwargs):\n BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)\n self.active = True\n\n def serve_forever(self, poll_interval=0.5):\n while self.active:\n self.handle_request()\n\n\nclass DaemonHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n\n def do_POST(self):\n from dbmanagr.command import navigator, exporter, differ, executer\n from dbmanagr.command import grapher\n commands = {'navigator': navigator, \n 'exporter': exporter, \n 'differ': differ, \n 'executer': executer, \n 'grapher': grapher}\n parts = self.path.split('/')\n command = parts[1]\n if command == 'server-status':\n self.send_response(200)\n self.end_headers()\n return\n if command == 'server-stop':\n self.send_response(200)\n self.end_headers()\n self.server.active = False\n return\n if command not in commands:\n self.send_error(404)\n return\n args = json.loads(self.rfile.read(int(self.headers.getheader('content-length'))))\n try:\n items = mute_stderr(commands[command].execute)(args)\n self.send_response(200)\n self.send_header('Content-Type', 'application/json')\n self.end_headers()\n self.wfile.write(json.dumps(items, cls=Encoder))\n except BaseException as e:\n logger.debug(e)\n self.send_response(500)\n self.send_header('Content-Type', 'application/json')\n self.end_headers()\n self.wfile.write(json.dumps({'__cls__': str(e.__class__.__name__), \n 'message': e.message, \n 'traceback': as_json(traceback.extract_tb(sys.exc_info()[2]))}))\n\n def log_message(self, format_, *args):\n logger.info(format_, args)\n\n\ndef is_running(config):\n try:\n urllib2.urlopen(('http://{host}:{port}/server-status').format(host=config.host, port=config.port), '')\n except BaseException:\n return False\n\n return True\n\n\ndef start_server(config):\n try:\n httpd = DaemonHTTPServer((\n config.host, config.port), DaemonHTTPRequestHandler)\n if os.fork() == 0:\n httpd.serve_forever()\n sys.exit(0)\n return True\n except BaseException:\n pass\n\n return False\n\n\ndef start(config):\n sys.stdout.write('Starting server... ')\n if start_server(config):\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('already running\\n')\n\n\ndef stop(config):\n sys.stdout.write('Stopping server... 
')\n    try:\n        urllib2.urlopen(('http://{host}:{port}/server-stop').format(host=config.host, port=config.port), '')\n    except BaseException:\n        sys.stdout.write('failed\\n')\n    else:\n        sys.stdout.write('OK\\n')\n\n\ndef restart(config):\n    stop(config)\n    time.sleep(1)\n    start(config)\n\n\ndef status(config):\n    if is_running(config):\n        sys.stdout.write('Status: online\\n')\n    else:\n        sys.stdout.write('Status: offline\\n')","sub_path":"pycfiles/dbmanagr-0.28.3.macosx-10.10-x86_64.tar/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
{"seq_id":"316152101","text":"#------------------------------------------------\n# BUILT FOR PYTHON 3.x\n#------------------------------------------------\n#import libraries\n\n#data analysis\nimport pandas as PD\nimport numpy\n\n#FTP access\nfrom ftplib import FTP, error_perm, FTP_TLS, all_errors\n\n#current directories etc\nimport glob, os, sys\n\n#database connect\nimport psycopg2 as pg\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import SQLAlchemyError\n#------------------------------------------------\n#import itertools, sys\n#spinner = itertools.cycle(['-', '/', '|', '\\\\'])\n#sys.stdout.write(next(spinner)) # write the next character\n#sys.stdout.flush() # flush stdout buffer (actual character display)\n#sys.stdout.write('\\r\\b') # erase the last written char\nfilesFound = []\n\n\n#creating the file types we might be looking for\nclass FileType:\n    def __init__(self, givenExtension, includeInSearch):\n        self.extension = givenExtension\n        self.search = includeInSearch\n\ncsv = FileType(\".csv\", True)\nxml = FileType(\".xml\", False)\n\n\n#an optional progress bar to use within the code (otherwise it might seem that the script has crashed without visual feedback)\ndef progressBar(current, total, full_progbar_length):\n    frac = current/total\n    filled_progbar = round(frac * full_progbar_length)\n    sys.stdout.flush() #to prevent the print function freezing from time to time\n    print('\\r\\033[0;32;40m', '#'*filled_progbar + '-'*(full_progbar_length-filled_progbar), '[{:>7.2%}]'.format(frac), end =\"\\033[0;37;40m\") #special characters \\r, \\033 and :>7.2% are used to overwrite the previous print, make the bar green and have the % at the end the same length at all times, respectively\n\n\n#defining function to download the appropriate file(s)\ndef download_file(file):\n    print(\"\\nDownloading %s\" %file, end = \" | \")\n    with open(os.getcwd() + \"/files/\" + file, 'wb') as file_handle: #using with to automatically close the file as well, even when write fails\n        ftps.retrbinary(\"RETR %s\" %file, file_handle.write)\n    print(\"\\033[0;32;40m finished\\033[0;37;40m\")\n\n\ndef find_files(ftps, dirpath):\n    #progressBarCount = 0 #reset count after entering a new directory\n    prev_dir = ftps.pwd()\n    try:\n        ftps.cwd(dirpath)\n    except error_perm as e:\n        print(str(e))\n        return # ignore ones we cannot enter\n    fileListing = ftps.nlst() #get the folders and files in current directory\n    dirLength = len(fileListing) #see how many files are in the directory to set our progress bar end point\n    print(\"\\r Looking through: \" + str(ftps.pwd()), end = \"\") #just a visual echo of the current working ftp directory\n    for file in fileListing:\n        #progressBarCount += 1\n        #progressBar(progressBarCount, dirLength, 20)\n        if not(file.startswith('_')):\n            if(file.endswith(xml.extension)):\n                if (xml.search == True):\n                    filesFound.append(file)\n                    #print(\"\\n Found: \" + file)\n                    #download_file(file)\n                else:\n                    continue\n
            elif(file.endswith(csv.extension)):\n                if (csv.search == True):\n                    filesFound.append(file)\n                    #print(\"\\n Found: \" + file)\n                    #download_file(file)\n                else:\n                    continue\n            else:\n                find_files(ftps, file)\n        else:\n            continue\n    if(ftps.pwd() != \"/\"):\n        ftps.cwd(prev_dir)\n    else:\n        print(\"\\n\\033[0;32;40mFinished. \\n\\033[0;37;40mFound a total of: \" + str(len(filesFound)) + \" matching files.\")\n        print(\"Downloaded the following files: \" + str(filesFound))\n\n\n#connect to postgresql database and write the downloaded files to tables\ndef connect_to_database(database_path):\n    engine = None\n    try:\n        engine = create_engine(database_path)\n    except SQLAlchemyError as e:\n        print(\"\\033[0;31;40mCould not connect to database.\")\n        print(str(e))\n    else:\n        write_to_database(engine)\n    finally:\n        if engine is not None:\n            engine.dispose()\n\ndef write_to_database(engine):\n    path = (os.getcwd() + \"/files\")\n    os.chdir(path)\n    fileList = os.listdir(os.curdir)\n    #print(\"Current listing of files: %s\" %fileList)\n    if(len(fileList) > 0):\n        for file in fileList:\n            if(file.endswith('.csv') and file.startswith('')):\n                df = PD.read_csv(file, encoding = \"latin1\")\n                for columnName in df.columns:\n                    if(('opened' in columnName) or ('closed' in columnName)):\n                        df[columnName] = PD.to_datetime(df[columnName])\n                    elif('percentage' in columnName):\n                        df[columnName] = df[columnName].str.extract('(\\d+)')\n                        df[columnName] = PD.to_numeric(df[columnName], downcast='float')\n                    else:\n                        continue\n                print(\"\\nAdding %s to database\" %file, end = \" | \")\n                #df.to_sql('table1', engine, if_exists=\"append\")\n                print(\" success\")\n                print(\"Removing %s from current directory\" %file, end = \" | \")\n                os.remove(file)\n                print(\" removed\")\n                fileList = os.listdir(os.curdir)\n            elif(file.endswith('.csv')):\n                df = PD.read_csv(file, encoding = \"latin1\")\n                for columnName in df.columns:\n                    if(('opened' in columnName) or ('closed' in columnName)):\n                        df[columnName] = PD.to_datetime(df[columnName])\n                    elif('percentage' in columnName):\n                        df[columnName] = df[columnName].str.extract('(\\d+)')\n                        df[columnName] = PD.to_numeric(df[columnName], downcast='float')\n                    else:\n                        continue\n                print(\"\\nAdding %s to database\" %file, end = \" | \")\n                #df.to_sql('table2', engine, if_exists=\"append\")\n                print(\"\\033[0;32;40m success\\033[0;37;40m\")\n                print(\"Removing %s from current directory\" %file, end = \" | \")\n                os.remove(file)\n                print(\"\\033[0;32;40m removed\\033[0;37;40m\")\n                fileList = os.listdir(os.curdir)\n            else:\n                print(\"\\033[0;31;40mThere aren't any .csv files in %s\\033[0;37;40m\" %path)\n        print(\"\\033[0;32;40mAll files successfully added to database and removed from local directory!\\033[0;37;40m\")\n    else:\n        print(\"\\033[0;31;40mThere are no files in %s to add to database. Check your script again.\\033[0;37;40m\" %path)\n\n\n#defining function to connect to ftp\ndef establish_ftp_connection(host, user, password):\n    try:\n        ftps = FTP_TLS(host) #trying to connect to host address\n        ftps.login(user,password) and ftps.prot_p()\n        print(\"Connected to %s \" %host)\n\n    except all_errors as e: #ready to handle exception\n        print(\"\\033[0;31;40mCould not connect. Error code: \" + str(e) + \"\\033[0;37;40m\")\n        ftps.close()\n\n    else:\n        print(\"\\033[0;37;40mLooking for files in FTP directories...\")\n        find_files(ftps, ftps.pwd()) #look for files in ftp\n        ask_for_approval = input(\"Should the listed files be imported to database? 
Y/N \")\n if(ask_for_approval == \"Y\" or ask_for_approval == \"y\"):\n connect_to_database(\"\")\n elif(ask_for_approval == \"N\" or ask_for_approval == \"n\"):\n print(\"The files have not been imported to the database.\")\n else:\n print(\"Invalid input. Closing the script.\")\n finally:\n ftps.close()\n\n\nif __name__ == (\"__main__\"):\n establish_ftp_connection('', '', '')\n","sub_path":"automation.py","file_name":"automation.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"93212952","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 14 08:39:45 2018\nBEER package\nversion 25/5/18 added fold_and_bin\nversion 1/8/18 update_fold_and bin with stat.binned_statistic\nversion 6/8/18 added loadParams2Bundle\nversion 15/8/18 support for limb darkening coesfs in sysconfig and in load params\nversion 15/8/18 revived check logg function\nversion 19/8/18 added ld coefs to param text\nversion 22/8/18 added q to param text\nversion 28/8/18 added bundle2paramArray function\nversion 29/8/18 added unfoldLC function\n 29/8/18 added beaming and ref to param text\n 3/9/18 added T1,m1,R1 to params\n 20/10/18 corrected time2nu\n 20/10/18 removed mbeam - calc of beamimg amplitude\n 31/10/18 added T0s to T0\n 7/11/18 added atm1,2 to bundle2sysconfig and sysconfig2bundle\n 13/11/18\n 21/11/18:\n 21/11/18 adapted to Phoebe 2.1 - replaced rpole by requiv\nversion 26/12/18:\n added sysconfig2fitparams\n added rootpath fuction\nversion 17/4/19:\n added Rv calculation from orbital elements\n merged pyBEER with pyBEERnas\nversion 30/10/19:\n added limb darkening and grav bright. table interpolation\n Morris Naftilan amplitude calc\nversion 18/11/19:\n corrected the eBEER model adding first non harmonic terms\nversion 18/12/19:\n removed the 2 in the first term of the eBEER model\n@author: Micha\n\"\"\"\nimport numpy as np\nimport scipy.stats as stat\nfrom scipy.interpolate import griddata\n#from astropy.stats import median_absolute_deviation as mad\nfrom sys import gettrace\n\n\ndef time2nu(period,ecc,T0,times):\n \"\"\"\n Calculates true anomaly values for a vector of times\n uses Kepler equation in pyastro package\n https://github.com/paulgriffiths/pyastro\n input:\n T0 - time of periastron\n times - vecor of times [days]\n period [days]\n e - eccenricity\n returns:\n nus - a vector of true anomalies\n \"\"\"\n import pyastro\n\n meanAnomalies=2*np.pi/period*(times-T0)\n eccAnomalies=np.zeros(len(meanAnomalies))\n nus=np.zeros(len(meanAnomalies))\n\n for i in np.arange(len(meanAnomalies)):\n eccAnomalies[i]=pyastro.kepler(meanAnomalies[i],ecc)\n \n s=np.sqrt(1.+ecc)*np.sin(eccAnomalies/2.)\n c=np.sqrt(1.-ecc)*np.cos(eccAnomalies/2.)\n nus=2*np.arctan2(s,c)\n return nus\n\ndef nu2time(period,ecc,T0,nus):\n \"\"\"\n See https://en.wikipedia.org/wiki/Eccentric_anomaly\n test with /home/user/Dropbox/spyder/testTime2nu.py\n \"\"\"\n sinE=np.sqrt(1-ecc**2)*np.sin(nus)/(1+ecc*np.cos(nus))\n cosE=(ecc+np.cos(nus))/(1+ecc*np.cos(nus))\n E=np.arctan2(sinE,cosE)\n E[E<0]+=2*np.pi\n M=E-ecc*np.sin(E)\n times = M*period/2/np.pi+T0\n return times\n\ndef getT0(period,ecc,omega,t0s):\n \"\"\"\n T0 = getT0(period,ecc,omega,t0s) \\n\n inputs: \n t0s - time of sup conj JD \\n\n omega - omega in deg \\n\n ecc = eccentricity \\n\n period days\n \"\"\"\n# omegarad = np.deg2rad(omega)\n# nu0s = np.pi/2. 
-omegarad\n# \n# # see wikipedia eccentric anomaly\n# sinE0 = np.sqrt(1-ecc**2)*np.sin(nu0s)/(1+ecc*np.cos(nu0s))\n# cosE0=(ecc+np.cos(nu0s))/(1+ecc*np.cos(nu0s))\n# E0=np.arctan2(sinE0,cosE0)\n# \n# M0 = E0- ecc*np.sin(E0)\n# T0 = t0s - M0*period/2/np.pi\n \n omegarad = np.deg2rad(omega) \n nu0s = np.pi/2. -omegarad \n \n#nu0 =omegarad+ np.pi/2.0\n # see wikipedia eccentric anomaly\n sinE0 = np.sqrt(1-ecc**2)*np.sin(nu0s)/(1+ecc*np.cos(nu0s))\n cosE0=(ecc+np.cos(nu0s))/(1+ecc*np.cos(nu0s))\n E0=np.arctan2(sinE0,cosE0)\n# \n M0 = E0- ecc*sinE0\n T0 = t0s - M0*period/2/np.pi\n# \n return T0\n\ndef massfn(m1,m2,incl):\n \"\"\"\n Calculate mass function in solr masses\n :param m1, m2 [msun]\n :param incl deg\n :return:\n \"\"\"\n sini = np.sin(np.deg2rad(incl))\n q= m2/m1\n msfn = m1*q**3*sini**3/(1+q)**2\n return msfn\n\ndef k2msfn(period,k):\n\t\"\"\"Calculate mass function from K and the period\n\n\t:param period:\n\t:param k:\n\t:return:\n\t\"\"\"\n\tGconst = 6.67408e-11 # mks m**3/kg/sec**2\n\tmsun = 1.98847e+30\n\tpsec = period*24*3600\n\tkms =k*1e3\n\tmsfn = psec*kms**3/(2*np.pi*Gconst)/msun\n\treturn msfn\n\n\ndef msfn2k(period,msfn):\n \"\"\"\n Calculate K [km/s] form the mass function [msun]\n :param period: [d]\n :param msfn: [msun]\n :return: K [km/s]\n \"\"\"\n Gconst = 6.67408e-11 # mks m**3/kg/sec**2\n psec = period*24*3600\n msun = 1.98847e+30\n K = (msfn*msun*2*np.pi*Gconst/psec)**(1./3.)/1000.\n return K\n\n\ndef calcModelRVs(K,ecc,omega,gamma,nus):\n \"\"\"\n calculates RVs based on spectroscopic orbit params\n inputs:\n K - RV semi amplitude [km/s]\n ecc,omega - eccentricity and omega [deg]\n gamma - system velocity [km/s]\n nus - true anomaly angles vecctor [rad]\n returns a vector of RVs\n \"\"\"\n omegaRad = np.deg2rad(omega)\n return K*(np.cos(nus+omegaRad)+ ecc*np.cos(omegaRad))+gamma\n\ndef sysparams2RVs(period,t0s,ecc,omega,incl,m1,m2,times,gamma=0.):\n T0 = getT0(period,ecc,omega,t0s)\n nus = time2nu(period,ecc,T0,times)\n msfn = massfn(m1,m2,incl)\n K = msfn2k(period,msfn)\n rvs = calcModelRVs(K,ecc,omega,gamma,nus)\n\n return rvs\n\n\ndef chisq(observed,model,sigma):\n\n \"\"\"\n Calculate chi squared\n \"\"\"\n chisq = np.sum(((model-observed)/sigma)**2)\n return chisq\n\ndef bundle2sysConfig(phoebeBundle):\n \"\"\"\n Extract sysConfig dictionary from a Phoebe bundle\n sysConfig,runConfig = dictionary with system parameters\n \"\"\"\n \n keylist_c=['period','incl','ecc','w','m1','m2','rp1','rp2','teff1','teff2',\n 'rho1','rho2','measMassfn','t0supconj',\n 'gravb1','gravb2','ldcoeffs1','ldcoeffs2']\n# keylist_r=['atm1','atm2','boost','irm','ldfunc1','ldfunc2','ntr1','ntr2','RVsigmasScale',\n# 'LCsigmasScale','chosenParams','constraint_type']\n sysConfig=dict.fromkeys(keylist_c,0)\n# runConfig = dict.fromkeys(keylist_r,0)\n sysConfig['period']=phoebeBundle['period@binary@component'].value\n sysConfig['incl']=phoebeBundle['incl@orbit@component'].value\n sysConfig['ecc']=phoebeBundle['ecc@orbit@component'].value\n sysConfig['w']=phoebeBundle['per0@orbit@component'].value\n sysConfig['m1']= phoebeBundle['mass@primary@star@component'].value\n q=phoebeBundle['q@binary@component'].value\n sysConfig['m2']=q*sysConfig['m1']\n sysConfig['rp1']=phoebeBundle['requiv@primary@star@component'].value\n sysConfig['rp2']=phoebeBundle['requiv@secondary@star@component'].value\n sysConfig['teff1']= phoebeBundle['teff@primary@star@component'].value\n sysConfig['teff2']=phoebeBundle['teff@secondary@star@component'].value\n sysConfig['rho1']= 
phoebeBundle['irrad_frac_refl_bol@primary'].value\n sysConfig['rho2']=phoebeBundle['irrad_frac_refl_bol@secondary'].value\n sysConfig['measMassfn']=0.\n sysConfig['t0supconj'] = phoebeBundle['t0_supconj@binary@component'].value\n sysConfig['gravb1'] = phoebeBundle['gravb_bol@primary'].value\n sysConfig['gravb2'] = phoebeBundle['gravb_bol@secondary'].value\n# runConfig['boost'] = phoebeBundle['boosting_method@phoebe01'].value \n# runConfig['irm'] = phoebeBundle['irrad_method@phoebe01'].value \n# runConfig['ldfunc1'] = phoebeBundle['ld_func_bol@primary'].value\n# runConfig['ldfunc2'] = phoebeBundle['ld_func_bol@secondary'].value\n sysConfig['ldcoeffs1'] = phoebeBundle['ld_coeffs_bol@primary'].value.tolist()\n sysConfig['ldcoeffs2'] = phoebeBundle['ld_coeffs_bol@secondary'].value.tolist()\n# runConfig['atm1'] = phoebeBundle['atm@primary'].value\n# runConfig['atm2'] = phoebeBundle['atm@secondary'].value\n# runConfig['ntr1'] = phoebeBundle['ntriangles@primary'].value \n# runConfig['ntr2'] = phoebeBundle['ntriangles@secondary'].value \n\n return sysConfig\n\n\ndef bundle2runConfig(phoebeBundle):\n \"\"\"\n Extract sysConfig dictionary from a Phoebe bundle\n sysConfig,runConfig = dictionary with system parameters\n \"\"\"\n \n\n keylist_r=['atm1','atm2','boost','irm','ldfunc1','ldfunc2','ntr1','ntr2','RVsigmasScale',\n 'LCsigmasScale','chosenParams','constraint_type']\n\n runConfig = dict.fromkeys(keylist_r,0)\n\n runConfig['boost'] = phoebeBundle['boosting_method@phoebe01'].value\n runConfig['irm'] = phoebeBundle['irrad_method@phoebe01'].value\n runConfig['ldfunc1'] = phoebeBundle['ld_func_bol@primary'].value\n runConfig['ldfunc2'] = phoebeBundle['ld_func_bol@secondary'].value\n\n runConfig['atm1'] = phoebeBundle['atm@primary'].value\n runConfig['atm2'] = phoebeBundle['atm@secondary'].value\n runConfig['ntr1'] = phoebeBundle['ntriangles@primary'].value\n runConfig['ntr2'] = phoebeBundle['ntriangles@secondary'].value\n\n return runConfig\n\n\ndef sysConfig2Bundle(sysConfig,phoebeBundle):\n \"\"\"\n phoebeBundle = phoebe binary bundle\n sysConfig = system configuration dictionary\n\n \"\"\"\n phoebeBundle['period@binary@component']=sysConfig['period']\n phoebeBundle['incl@orbit@component']=sysConfig['incl']\n phoebeBundle['ecc@orbit@component']=sysConfig['ecc']\n phoebeBundle['per0@orbit@component']=sysConfig['w']\n phoebeBundle['mass@primary@star@component']=sysConfig['m1']\n phoebeBundle['q@binary@component']=sysConfig['m2']/sysConfig['m1']\n phoebeBundle['requiv@primary@star@component']= sysConfig['rp1']\n phoebeBundle['requiv@secondary@star@component']=sysConfig['rp2']\n phoebeBundle['teff@primary@star@component']=sysConfig['teff1']\n phoebeBundle['teff@secondary@star@component']=sysConfig['teff2']\n if 'syncpar1' in sysConfig.keys():\n phoebeBundle['syncpar@primary@component']=sysConfig['syncpar1']\n else:\n phoebeBundle['syncpar@primary@component']=1.0\n if 'syncpar2' in sysConfig.keys():\n phoebeBundle['syncpar@secondary@component']=sysConfig['syncpar2']\n else:\n phoebeBundle['syncpar@secondary@component']=1.0\n phoebeBundle['irrad_frac_refl_bol@primary']=sysConfig['rho1']\n phoebeBundle['irrad_frac_refl_bol@secondary'] =sysConfig['rho2']\n phoebeBundle['t0_supconj@binary@component'] = sysConfig['t0supconj']\n# phoebeBundle['boosting_method@phoebe01'] = runConfig['boost']\n# phoebeBundle['irrad_method@phoebe01'] = runConfig['irm']\n phoebeBundle['gravb_bol@primary'] = sysConfig['gravb1']\n phoebeBundle['gravb_bol@secondary'] = sysConfig['gravb2']\n# phoebeBundle['ld_func_bol@primary'] = 
sysConfig['ldfunc1']\n# phoebeBundle['ld_func_bol@secondary'] = sysConfig['ldfunc2']\n phoebeBundle['ld_coeffs_bol@primary'] = sysConfig['ldcoeffs1']\n phoebeBundle['ld_coeffs_bol@secondary']= sysConfig['ldcoeffs2']\n\n return phoebeBundle\n\n\n\n \ndef runConfig2Bundle(runConfig,phoebeBundle):\n \"\"\"\n phoebeBundle = phoebe binary bundle\n runConfig = run configuration dictionary\n\n\n\n \"\"\"\n\n phoebeBundle['boosting_method@phoebe01'] = runConfig['boost']\n phoebeBundle['irrad_method@phoebe01'] = runConfig['irm']\n phoebeBundle['ld_func_bol@primary'] = runConfig['ldfunc1']\n phoebeBundle['ld_func_bol@secondary'] = runConfig['ldfunc2']\n\n\n if 'atm1' in runConfig:\n phoebeBundle['atm@primary'] = runConfig['atm1']\n if 'atm2' in runConfig:\n phoebeBundle['atm@secondary'] = runConfig['atm2']\n\n phoebeBundle['ntriangles@primary'] = runConfig['ntr1']\n phoebeBundle['ntriangles@secondary'] = runConfig['ntr2']\n\n return phoebeBundle\n \n#def ini2config(conf):\n# \n# return(general,config,runconfig,fitpar)\n\ndef phaseShift(dt,times,fluxes,period = 1):\n \"\"\"\n shiftedTimes,shiftedFluxes = phaseShift(dt,times,fluxes,period = 1)\n shifts the light curve by dt\n inputs:\n times,fluxes - vectors (for period =1 times = 0.0--1.0)\n period (period =1 default)\n returns times, fluxes shifted by dt folded on period\n \"\"\"\n\n rawTimes=(times+dt) % period\n indx=np.argsort(rawTimes)\n shiftedTimes=rawTimes[indx]\n shiftedFluxes=fluxes[indx]\n\n return(shiftedTimes,shiftedFluxes)\n\n\ndef foldData(period,t0,times,values,sigmas= np.array([0])):\n \"\"\"\n folds data on period around t0\n [phases , values] = foldData(period,t0,times,values,sigmas= np.array([0]))\n sigmas is optional if not given use:\n [phases , values,_] = foldData(times,values,period,t0)]\n \"\"\"\n ltimes=times.copy()\n lvalues=values.copy()\n lsigmas=sigmas.copy()\n ltimes-=t0\n phases=(ltimes % period)/period\n indcs = np.argsort(phases)\n lvalues = lvalues[indcs]\n phases = phases[indcs]\n if sigmas.any():\n lsigmas = lsigmas[indcs]\n return phases,lvalues,lsigmas\n\ndef fold_and_bin(period,t0,times,values,nbins):\n \"\"\"\n Folds and bins a light curve:\\n\n [phases,foldedvalues,bphases,bvalues,bsigmas] = fold_and_bin(period,t0,times,values,nbins) \\n\n uses scipy.stats.binned_statistics \\n\n inputs:\n period, t0, times, values , nbins = number of bins\n if nbins =0 fold only without binning\n returns:\n phases, foldedvalues = folded data before binning \\n\n bphases = phases of the binned data, bvalues = binned data\n\n \"\"\"\n ltimes=times.copy()\n lvalues=values.copy()\n ltimes-=t0\n phases=(ltimes % period)/period\n indcs = np.argsort(phases)\n foldedvalues = lvalues[indcs]\n phases = phases[indcs]\n if nbins > 0:\n bvalues,bphases,binnumber = stat.binned_statistic(phases,foldedvalues,\n statistic='median',bins=np.arange(0.,1.000001,1./nbins))\n bvalues -= bvalues.mean()\n bcount,_,_ = stat.binned_statistic(phases,foldedvalues,\n statistic='count',bins=np.arange(0.,1.000001,1./nbins))\n bstd,_,_ = stat.binned_statistic(phases,foldedvalues,\n statistic=np.std,bins=np.arange(0.,1.000001,1./nbins))\n bphases = bphases[:-1]+0.5/nbins\n bsigmas = bstd /np.sqrt(bcount)\n else:\n bphases = phases\n bvalues = foldedvalues\n bvalues -= bvalues.mean()\n bsigmas = np.ones(len(bvalues))\n return(phases,foldedvalues,bphases,bvalues,bsigmas)\n\n\n\ndef timeBinFast(tin,yin,w):\n \"\"\"\n [tout,yout,eout] = timeBinFast(tin,yin,w)\n Performs time binning by calculating the weighted average of points in each bin\n The function 
divides the total time span of tin into evenly spaced bins of width w\n It then calculates the average of points in each bin, yout\n and the uncertainty of the average eout=1.4826*mad(samples in the bin)\n tin - input time axis. Assumed to be monotonically increasing.\n yin - input data\n w - width of a bin in tin units\n\n\n \"\"\"\n t = tin[np.isfinite(yin)]\n y = yin[np.isfinite(yin)]\n if t.size == 0:\n return\n eps = 1e-4*w\n tout = np.arange(t[0]-eps+w/2.,t[-1]+w/2., w)\n Bins = (np.floor((t-(t[0]-eps))/w)).astype(int)\n ys = np.bincount(Bins,weights=y) # sum y's in each bin\n ns = np.bincount(Bins) # number of points in each bin\n yout = ys/ns\n eout=np.zeros(len(yout))\n for i in range(max(Bins)+1):\n yb = y[Bins == i]\n yb = yb[~np.isnan(yb) ] # ignore nans\n nsb = len(yb)\n eout[i] = 1.4826*np.median(np.abs(yb-np.median(yb)))/np.sqrt(nsb)\n return(tout,yout,eout)\n\n\n\ndef flux2DF(flux): \n \"\"\" Converts flux to differntial flux DF=(F[i]-F[0])/F[0]-mean(DF)\n DF=flux2DF(flux)\n \"\"\"\n #DF = (flux-flux[0])/flux[0] # median\n DF=flux/np.nanmean(flux)-1\n DF -= np.nanmean(DF)\n return(DF)\n\n\n\ndef qTraj(fm,m1):\n \"\"\"\n incl,q = qTraj(fm,m1):\n given m1 and the mass function - calculates\n vector q corresponding to incls.\n\n\n \"\"\"\n incl=np.array([5.,10.,20.,30.,40.,50.,60.,70.,80.,85.,90.])\n sini=np.sin(np.radians(incl))\n A=m1/fm*sini**3\n Qreal=[]\n for curA in A:\n coeffs=[1,2,1,-curA]\n Qs=np.roots(coeffs)\n Qreal.append(np.real(Qs[np.isreal(Qs)][0]))\n q=1/np.asanyarray(Qreal)\n return(incl,q)\n\n\ndef qfromIncl(fm,m1,incl):\n \"\"\"\n q = qfromIncl(fm,m1,incl)\n given m1 and the mass function and incl - calculates q\n \"\"\"\n sini=np.sin(np.radians(incl))\n A=m1/fm*sini**3\n coeffs=[1,2,1,-A]\n Q=np.roots(coeffs)\n Qreal= np.real(Q[np.isreal(Q)][0])\n q=1/Qreal\n return(q)\ndef sysParamText(sysConfig,runConfig):\n \"\"\"\n paramText = sysParamText(sysConfig,runConfig)\n generates a multiline text string with the system params\n input sysConfig and runConfig dictionary\n\n \"\"\"\n paramTextLines=[]\n paramTextLines.append('P= %.3f [d] T0_supconj= %.3f [KJD] \\n'%(sysConfig['period'],\n sysConfig['t0supconj']) )\n paramTextLines.append('e=%.3f w = %.2f incl= %.2f \\n'%(sysConfig['ecc'],\n sysConfig['w'],\n sysConfig['incl']))\n\n paramTextLines.append('M1=%.2f R1= %.2f Teff1= %.2f \\n'%(sysConfig['m1'],\n sysConfig['rp1'],\n sysConfig['teff1']\n ))\n\n paramTextLines.append('M2=%.2f R2= %.2f Teff2= %.2f \\n'%(sysConfig['m2'],\n sysConfig['rp2'],\n sysConfig['teff2']\n ))\n paramTextLines.append('q= %.3f\\n'%(sysConfig['m2']/sysConfig['m1']))\n paramTextLines.append('rho1=%.3f rho2=%.3f \\n'%(sysConfig['rho1'],\n sysConfig['rho2']))\n if 'syncpar1' in sysConfig.keys():\n paramTextLines.append('syncpar1= %.3f \\n'%(sysConfig['syncpar1']))\n\n if 'syncpar2' in sysConfig.keys():\n paramTextLines.append('syncpar2= %.3f \\n'%(sysConfig['syncpar2']))\n\n paramTextLines.append('gb1=%.3f gb2=%.3f \\n'%(sysConfig['gravb1'],\n sysConfig['gravb2']))\n paramTextLines.append('ldfunc1 %s \\n'%runConfig['ldfunc1'])\n if runConfig['ldfunc1'] == 'quadratic':\n paramTextLines.append(\n 'LD_coeffs1= %5g , %5g \\n'%(sysConfig['ldcoeffs1'][0],sysConfig['ldcoeffs1'][1]))\n elif runConfig['ldfunc2'] == 'linear':\n# paramTextLines.append(\n# 'LD_coeff1= %5g , \\n'%sysConfig['ldcoeffs1'][0])\n paramTextLines.append(\n 'LD_coeff1= %5g , \\n'%sysConfig['ldcoeffs1'])\n else:\n paramTextLines.append(\n 'LD_coeff1= undefined \\n')\n paramTextLines.append('ldfunc2 %s 
\\n'%runConfig['ldfunc2'])\n if runConfig['ldfunc2'] == 'quadratic':\n paramTextLines.append(\n 'LD_coeffs2= %5g , %5g \\n'%(sysConfig['ldcoeffs2'][0],sysConfig['ldcoeffs2'][1]))\n elif runConfig['ldfunc2'] == 'linear':\n# paramTextLines.append(\n# 'LD_coeff2= %5g , \\n'%sysConfig['ldcoeffs2'][0])\n paramTextLines.append(\n 'LD_coeff2= %5g , \\n'%sysConfig['ldcoeffs2'])\n else:\n paramTextLines.append(\n 'LD_coeff2= undefined \\n')\n paramTextLines.append('ntr1=%d ntr2 = %d \\n'%(runConfig['ntr1'],\n runConfig['ntr2']))\n paramTextLines.append('Beaming= %s Ref. = %s \\n'%(runConfig['boost'],\n runConfig['irm']))\n paramTextLines.append('atm1= %s atm2= %s \\n'%(runConfig['atm1'],\\\n runConfig['atm2']))\n paramTextLines.append('alpharefl1=%.2e alpharefl2=%.2e\\n'%(sysConfig['alpharefl1'],sysConfig['alpharefl2']))\n paramText =''.join(paramTextLines)\n return paramText\n\ndef reducedSysParamText(sysConfig):\n \"\"\"\n paramText = reducedSysParamText(sysConfig)\n generates a multiline text string with the system params\n input sysConfig dictionary\n\n \"\"\"\n paramTextLines=[]\n paramTextLines.append('P= %.3f [d] T0_supconj= %.3f [KJD] \\n'%(sysConfig['period'],\n sysConfig['t0supconj']) )\n paramTextLines.append('e=%.3f w = %.2f incl= %.2f \\n'%(sysConfig['ecc'],\n sysConfig['w'],\n sysConfig['incl']))\n\n paramTextLines.append('M1=%.2f R1= %.2f Teff1= %.2f \\n'%(sysConfig['m1'],\n sysConfig['rp1'],\n sysConfig['teff1']\n ))\n\n paramTextLines.append('M2=%.2f R2= %.2f Teff2= %.2f \\n'%(sysConfig['m2'],\n sysConfig['rp2'],\n sysConfig['teff2']\n ))\n paramTextLines.append('q= %.3f\\n'%(sysConfig['m2']/sysConfig['m1']))\n paramText =''.join(paramTextLines)\n return paramText\n\n\n\ndef generateLCRVset(bundle,LCtimes,RVtimes):\n \"\"\"\n Calculate Df and RV for a vector of given times (JDs) \\n\n Dfs,RVs = generateLCRVset(k,LCtimes,RVtimes) \\n\n Dfs in ppm \\n\n bundle - pheobe bundle \\n\n\n \"\"\"\n# \n if 'calcRVs' in bundle.datasets:\n bundle.remove_dataset('calcRVs')\n bundle.add_dataset('rv', times=RVtimes, dataset='calcRVs')\n\n bundle['passband@calcRVs@dataset']='Kepler:mean'\n\n if 'calcLC' in bundle.datasets:\n bundle.remove_dataset('calcLC')\n bundle.add_dataset('lc', times=LCtimes, dataset='calcLC')\n\n bundle['passband@LCcalc@dataset']='Kepler:mean'\n\n bundle.run_compute(model='curModel')\n fluxes = bundle['fluxes@curModel@calcLC'].value\n Dfs=flux2DF(fluxes)*1.0e6\n RVs = bundle['rvs@primary@curModel@calcRVs'].value\n RVs-=RVs.mean()\n\n return(Dfs,RVs)\n\n\ndef setBundleParam(bundle,pName,value):\n \"\"\"\n sets a single parameter into a bundle\n \"\"\"\n if pName in ['P','period']:\n bundle['period@binary@component'] = value # period [0]\n if pName in ['t0s','t0supconj']:\n bundle['t0_supconj@binary@component'] = value # t0s [1]\n if pName in ['e','ecc']:\n bundle['ecc@orbit@component'] = value #e [2]\n if pName =='w':\n bundle['per0@orbit@component'] = value #w [3]\n if pName =='incl':\n bundle['incl@orbit@component'] = value # incl [4]\n if pName in ['T1','teff1']:\n bundle['teff@primary@star@component'] = value # T1 [5]\n if pName =='m1':\n bundle['mass@primary@star@component'] = value # M1 [6]\n if pName in ['R1','rp1']:\n bundle['requiv@primary@star@component'] = value # R1 [7]\n if pName =='q':\n bundle['q@binary@component'] = value #q [8]\n if pName in ['T2','teff2']:\n bundle['teff@secondary@star@component'] = value #T2 [9]\n if pName in ['R2' ,'rp2']:\n bundle['requiv@secondary@star@component'] = value # R2 [10]\n if pName =='rho1':\n 
bundle['irrad_frac_refl_bol@primary'] = value #rho1 [11]\n if pName =='rho2':\n bundle['irrad_frac_refl_bol@secondary'] = value #rho2 [12]\n if pName in ['gb1','gravb1']:\n bundle['gravb_bol@primary'] = value #gb1 [13]\n if pName in ['gb2','gravb2']:\n bundle['gravb_bol@secondary'] = value #gb2 [14]\n # TODO implement setting one ldc\n # if 'ldc11' and 'ldc12' in par_names:\n # ldc1arr = np.array([par_values[par_names.index('ldc11')],par_values[par_names.index('ldc12')]])\n # bundle['ld_coeffs_bol@primary'] = ldc1arr #ldcoeffs1 [12,13]\n # if 'ldc21' and 'ldc22' in par_names:\n # ldc2arr = np.array([par_values[par_names.index('ldc21')],par_values[par_names.index('ldc22')]])\n # bundle['ld_coeffs_bol@secondary'] = ldc2arr #ldcoeffs1 [14,15]\n\n return(bundle)\n\ndef loadParams2Bundle(bundle,par_names,par_values):\n \"\"\"\n bundle=loadParams2Bundle(bundle,par_names,par_values) \\n\n loads parameter values into a bundle \\n\n inputs:\n bundle \\n\n par_names = list of paramater names \\n\n par_values - list of parameter values with the same order as parameter names\n\n\n \"\"\"\n\n # assign params:\n if 'P' in par_names:\n bundle['period@binary@component'] = par_values[par_names.index('P')] # period [0]\n if 'period' in par_names:\n bundle['period@binary@component'] = par_values[par_names.index('period')] # period [0]\n\n if 't0supconj' in par_names:\n bundle['t0_supconj@binary@component'] = par_values[par_names.index('t0supconj')] # t0s [1]\n if 't0s' in par_names:\n bundle['t0_supconj@binary@component'] = par_values[par_names.index('t0s')] # t0s [1]\n\n if 'e' in par_names:\n bundle['ecc@orbit@component'] = par_values[par_names.index('e')] #e [2]\n if 'ecc' in par_names:\n bundle['ecc@orbit@component'] = par_values[par_names.index('ecc')] #e [2]\n\n if 'w' in par_names:\n bundle['per0@orbit@component'] = par_values[par_names.index('w')] #w [3]\n if 'incl' in par_names:\n bundle['incl@orbit@component'] = par_values[par_names.index('incl')] # incl [4]\n\n if 'T1' in par_names:\n bundle['teff@primary@star@component'] = par_values[par_names.index('T1')]# T1 [5]\n if 'teff1' in par_names:\n bundle['teff@primary@star@component'] = par_values[par_names.index('teff1')]# T1 [5]\n\n if 'm1' in par_names:\n bundle['mass@primary@star@component'] = par_values[par_names.index('m1')] # M1 [6]\n if 'M1' in par_names:\n bundle['mass@primary@star@component'] = par_values[par_names.index('M1')] # M1 [6]\n\n if 'R1'in par_names:\n bundle['requiv@primary@star@component'] = par_values[par_names.index('R1')] # R1 [7]\n if 'rp1'in par_names:\n bundle['requiv@primary@star@component'] = par_values[par_names.index('rp1')] # R1 [7]\n\n if 'q' in par_names:\n bundle['q@binary@component'] = par_values[par_names.index('q')] #q [8]\n\n if 'T2' in par_names:\n bundle['teff@secondary@star@component'] = par_values[par_names.index('T2')] #T2 [9]\n if 'teff2' in par_names:\n bundle['teff@secondary@star@component'] = par_values[par_names.index('teff2')] #T2 [9]\n\n if 'R2' in par_names:\n bundle['requiv@secondary@star@component'] = par_values[par_names.index('R2')] # R2 [10]\n if 'rp2' in par_names:\n bundle['requiv@secondary@star@component'] = par_values[par_names.index('rp2')] # R2 [10]\n\n if 'rho1' in par_names:\n bundle['irrad_frac_refl_bol@primary'] = par_values[par_names.index('rho1')] #rho1 [11]\n if 'rho2' in par_names:\n bundle['irrad_frac_refl_bol@secondary'] = par_values[par_names.index('rho2')] #rho2 [12]\n if 'gb1' in par_names:\n bundle['gravb_bol@primary'] = par_values[par_names.index('gb1')] #gb1 [13]\n if 
'gravb1' in par_names:\n bundle['gravb_bol@primary'] = par_values[par_names.index('gravb1')] #gb1 [13]\n if 'gb2' in par_names:\n bundle['gravb_bol@secondary'] = par_values[par_names.index('gb2')] #gb2 [14]\n if 'gravb2' in par_names:\n bundle['gravb_bol@secondary'] = par_values[par_names.index('gravb2')] #gb2 [14]\n if 'ldc11' and 'ldc12' in par_names:\n ldc1arr = \\\n np.array([par_values[par_names.index('ldc11')],par_values[par_names.index('ldc12')]])\n bundle['ld_coeffs_bol@primary'] = ldc1arr #ldcoeffs1 [15,16]\n if 'ldc21' and 'ldc22' in par_names:\n ldc2arr = \\\n np.array([par_values[par_names.index('ldc21')],par_values[par_names.index('ldc22')]])\n bundle['ld_coeffs_bol@secondary'] = ldc2arr #ldcoeffs1 [17,18]\n return(bundle)\n\n\ndef bundle2paramArray(bundle,names):\n \"\"\"\n Generate a param array based on a list of param names\n Reads the param values from the bundle\n the parameters are in the same order as names\n \"\"\"\n paramList =[]\n for curName in names:\n # assign params:\n if curName in ['period','P']:\n paramList.append(bundle['period@binary@component'].value) # period [0]\n if curName in ['t0s' ,'t0supconj']:\n paramList.append(bundle['t0_supconj@binary@component'].value) # t0s [1]\n if curName in['ecc','e']:\n paramList.append(bundle['ecc@orbit@component'].value) #e [2]\n if curName =='w':\n paramList.append(bundle['per0@orbit@component'].value) #w [3]\n if curName =='incl':\n paramList.append(bundle['incl@orbit@component'].value) # incl [4]\n if curName in ['T1','teff1']:\n paramList.append(bundle['teff@primary@star@component'].value) # T1 [5]\n if curName in [ 'm1','M1']:\n paramList.append(bundle['mass@primary@star@component'].value) # M1 [6]\n if curName in ['R1','rp1']:\n paramList.append(bundle['requiv@primary@star@component'].value) # R1 [7]\n if curName =='q':\n paramList.append(bundle['q@binary@component'].value) #q [8]\n if curName in ['T2','teff2']:\n paramList.append(bundle['teff@secondary@star@component'].value) #T2 [9]\n if curName in ['R2','rp2']:\n paramList.append(bundle['requiv@secondary@star@component'].value) # R2 [10]\n if curName =='rho1':\n paramList.append(bundle['irrad_frac_refl_bol@primary'].value) #rho1 [11]\n if curName =='rho2':\n paramList.append(bundle['irrad_frac_refl_bol@secondary'].value) #rho2 [12]\n if curName in ['gb1','gravb1']:\n paramList.append(bundle['gravb_bol@primary'].value) #gb1 [13]\n if curName in ['gb2','gravb2']:\n paramList.append(bundle['gravb_bol@secondary'].value) #gb2 [14]\n if curName =='ldc11':\n paramList.append(bundle['ld_coeffs_bol@primary'].value[0]) #ldcoeffs1 [15,16]\n if curName =='ldc12':\n paramList.append(bundle['ld_coeffs_bol@primary'].value[1])\n if curName =='ldc21':\n paramList.append(bundle['ld_coeffs_bol@secondary'].value[0]) #ldcoeffs1 [17,18]\n if curName =='ldc22':\n paramList.append(bundle['ld_coeffs_bol@secondary'].value[1])\n\n\n paramArray = np.asarray(paramList)\n return(paramArray)\n#\ndef checkLogg(phoebe_bundle):\n \"\"\"\n logg1,logg2=checkLogg(phoebe_bundle)\n k is a phoebe bundle\n \"\"\"\n\n logg_sun=4.43812\n rp1=phoebe_bundle['requiv@primary@star@component'].value\n rp2=phoebe_bundle['requiv@secondary@star@component'].value\n m1= phoebe_bundle['mass@primary@star@component'].value\n q=phoebe_bundle['q@binary@component'].value\n logg1=logg_sun+np.log10((m1)/rp1**2)\n logg2=logg_sun+np.log10((m1*q)/rp2**2)\n return (logg1,logg2)\n\ndef calcLogg(M,R):\n \"\"\"\n Calc logg from M and R (in solar units)\n logg = calcLog(M,R)\n logg = 4.43812 + np.log10(M) - 2*np.log10(R) \n 
\"\"\"\n logg_sun=4.43812 \n logg=logg_sun+np.log10((M)/R**2)\n if np.isnan(logg):\n print('logg %.2f M= %.1f R= %.1f\\n'%(logg,M,R))\n return logg\n\n\ndef unfoldLC(phases,foldedLC,times,t0,P):\n \"\"\"\n [times,unfoldedLC,fphases,fLC]= unfoldLC(phases,foldedLC,times,t0,P)\n unfolds phased LC\n inputs:\n phases, foldedLC\n time - vector of full LC times\n t0 and P are the t0 and P of the folded LC\n if phases do not contain 0 and 1\n the mean values of the first and last LC elements are inserted\n output:\n times,unfoldedLC = unfolded LC\n fphases, flc = phased data with pahse 0 ans 1 added (n+2 points)\n \"\"\"\n # TODO - test the function\n edgevalue=np.mean([foldedLC[0],foldedLC[-1]])\n # add 0 and 1 phase entries\n fLC = foldedLC.copy()\n fphases = phases.copy()\n if 0. not in fphases:\n fphases = np.insert(phases,0,0.0)\n fLC = np.insert(fLC,0,edgevalue)\n if 1.0 not in phases:\n fphases = np.append(fphases,1.0)\n fLC = np.append(fLC,edgevalue)\n\n unfLCphases = ((times-t0) % P)/P\n\n unfoldedLC =np.interp(unfLCphases,fphases,fLC)\n unfoldedLC -= unfoldedLC.mean()\n \n \n return times,unfoldedLC,fphases,fLC\n\n\ndef BEERmodel(period,t0s,ecc,omega,incl,primary,secondary,alphab,alpha_refl,grav,limb,flux,times):\n \"\"\"\n Aellip,Abeam,Arefl, ABEER =\n BEERmodel(period,t0s,ecc,omega,incl,primary,secondary,alphab,alpha_refl,grav,limb,F,times) \\n\n Calculate the BEER model from the system parameters\n m1,2,R1,2 in solar units \\n\n Inputs: \\n\n period - days \\n\n t0s - time of superior conjunction (the function converts it to T0 - time of periastron) \\n\n ecc, omega [deg], incl [deg] \\n\n primary = tuple (m1,T1,R1) secondary tuple (m2,T2,R2) \\n\n alphab,alpha_refl,gb,limb,grav - 2 tuples (x1,x2) \\n\n times - JDs same scale as t0s \\n\n\n returns the total and component modulations\n \"\"\"\n\n# import pyastro as pa # solve kepler eq\n T0 = getT0(period,ecc,omega,t0s)\n\n nus = time2nu(period,ecc,T0, times)\n\n\n m1 = primary[0]\n# T1 = primary[1]\n R1 = primary[2]\n m2 = secondary[0]\n# T2 = secondary[1]\n R2 = secondary[2]\n alphab1 = alphab[0]\n alphab2 = alphab[1]\n alpha_refl1 = alpha_refl[0]\n alpha_refl2 = alpha_refl[1]\n grav1 = grav[0]\n grav2 = grav[1]\n limb1 = limb[0]\n limb2 = limb[1]\n q= m2/m1\n beta = (1.0+ecc*np.cos(nus))/(1.0-ecc**2)\n sM = m1+m2\n\n\n #\n alpha_e1_1 = 15.0*limb1*(2.0+grav1)/(32.0*(3.0-limb1))\n alpha_e1_2 = 15.0*limb2*(2.0+grav2)/(32.0*(3.0-limb2))\n alpha_e2_1 = 3.0*(15.0+limb1)*(1.0+grav1)/(20.0*(3.0-limb1))\n alpha_e2_2 = 3.0*(15.0+limb2)*(1.0+grav2)/(20.0*(3.0-limb2))\n alpha_e2b_1 = 15.0*(1.0-limb1)*(3.0+grav1)/(64.0*(3.0-limb1))\n alpha_e2b_2 = 15.0*(1.0-limb2)*(3.0+grav2)/(64.0*(3.0-limb2))\n alpha_e3_1 = 5.0/3.0*alpha_e1_1\n alpha_e3_2 = 5.0/3.0*alpha_e1_2\n alpha_e4_1 = 7.0*alpha_e2b_1/4.0\n alpha_e4_2 = 7.0*alpha_e2b_2/4.0\n ##\n alpha_e0_1 = alpha_e2_1/9.0\n alpha_e0_2 = alpha_e2_2/9.0\n alpha_e0b_1 = 3.0*alpha_e2b_1/20.\n alpha_e0b_2 = 3.0*alpha_e2b_2/20.\n\n\n\n\n\n# nus = time2nu(period,ecc,t0s, times) - np.pi/2.0\n inclRad=np.radians(incl)\n omegaRad=np.radians(omega)\n\n lum1 = flux[0]*R1**2\n lum2 = flux[1]*R2**2\n f1=(lum1/(lum1 + lum2))\n f2 = 1.0-f1\n\n sini = np.sin(inclRad)\n # beaming amplitude (in ppm)\n Abeam = -2830.0*(sini*((m1+m2))**(-2./3.)*period**(-1./3.)* \\\n np.cos(omegaRad+nus)/np.sqrt(1.0-ecc**2))* \\\n (f1*alphab1*m2 - f2*alphab2*m1)\n\n # Ellipsoidal amplitude (in ppm)\n\n Mel = np.zeros((8,beta.size))\n\n Mel[0,:]=\\\n 13435.0*(f1*alpha_e0_1*(2.+2.*q)*R1**3+f2*alpha_e0_2*(2.+2./q)*R2**3)*\\\n 
(2-3*sini**2)*sM**(-1)*period**(-2)\n Mel[1,:]=\\\n 13435.0*(f1*alpha_e0_1*(3.*q)*R1**3+f2*alpha_e0_2*(3./q)*R2**3)*\\\n (2-3*sini**2)*sM**(-1)*period**(-2)*beta**3\n Mel[2,:] =\\\n 759.0*(f1*alpha_e0b_1*q*R1**5+f2*alpha_e0b_2/q*R2**5)*\\\n (8-40*sini**2+35*sini**4)*sM**(-5.0/3.0)*period**(-10.0/3.0)*beta**5\n\n Mel[3,:] =\\\n 3194.0*(f1*alpha_e1_1*q*R1**4-f2*alpha_e1_2/q*R2**4)*\\\n (4*sini-5*sini**3)*sM**(-4.0/3.0)*period**(-8.0/3.0)*beta**4*np.sin(omegaRad+nus)\n\n Mel[4,:] =\\\n 13435.0*(f1*alpha_e2_1*q*R1**3+f2*alpha_e2_2/q*R2**3)*\\\n sini**2*sM**(-1)*period**(-2)*beta**3*np.cos(2*(omegaRad+nus))\n\n Mel[5,:] =\\\n 759.0*(f1*alpha_e2b_1*q*R1**5+f2*alpha_e2b_2/q*R2**5)*\\\n (6*sini**2-7*sini**4)*sM**(-5.0/3.0)*period**(-10.0/3.0)*beta**5*np.cos(2*(omegaRad+nus))\n\n Mel[6,:] =\\\n 3194.0*(f1*alpha_e3_1*q*R1**4-f2*alpha_e3_2/q*R2**4)*\\\n sini**3*sM**(-4.0/3.0)*period**(-8.0/3.0)*beta**4*np.sin(3*(omegaRad+nus))\n\n Mel[7,:] =\\\n 759.0*(f1*alpha_e4_1*q*R1**5+f2*alpha_e4_2/q*R2**5)*\\\n sini**4*sM**(-5.0/3.0)*period**(-10.0/3.0)*beta**5*np.cos(4*(omegaRad+nus))\n\n# Aellip = Mel0 + Mel1 + Mel2 + Mel3 + Mel4 + Mel5 + Mel6 + Mel7\n Aellip = Mel.sum(axis=0)\n\n\n\n Mrefp = 56514.0*sM**(-2.0/3.0)*period**(-4.0/3.0)*beta**2\n Mref1 = f1*alpha_refl2*R2**2*\\\n (0.64-sini*np.sin(omegaRad+nus)+0.18*sini**2)*(1-np.cos(2*(omegaRad+nus)))\n Mref2 = f2*alpha_refl1*R1**2*\\\n (0.64-sini*np.sin(omegaRad+nus+np.pi)+0.18*sini**2)*(1-np.cos(2*(omegaRad+nus+np.pi)))\n\n Arefl = Mrefp*(Mref1+Mref2)\n\n\n Mref =np.column_stack((Mref1,Mref2))\n # Total BEER amplitude\n\n ABEER = Abeam + Aellip + Arefl\n\n return Mel,Mref,Aellip,Abeam,Arefl, ABEER\n\ndef eBEERmodel1(period,t0s,ecc,omega,incl,primary,secondary,table_objects,alpha_refl,times):\n \"\"\"\n ********************************************************************************************************************\n eBEER modle that does the interpolation inside based on table objects\n ********************************************************************************************************************\n Mel,Mref,Aellip,Abeam,Arefl, ABEER =\n BEERmodel(period,t0s,ecc,omega,incl,primary,secondary,table_objects,times) \\n\n Calculate the BEER model from the system parameters\n m1,2,R1,2,teff1,2 in solar units \\n\n Inputs: \\n\n period - days \\n\n t0s - time of superior conjunction (the function converts it to T0 - time of periastron) \\n\n ecc, omega [deg], incl [deg] \\n\n primary = tuple (m1,teff1,R1) secondary tuple (m2,teff2,R2) \\n\n times - JDs same scale as t0s \\n\n\n returns:\n Mel,Mref,Aellip,Abeam,Arefl, ABEER\n \"\"\"\n ld_obj = table_objects[0]\n grav_obj = table_objects[1]\n alpha_obj = table_objects[2]\n flux_obj = table_objects[3]\n# import pyastro as pa # solve kepler eq\n T0 = getT0(period,ecc,omega,t0s)\n\n nus = time2nu(period,ecc,T0, times)\n\n\n m1 = primary[0]\n R1 = primary[1]\n teff1 = primary[2]\n\n m2 = secondary[0]\n R2 = secondary[1]\n teff2 = secondary[2]\n\n logg1 = calcLogg(m1,R1)\n logg2 = calcLogg(m2, R2)\n # Limiting the range of Teff and logg to avoid NaNs\n teff1_table = min(max(teff1, 3500), 9999)\n logg1_table = min(max(logg1, 0), 5)\n z1 = 0. # Ilya took 0 as default value\n teff2_table = min(max(teff2, 3500), 9999)\n logg2_table = min(max(logg2, 0), 5)\n z2 = 0. 
#\n fluxpoint = np.array([[teff1_table, logg1_table], [teff2_table, logg2_table]])\n # fluxpoint = np.array([[teff1, logg1_table], [teff2, logg2_table]])\n flux = flux_obj.__call__(fluxpoint)\n if np.any(np.isnan(flux)):\n print('flux contains NaN')\n print(fluxpoint)\n print(flux)\n alphabpoint = np.array([[teff1_table, logg1_table, z1], [teff2_table, logg2_table, z2]])\n alphab = alpha_obj.__call__(alphabpoint)\n if np.any(np.isnan(alphab)):\n print('alphab contains NaN')\n print(alphabpoint)\n print(alphab)\n alphab1,alphab2 = alphab[0:2]\n\n\n alpha_refl1 = alpha_refl[0]\n alpha_refl2 = alpha_refl[1]\n\n gravpoint = np.array([[np.log10(teff1_table), logg1_table, z1], [np.log10(teff2_table), logg2_table, z2]])\n grav = grav_obj.__call__(gravpoint)\n if np.any(np.isnan(grav)):\n print('grav contains NaN')\n print(gravpoint)\n print(grav)\n grav1, grav2 = grav[0:2]\n\n ldpoint = np.array([[teff1_table, logg1_table, z1], [teff2_table, logg2_table, z2]])\n limb = ld_obj.__call__(ldpoint)\n if np.any(np.isnan(limb)):\n print('limb contains NaN')\n print(ldpoint)\n print(limb)\n limb1, limb2 = limb[0:2]\n\n q= m2/m1\n beta = (1.0+ecc*np.cos(nus))/(1.0-ecc**2)\n sM = m1+m2\n\n\n #\n alpha_e1_1 = 15.0*limb1*(2.0+grav1)/(32.0*(3.0-limb1))\n alpha_e1_2 = 15.0*limb2*(2.0+grav2)/(32.0*(3.0-limb2))\n alpha_e2_1 = 3.0*(15.0+limb1)*(1.0+grav1)/(20.0*(3.0-limb1))\n alpha_e2_2 = 3.0*(15.0+limb2)*(1.0+grav2)/(20.0*(3.0-limb2))\n alpha_e2b_1 = 15.0*(1.0-limb1)*(3.0+grav1)/(64.0*(3.0-limb1))\n alpha_e2b_2 = 15.0*(1.0-limb2)*(3.0+grav2)/(64.0*(3.0-limb2))\n alpha_e3_1 = 5.0/3.0*alpha_e1_1\n alpha_e3_2 = 5.0/3.0*alpha_e1_2\n alpha_e4_1 = 7.0*alpha_e2b_1/4.0\n alpha_e4_2 = 7.0*alpha_e2b_2/4.0\n ##\n alpha_e0_1 = alpha_e2_1/9.0\n alpha_e0_2 = alpha_e2_2/9.0\n alpha_e0b_1 = 3.0*alpha_e2b_1/20.\n alpha_e0b_2 = 3.0*alpha_e2b_2/20.\n\n inclRad=np.radians(incl)\n omegaRad=np.radians(omega)\n\n lum1 = flux[0]*R1**2\n lum2 = flux[1]*R2**2\n f1=(lum1/(lum1 + lum2))\n f2 = 1.0-f1\n\n sini = np.sin(inclRad)\n\n\n # beaming amplitude (in ppm)\n Abeam = -2830.0*(sini*((m1+m2))**(-2./3.)*period**(-1./3.)* \\\n np.cos(omegaRad+nus)/np.sqrt(1.0-ecc**2))* \\\n (f1*alphab1*m2 - f2*alphab2*m1)\n\n # Ellipsoidal amplitude (in ppm)\n#\n Mel = np.zeros((8,beta.size))\n\n Mel[0,:]=\\\n 13435.0*(f1*alpha_e0_1*(2.+2.*q)*R1**3+f2*alpha_e0_2*(2.+2./q)*R2**3)*\\\n (2-3*sini**2)*sM**(-1)*period**(-2)\n Mel[1,:]=\\\n 13435.0*(f1*alpha_e0_1*(3.*q)*R1**3+f2*alpha_e0_2*(3./q)*R2**3)*\\\n (2-3*sini**2)*sM**(-1)*period**(-2)*beta**3\n Mel[2,:] =\\\n 759.0*(f1*alpha_e0b_1*q*R1**5+f2*alpha_e0b_2/q*R2**5)*\\\n (8-40*sini**2+35*sini**4)*sM**(-5.0/3.0)*period**(-10.0/3.0)*beta**5\n\n Mel[3,:] =\\\n 3194.0*(f1*alpha_e1_1*q*R1**4-f2*alpha_e1_2/q*R2**4)*\\\n (4*sini-5*sini**3)*sM**(-4.0/3.0)*period**(-8.0/3.0)*beta**4*np.sin(omegaRad+nus)\n\n Mel[4,:] =\\\n 13435.0*(f1*alpha_e2_1*q*R1**3+f2*alpha_e2_2/q*R2**3)*\\\n sini**2*sM**(-1)*period**(-2)*beta**3*np.cos(2*(omegaRad+nus))\n\n Mel[5,:] =\\\n 759.0*(f1*alpha_e2b_1*q*R1**5+f2*alpha_e2b_2/q*R2**5)*\\\n (6*sini**2-7*sini**4)*sM**(-5.0/3.0)*period**(-10.0/3.0)*beta**5*np.cos(2*(omegaRad+nus))\n\n Mel[6,:] =\\\n 3194.0*(f1*alpha_e3_1*q*R1**4-f2*alpha_e3_2/q*R2**4)*\\\n sini**3*sM**(-4.0/3.0)*period**(-8.0/3.0)*beta**4*np.sin(3*(omegaRad+nus))\n\n Mel[7,:] =\\\n 759.0*(f1*alpha_e4_1*q*R1**5+f2*alpha_e4_2/q*R2**5)*\\\n sini**4*sM**(-5.0/3.0)*period**(-10.0/3.0)*beta**5*np.cos(4*(omegaRad+nus))\n\n Aellip = Mel.sum(axis=0)\n\n # reflection amplitude in ppm\n\n Mrefp = 
56514.0*sM**(-2.0/3.0)*period**(-4.0/3.0)*beta**2\n Mref1 = f1*alpha_refl2*R2**2*\\\n (0.64-sini*np.sin(omegaRad+nus)+0.18*sini**2)*(1-np.cos(2*(omegaRad+nus)))\n Mref2 = f2*alpha_refl1*R1**2*\\\n (0.64-sini*np.sin(omegaRad+nus+np.pi)+0.18*sini**2)*(1-np.cos(2*(omegaRad+nus+np.pi)))\n\n Arefl = Mrefp*(Mref1+Mref2)\n\n Mref =np.column_stack((Mref1,Mref2))\n\n\n ABEER = Abeam + Aellip + Arefl\n\n return Mel,Mref,Aellip,Abeam,Arefl, ABEER\n\ndef eBEERchi2(period,t0s,ecc,omega,incl,primary,secondary,table_objects,alpha_refl,lc,rv):\n '''\n calculates the chi2 of a given lc and eBEER model\n using the above eBEERmodel1 function\n :params period,t0s,ecc,omega,incl,primary,secondary,table_objects,alpha_refl\n :param lc 3 col vector [times,lc,sigmalc] rv 3 col vector [times,rv,sigmarv]\n :return: eBchi2\n '''\n f_phases = np.arange(0., 1.0, 0.01) + 0.005\n f_times = t0s + period * f_phases\n lctimes = lc[:,0]\n measdfs = lc[:,1]\n measdfsigmas = lc[:,2]\n lcpoints = lctimes.size\n _, _, _, _, _, ABEER = \\\n eBEERmodel1(period, t0s, ecc, omega, incl, primary, secondary, table_objects, alpha_refl, f_times)\n\n u_phases = ((lctimes - t0s) % period) / period\n\n uABEER = np.interp(u_phases, f_phases, ABEER)\n uABEER -= uABEER.mean()\n # ABEER -= ABEER.mean()\n ebchi2 = chisq(measdfs, uABEER, measdfsigmas)\n redebchi2 = ebchi2/lcpoints\n\n m1 = primary[0]\n m2 = secondary[0]\n modelrvs = sysparams2RVs(period, t0s, ecc, omega, incl, m1, m2, rv[:,0])\n rvchi2 = chisq(rv[:,1],modelrvs,rv[:,2])\n rvpoints = modelrvs.size\n return ebchi2,lcpoints,rvchi2,rvpoints\n\ndef eB_log_prob_fn(state,params,verbose = False):\n '''\n calculates the chi2 of a given lc and eBEER model\n using the above eBEERmodel1 function\n :param\n state:period,t0s,ecc,omega,incl,primary,secondary,alpha_refl\n params: a tuple (lc,rv,bounds)\n lc 3 col vector [times,lc,sigmalc] rv 3 col vector [times,rv,sigmarv]\n bounds a 2 col vector of upper and lower bounds of the params\n\n :return: -0.5*(eBchi2+rvchi2)\n '''\n lc, rv, table_objects,bounds = params\n # calculate logprior\n # if log prior returns -inf return\n logprior = eB_log_prior(state,bounds)\n if not np.isfinite(logprior): #prevent computing outside of parameter space\n return -np.inf\n\n period, t0s, ecc, omega, incl, m1,r1,teff1, m2,r2,teff2, alpha_refl1,alpha_refl2 = list(state)\n\n #debug\n # if gettrace():\n # print('P=%.1f t0s=%.1f e=%.2f w=%.1f i=%.1f m1=%.1f r1=%.1f T1=%.0f m2=%.1f r2=%.1f T2=%.0f alph1=%.1e alph2=%.1e \\n' \\\n # %(period, t0s, ecc, omega, incl, m1, r1, teff1, m2, r2, teff2, alpha_refl1,alpha_refl2)\n # )\n\n primary = (m1,r1,teff1)\n secondary = (m2,r2,teff2)\n alpha_refl = (alpha_refl1,alpha_refl2)\n # f_phases = np.arange(0., 1.0, 0.01) + 0.005\n nsteps = 200.\n f_phases = np.append(np.arange(0., 1.0, 1./nsteps),1.0)\n f_times = t0s + period * f_phases\n lctimes = lc[:,0]\n measdfs = lc[:,1]\n measdfsigmas = lc[:,2]\n\n _, _, _, _, _, ABEER = \\\n eBEERmodel1(period, t0s, ecc, omega, incl, primary, secondary, table_objects, alpha_refl, f_times)\n\n u_phases = ((lctimes - t0s) % period) / period\n\n uABEER = np.interp(u_phases, f_phases, ABEER)\n uABEER -= uABEER.mean()\n # ABEER -= ABEER.mean()\n ebchi2 = chisq(measdfs, uABEER, measdfsigmas)\n\n\n # m1 = primary[0]\n # m2 = secondary[0]\n modelrvs = sysparams2RVs(period, t0s, ecc, omega, incl, m1, m2, rv[:,0])\n modelrvs-=modelrvs.mean()\n rvchi2 = chisq(rv[:,1],modelrvs,rv[:,2])\n\n lpr =-0.5*(ebchi2+rvchi2)\n\n if verbose:\n return np.array([ebchi2, rvchi2])\n\n elif np.isnan(lpr):\n return 
-np.Inf\n    else:\n        return lpr +logprior\n\ndef eB_log_prior(state,bounds):\n    \"\"\"\n    Defines the log prior probability of the parameters in the state\n    :param state:\n    state is a numpy array of parameter values according to the list below\n    period, t0s, ecc, omega, incl, m1, r1, teff1, m2, r2, teff2, alpha_refl1, alpha_refl2 = state\n    ubounds,lbounds = bounds\n    :return:\n    -np.inf outside the boundaries of the parameter space\n    0.0 inside the boundaries (flat, non-informative prior)\n    \"\"\"\n    # TODO add constraint on incl < 90\n    ubounds,lbounds = bounds\n    if np.any(state < lbounds) or np.any(state > ubounds):\n        return -np.inf\n    else:\n        return 0.0\n\ndef corrlen(x):\n    \"\"\"\n    Roy Gomel function\n    Calculates the correlation length of a vector\n    :param x: vector of values\n    :return: cL, autocor\n    \"\"\"\n    nx = x - x.mean()\n    nx = nx / ((nx ** 2).mean()) ** 0.5\n    result = np.correlate(nx, nx, mode='full')\n    autocor = result[round(result.size / 2):]\n    autocor /= autocor[0]\n    cL = np.where(autocor < 0.5)[0][0]\n    return cL, autocor\n\ndef getAlphaRef(Aref,period,ecc,omega,incl,m1,R1,m2,R2,f1,t0s,times):\n    \"\"\"\n    Calculates alpha reflection coefficients by ordinary least squares matrix\n    solution\n    inputs:\n    Aref - the \"measured\" reflection amplitude with length of times\n    period,ecc,omega,incl,m1,R1,m2,R2\n    f1,f2 - relative fluxes\n    t0s,times - t0s and a vector of times\n    returns:\n    alphas - a vector [alpha0,alpha1,alpha2]\n    where alpha1 and alpha2 are the primary and secondary reflection coeffs.\n    \"\"\"\n    f2 = 1.0-f1\n    T0 = getT0(period,ecc,omega,t0s)\n\n    nus = time2nu(period,ecc,T0, times)\n\n    # q= m2/m1\n    beta = (1.0+ecc*np.cos(nus))/(1.0-ecc**2)\n    sM = m1+m2\n    sini = np.sin(np.deg2rad(incl))\n    omegaRad=np.deg2rad(omega)\n    Mrefp = 56514.0*sM**(-2.0/3.0)*period**(-4.0/3.0)*beta**2\n    Mr1 = Mrefp*f1*R2**2*\\\n    (0.64-sini*np.sin(omegaRad+nus)+0.18*sini**2)*(1-np.cos(2*(omegaRad+nus)))\n    Mr2 = Mrefp*f2*R1**2*\\\n    (0.64-sini*np.sin(omegaRad+nus+np.pi)+0.18*sini**2)*(1-np.cos(2*(omegaRad+nus+np.pi)))\n    Mr0 = np.ones(len(nus))\n    DesignMatrix = np.column_stack((Mr0,Mr1,Mr2))\n\n    invDeM = np.linalg.pinv(DesignMatrix)\n    alphas = np.dot(invDeM,Aref)\n    return(alphas)\n\ndef getMS_RadTemp(M):\n    \"\"\"\n    [R,T,logg] = getMS_RadTemp(M)\n    estimates the MS radius and temperature of a star\n    based on its mass M [in units of sol mass]\n    \n    \"\"\"\n    R = np.max([0.1,M**0.8]) # radius assuming it is a main sequence star\n    # Secondary temperature assuming it is a main sequence star \n    T= 5920.0 * ( np.min( [np.max( [0.23*M**2.3 , M**4] ) ,1.5*M**3.5 ])/R**2.)**(1./4.) 
\n logg = 4.438 + np.log10(M) - 2*np.log10(R) \n # for Phoebe force logg <5\n if logg>=5.0:\n logR = 0.5*(np.log10(M)-0.562)\n R= 10**logR +0.005\n logg = 4.438 + np.log10(M) - 2*np.log10(R)\n \n return(R,T,logg)\n \n \n\n\n\ndef sysconfig2fitpars(sysconfig):\n \"\"\"\n converts sys config to initpars list\n input:\n sysconfig - sysconfig dictionary\n output:\n fit params list\n \"\"\" \n \n fitparNames =[\"P\",\"t0s\",\"e\", \"w\", \"incl\", \"T1\", \"m1\", \"R1\", \"q\",\"T2\",\"R2\", \"rho1\",\"rho2\",\"gb1\", \"gb2\",\n \"ldc11\",\"ldc12\",\"ldc21\",\"ldc22\"]\n fitpars=[None]*len(fitparNames)\n fitpars[fitparNames.index('P')] = sysconfig['period']\n fitpars[fitparNames.index('t0s')] = sysconfig['t0supconj']\n fitpars[fitparNames.index('e')] = sysconfig['ecc']\n fitpars[fitparNames.index('w')] = sysconfig['w']\n fitpars[fitparNames.index('incl')] = sysconfig['incl']\n fitpars[fitparNames.index('T1')] = sysconfig['teff1']\n fitpars[fitparNames.index('m1')] = sysconfig['m1']\n fitpars[fitparNames.index('R1')] = sysconfig['rp1']\n fitpars[fitparNames.index('q')] = sysconfig['m2']/sysconfig['m1']\n fitpars[fitparNames.index('T2')] = sysconfig['teff2']\n fitpars[fitparNames.index('R2')] = sysconfig['rp2']\n fitpars[fitparNames.index('rho1')] = sysconfig['rho1']\n fitpars[fitparNames.index('rho2')] = sysconfig['rho2']\n fitpars[fitparNames.index('gb1')] = sysconfig['gravb1']\n fitpars[fitparNames.index('gb2')] = sysconfig['gravb2']\n fitpars[fitparNames.index('ldc11')] = sysconfig['ldcoeffs1'][0]\n fitpars[fitparNames.index('ldc12')] = sysconfig['ldcoeffs1'][1]\n fitpars[fitparNames.index('ldc21')] = sysconfig['ldcoeffs2'][0]\n fitpars[fitparNames.index('ldc22')] = sysconfig['ldcoeffs2'][1]\n\n return fitpars\n\n\ndef fitpars2sysconfig(sysconfig,names,pars):\n \"\"\"\n updates sysconfig with fit parameters\n input:\n sysconfig - sysconfig dictionary\n names - a list of parameter names (subset of a fitparam name list)\n pars - a list of values corresponding to names\n output:\n updated sysconfig with the fit params\n \"\"\"\n for curname in names:\n sysconfig[curname]= pars[names.index(curname)]\n# if 'P' in names:\n# sysconfig['period'] = pars[names.index('P')]\n# if 't0s' in names:\n# sysconfig['t0supconj'] = pars[names.index('t0s')]\n# if 'e' in names:\n# sysconfig['ecc'] = pars[names.index('e')]\n# if 'w' in names:\n# sysconfig['w'] = pars[names.index('w')]\n# if 'incl' in names:\n# sysconfig['incl'] = pars[names.index('incl')]\n# if 'T1' in names:\n# sysconfig['teff1'] = pars[names.index('T1')]\n\n return sysconfig\n\n\ndef get_rootPath(machine):\n \"\"\"\n rootpath,location,ncpus = get_rootPath(machine)\n gets root path according to the machine we are running on\n returns:\n location string\n ncpus - number of processors on the machine\n root path - string\n \"\"\"\n import sys\n if machine == 'user-Latitude-E5570':\n rootpath = r'/home/user/Dropbox/KBEER_phoebe_ipynb/'\n\n location='dell'\n ncpus =4\n\n\n elif machine == 'eshel-blue':\n rootpath = r'/home/eshel-blue/micha/dropboxClone/'\n\n location = 'blue'\n ncpus =6\n\n elif machine == 'engelmic@astrophys.tau.ac.il':\n rootpath = r'/storage/home/engelmic/KBEER/'\n\n location = 'astro'\n ncpus = 16\n\n else:\n print('unknown Machine - setup paths.')\n sys.exit()\n\n return rootpath,location,ncpus \n \n\n\n\ndef fm2k(fm,P,ecc):\n \"\"\" \n Calculate K from the mass function\n inputs:\n fm - mass function in Msun units\n P - period in days\n ecc - eccentricity\n \"\"\"\n A=1.036e-7 # coversion factor to the required units 
[Msun*s/km/d]\n K= (fm/A/P/(((1-ecc**2))**(2./3.)))**(1./3.)\n return K\n \n \ndef errorEstimate(xs,ys,fitwidthd,fitwidthu,chi2threshold):\n \"\"\"\n (err,poly_xs, poly_ys) = errorEstimate(xs,ys,fitwidth,chi2threshold)\n\n chi2threshold - the increase iv chi2 above the fitted minimum (should be 1)\n fitwidthu,d = number of points to take for the fit around the minimum u=above, d= below\n \"\"\"\n minindex=ys.tolist().index(ys.min())\n polys = ys[minindex-fitwidthd:minindex+fitwidthu]\n poly_xs = xs[minindex-fitwidthd:minindex+fitwidthu]\n polcoeffs=np.polyfit(poly_xs,polys,2)\n poly_ys= np.polyval(polcoeffs,poly_xs)\n p1 =polcoeffs.copy()\n p1[2] = polcoeffs[2]-poly_ys.min()-chi2threshold\n sols = np.roots(p1)\n err = (sols[0]-sols[1])/2\n\n return(err,poly_xs, poly_ys)\n\n\ndef elMN93(m1,r1,m2, period,incl,u1,tau1):\n \"\"\"\n aelMN = elMN93(m1,r1,m2, period,incl,u1,tau1)\n Calculates the ellipsoidal modulation amplitude of a star with non luminous companion\n for a specific photometric band according to Morris Naftilan 93\n m1,r1,m2,period parameters of the system\n u1 - limb darkening coeff in the band\n tau1 - gravity darkening coefficient for the band (not bolometric!!)\n tau1 and u1 are from claret11 or ilya and is band sensitive\n \"\"\"\n # a2cMN = alpha * (r1/sma)**3 * q * (np.sin(np.radians(incl)))**2\n q = m2/m1\n alpha = 3*(15+u1)*(1+tau1)/20/(3-u1)\n aelMN = 0.013 * alpha * r1**3 * m1**-1 * period**-2 * q/(1.+q) * (np.sin(np.radians(incl)))**2\n\n return aelMN\n\n\ndef claret2011LD(fullldtable,logg,teff):\n \"\"\"\n ld_coeff = claret2011LD(fullldtable,logg,teff)\n Inputs:\n fullldtable - ld table that was pre loaded with\n 'logg','teff','z','xi','u','filt','met','mod' columns\n logg,teff - parameters for which the value is interpolated\n A sub table is taken for z=0 and xi = 2 km/s (microturbulence velocity)\n uses griddata function from scipy.interpolate\n \"\"\"\n teff_int = teff+0.\n logg_int = logg+0.\n teff_int = min(max(teff_int,3500),40000)\n logg_int = min(max(logg_int,0),5)\n ld_claret = fullldtable[(fullldtable[:]['xi']==2.0) * (fullldtable[:]['z']==0.) 
]\n ldpoints = np.column_stack((ld_claret[:]['logg'],ld_claret[:]['teff']))\n ldvals = ld_claret[:]['u']\n ld_coeff= np.asscalar(\n griddata(ldpoints,ldvals,(logg_int,teff_int),method = 'linear'))\n\n return ld_coeff\n\ndef claret2004Gravbol(gbtable,logg,teff,mass):\n \"\"\"\n gb = claret2004Gravbol(gbtable,logg,teff,mass)\n Inputs:\n gbtable - gb table that was pre loaded with\n 'logm' (log initial mass),'logg','logteff','mass','beta1' columns\n logg,teff,mass- parameters for which the value is interpolated\n\n uses griddata function from scipy.interpolate\n \"\"\"\n mass_int = mass +0.\n teff_int = teff+0.\n logg_int = logg+0.\n teff_int = min(max(teff_int,3208.),62000.)\n logg_int = min(max(logg_int,-0.8),6.)\n mass_int = min(max(mass_int,0.8),15.)\n # the table consists of columns: log initial mass logg log teff and mass --> gravbol\n gbpoints = np.column_stack((gbtable[:]['logg'],gbtable[:]['logteff'],gbtable[:]['mass']))\n gbvals = gbtable[:]['beta1']\n gb=np.asscalar(\n griddata(gbpoints,gbvals,(logg_int,np.log10(teff_int),mass_int),method = 'linear'))\n if np.isnan(gb):\n #print(teff,logg,mass)\n gb = np.asscalar(\n griddata(gbpoints, gbvals, (logg_int, np.log10(teff_int), mass_int), method='nearest'))\n print('claret2004Gravbol returned nan for T %d logg %.1f M %.1f \\n setting gb to nearest value %.3f \\n' % (\n teff, logg, mass, gb))\n return gb\n\n# def eBEERchi2(period,t0s,ecc,omega,incl,primary,secondary,alphab,alpha_refl,grav,limb,flux,times,measlc):\n# \"\"\"\n# :param period,t0s,ecc,omega,incl\n# :param primary: m1,rp1,teff1\n# :param secondary: m2,rp2,tef2\n# :param alphab: alpha beam - from Ilya's table\n# :param alpha_refl: (alpharefl1,alpharefl2)\n# :param grav: (grav1,grav2) from claret tables\n# :param limb: (ld1,ld2) from calret tables\n# :param flux: (flux1,flux2) from Ilya's tables\n# :param times: times vector (relative to t0s) to calculate the model\n# :param measlc: measured lc 3 col: times,meas_lc,sigmas\n# :return: ch12\n# \"\"\"\n#\n#\n#\n#\n#\n#\n#\n# return chi2\n\n\n\n\n\nif __name__== '__main__':\n print('Testng pyBEER functions')","sub_path":"packages/pyBEERm.py","file_name":"pyBEERm.py","file_ext":"py","file_size_in_byte":56174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"67849396","text":"from __future__ import absolute_import, division, print_function\nimport utool\nfrom ibeis.dev import params\nfrom ibeis.dev import ibsfuncs\n# Inject utool functions\n(print, print_, printDBG, rrr, profile) = utool.inject(\n __name__, '[main_helpers]', DEBUG=False)\n\n\ndef register_utool_aliases():\n #print('REGISTER UTOOL ALIASES')\n import utool\n import matplotlib as mpl\n from ibeis.control import IBEISControl\n from ibeis.gui import guiback\n from ibeis.gui import guifront\n utool.extend_global_aliases([\n (IBEISControl.IBEISController, 'ibs'),\n (guiback.MainWindowBackend, 'back'),\n (guifront.MainWindowFrontend, 'front'),\n (mpl.figure.Figure, 'fig')\n ])\n\n\n@utool.indent_func\n@profile\ndef get_test_qaids(ibs):\n \"\"\" Gets test annotation_rowids based on command line arguments \"\"\"\n #print('[main_helpers]')\n test_qaids = []\n valid_aids = ibs.get_valid_aids()\n printDBG('1. valid_aids = %r' % valid_aids[0:5])\n #print(utool.dict_str(vars(params.args)))\n\n if params.args.qaid is not None:\n printDBG('Testing qaid=%r' % params.args.qaid)\n test_qaids.extend(params.args.qaid)\n\n if params.args.all_cases:\n printDBG('Testing all %d cases' % (len(valid_aids),))\n printDBG('1. 
test_qaids = %r' % test_qaids[0:5])\n test_qaids.extend(valid_aids)\n printDBG('2. test_qaids = %r' % test_qaids[0:5])\n else:\n is_hard_list = ibsfuncs.get_annot_is_hard(ibs, valid_aids)\n hard_aids = utool.filter_items(valid_aids, is_hard_list)\n printDBG('Testing %d known hard cases' % len(hard_aids))\n test_qaids.extend(hard_aids)\n\n if params.args.all_gt_cases:\n has_gt_list = ibs.get_annot_has_groundtruth(valid_aids)\n hasgt_aids = utool.filter_items(valid_aids, has_gt_list)\n print('Testing all %d ground-truthed cases' % len(hasgt_aids))\n test_qaids.extend(hasgt_aids)\n\n # Sample a large pool of query indexes\n # Filter only the ones you want from the large pool\n if params.args.index is not None:\n indexes = utool.ensure_iterable(params.args.index)\n #printDBG('Chosen indexes=%r' % (indexes,))\n #printDBG('test_qaids = %r' % test_qaids[0:5])\n _test_qaids = [test_qaids[xx] for xx in indexes]\n test_qaids = _test_qaids\n #printDBG('test_qaids = %r' % test_qaids)\n elif len(test_qaids) == 0 and len(valid_aids) > 0:\n #printDBG('no hard or gt aids. Defaulting to the first ANNOTATION')\n test_qaids = valid_aids[0:1]\n\n #print('test_qaids = %r' % test_qaids)\n test_qaids = utool.unique_keep_order2(test_qaids)\n return test_qaids\n","sub_path":"ibeis/dev/main_helpers.py","file_name":"main_helpers.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}